// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap 1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removing. This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON 0x1U
#define MOVE_FILE 0x2U
#define MOVE_MASK (MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we do release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	spin_lock_irqsave(&css_set_lock, flags);
	memcg = obj_cgroup_memcg(objcg);
	if (nr_pages)
		__memcg_kmem_uncharge(memcg, nr_pages);
	list_del(&objcg->list);
	mem_cgroup_put(memcg);
	spin_unlock_irqrestore(&css_set_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&css_set_lock);

	/* Move active objcg to the parent's list */
	xchg(&objcg->memcg, parent);
	css_get(&parent->css);
	list_add(&objcg->list, &parent->objcg_list);

	/* Move already reparented objcgs to the parent's list */
	list_for_each_entry(iter, &memcg->objcg_list, list) {
		css_get(&parent->css);
		xchg(&iter->memcg, parent);
		css_put(&memcg->css);
	}
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&css_set_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few kmem-limited. Or also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler.
 * Since the calls to memcg_slab_pre_alloc_hook() are conditional to this
 * static branch, we'll have to allow modules that do kmem_cache_alloc and
 * the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}
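
/*
 * Worked example for the sizing in memcg_expand_shrinker_maps() above
 * (illustrative only, assuming BITS_PER_LONG == 64 and
 * sizeof(unsigned long) == 8, as on typical 64-bit builds): a new
 * shrinker id of 100 needs 101 bits, so
 *
 *	size = DIV_ROUND_UP(101, 64) * 8 = 2 * 8 = 16 bytes
 *
 * i.e. two longs per node, rounded up so the per-node map always covers
 * the highest registered shrinker id.
 */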

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page->mem_cgroup;

	/*
	 * The lowest bit set means that memcg isn't a valid
	 * memcg pointer, but an obj_cgroups pointer.
	 * In this case the page is shared and doesn't belong
	 * to any specific memory cgroup.
	 */
	if ((unsigned long) memcg & 0x1UL)
		memcg = NULL;

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x, threshold = MEMCG_CHARGE_BATCH;

	if (mem_cgroup_disabled())
		return;

	if (memcg_stat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > threshold)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x, threshold = MEMCG_CHARGE_BATCH;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	if (vmstat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > threshold)) {
		pg_data_t *pgdat = lruvec_pgdat(lruvec);
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
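
/*
 * Illustrative example of the batching above (assuming MEMCG_CHARGE_BATCH
 * is 32, its usual value): per-CPU deltas accumulate locally until the
 * running total exceeds 32 pages (or 32 << PAGE_SHIFT bytes for byte-sized
 * items). Only then is the whole batch folded into the atomic hierarchical
 * counters and the per-CPU counter reset to 0, so a stream of +1 updates
 * touches the shared atomics roughly once every 32 events per CPU.
 */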

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg || memcg == root_mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}
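
/*
 * Illustrative example of the ratelimiting above: with
 * THRESHOLDS_EVENTS_TARGET == 128 and SOFTLIMIT_EVENTS_TARGET == 1024, a
 * CPU whose page-event counter has run past its threshold target trips
 * MEM_CGROUP_TARGET_THRESH and re-arms the target 128 events further out,
 * while only roughly one in eight of those trips also crosses the
 * soft-limit target - hence the "finer grain" note in memcg_check_events().
 */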

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = active_memcg();
	if (memcg) {
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
	}
	rcu_read_unlock();

	return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * If active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (memcg_kmem_bypass())
		return NULL;

	if (unlikely(active_memcg()))
		return get_active_memcg();

	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}
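
/*
 * Usage sketch for the iterator API above (illustrative only;
 * some_condition() stands in for caller-specific logic): a walk that is
 * abandoned early must hand the last returned memcg to
 * mem_cgroup_iter_break() so its css reference is dropped, e.g.
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * Completing the loop needs no cleanup, because the final
 * mem_cgroup_iter() call returns NULL after dropping the last reference.
 */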

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable - see the
 * access rules in commit_charge().
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}
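
/*
 * Worked example for mem_cgroup_margin() below (illustrative numbers only):
 * with memory usage at 900 pages and memory.max at 1000, the margin is
 * 100 pages. If legacy memory+swap accounting is active and memsw usage is
 * 990 against a memsw limit of 1000, the margin shrinks to
 * min(100, 10) == 10 pages - the charge path can only rely on whichever
 * counter is closer to its limit.
 */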

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from or mc.to, or under the hierarchy of
 * moving cgroups. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int ratio;
	unsigned int idx;
};

static struct memory_stat memory_stats[] = {
	{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
	{ "file", PAGE_SIZE, NR_FILE_PAGES },
	{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
	{ "percpu", 1, MEMCG_PERCPU_B },
	{ "sock", PAGE_SIZE, MEMCG_SOCK },
	{ "shmem", PAGE_SIZE, NR_SHMEM },
	{ "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
	{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
	{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/*
	 * The ratio will be initialized in memory_stats_init(). Because
	 * on some architectures, the macro of HPAGE_PMD_SIZE is not
	 * constant (e.g. powerpc).
	 */
	{ "anon_thp", 0, NR_ANON_THPS },
#endif
	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
	{ "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
	{ "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
	{ "unevictable", PAGE_SIZE, NR_UNEVICTABLE },

	/*
	 * Note: The slab_reclaimable and slab_unreclaimable must be
	 * together and slab_reclaimable must be in front.
	 */
	{ "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
};

static int __init memory_stats_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memory_stats[i].idx == NR_ANON_THPS)
			memory_stats[i].ratio = HPAGE_PMD_SIZE;
#endif
		VM_BUG_ON(!memory_stats[i].ratio);
		VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
	}

	return 0;
}
pure_initcall(memory_stats_init);

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state(memcg, memory_stats[i].idx);
		size *= memory_stats[i].ratio;
		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
			       memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
			seq_buf_printf(&s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we do
				 * not reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, the in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
1946 */ 1947 if (memcg->oom_kill_disable) { 1948 if (!current->in_user_fault) 1949 return OOM_SKIPPED; 1950 css_get(&memcg->css); 1951 current->memcg_in_oom = memcg; 1952 current->memcg_oom_gfp_mask = mask; 1953 current->memcg_oom_order = order; 1954 1955 return OOM_ASYNC; 1956 } 1957 1958 mem_cgroup_mark_under_oom(memcg); 1959 1960 locked = mem_cgroup_oom_trylock(memcg); 1961 1962 if (locked) 1963 mem_cgroup_oom_notify(memcg); 1964 1965 mem_cgroup_unmark_under_oom(memcg); 1966 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1967 ret = OOM_SUCCESS; 1968 else 1969 ret = OOM_FAILED; 1970 1971 if (locked) 1972 mem_cgroup_oom_unlock(memcg); 1973 1974 return ret; 1975 } 1976 1977 /** 1978 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1979 * @handle: actually kill/wait or just clean up the OOM state 1980 * 1981 * This has to be called at the end of a page fault if the memcg OOM 1982 * handler was enabled. 1983 * 1984 * Memcg supports userspace OOM handling where failed allocations must 1985 * sleep on a waitqueue until the userspace task resolves the 1986 * situation. Sleeping directly in the charge context with all kinds 1987 * of locks held is not a good idea, instead we remember an OOM state 1988 * in the task and mem_cgroup_oom_synchronize() has to be called at 1989 * the end of the page fault to complete the OOM handling. 1990 * 1991 * Returns %true if an ongoing memcg OOM situation was detected and 1992 * completed, %false otherwise. 1993 */ 1994 bool mem_cgroup_oom_synchronize(bool handle) 1995 { 1996 struct mem_cgroup *memcg = current->memcg_in_oom; 1997 struct oom_wait_info owait; 1998 bool locked; 1999 2000 /* OOM is global, do not handle */ 2001 if (!memcg) 2002 return false; 2003 2004 if (!handle) 2005 goto cleanup; 2006 2007 owait.memcg = memcg; 2008 owait.wait.flags = 0; 2009 owait.wait.func = memcg_oom_wake_function; 2010 owait.wait.private = current; 2011 INIT_LIST_HEAD(&owait.wait.entry); 2012 2013 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2014 mem_cgroup_mark_under_oom(memcg); 2015 2016 locked = mem_cgroup_oom_trylock(memcg); 2017 2018 if (locked) 2019 mem_cgroup_oom_notify(memcg); 2020 2021 if (locked && !memcg->oom_kill_disable) { 2022 mem_cgroup_unmark_under_oom(memcg); 2023 finish_wait(&memcg_oom_waitq, &owait.wait); 2024 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 2025 current->memcg_oom_order); 2026 } else { 2027 schedule(); 2028 mem_cgroup_unmark_under_oom(memcg); 2029 finish_wait(&memcg_oom_waitq, &owait.wait); 2030 } 2031 2032 if (locked) { 2033 mem_cgroup_oom_unlock(memcg); 2034 /* 2035 * There is no guarantee that an OOM-lock contender 2036 * sees the wakeups triggered by the OOM kill 2037 * uncharges. Wake any sleepers explicitely. 2038 */ 2039 memcg_oom_recover(memcg); 2040 } 2041 cleanup: 2042 current->memcg_in_oom = NULL; 2043 css_put(&memcg->css); 2044 return true; 2045 } 2046 2047 /** 2048 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 2049 * @victim: task to be killed by the OOM killer 2050 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 2051 * 2052 * Returns a pointer to a memory cgroup, which has to be cleaned up 2053 * by killing all belonging OOM-killable tasks. 2054 * 2055 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
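 *
 * Example (illustrative hierarchy only): with cgroups A/B/C, memory.oom.group
 * set on B, and an OOM against A's limit whose selected victim runs in C,
 * this function returns B, and the OOM killer then kills every process in
 * B's subtree instead of a single task in C.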
2056 */ 2057 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2058 struct mem_cgroup *oom_domain) 2059 { 2060 struct mem_cgroup *oom_group = NULL; 2061 struct mem_cgroup *memcg; 2062 2063 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2064 return NULL; 2065 2066 if (!oom_domain) 2067 oom_domain = root_mem_cgroup; 2068 2069 rcu_read_lock(); 2070 2071 memcg = mem_cgroup_from_task(victim); 2072 if (memcg == root_mem_cgroup) 2073 goto out; 2074 2075 /* 2076 * If the victim task has been asynchronously moved to a different 2077 * memory cgroup, we might end up killing tasks outside oom_domain. 2078 * In this case it's better to ignore memory.group.oom. 2079 */ 2080 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2081 goto out; 2082 2083 /* 2084 * Traverse the memory cgroup hierarchy from the victim task's 2085 * cgroup up to the OOMing cgroup (or root) to find the 2086 * highest-level memory cgroup with oom.group set. 2087 */ 2088 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2089 if (memcg->oom_group) 2090 oom_group = memcg; 2091 2092 if (memcg == oom_domain) 2093 break; 2094 } 2095 2096 if (oom_group) 2097 css_get(&oom_group->css); 2098 out: 2099 rcu_read_unlock(); 2100 2101 return oom_group; 2102 } 2103 2104 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2105 { 2106 pr_info("Tasks in "); 2107 pr_cont_cgroup_path(memcg->css.cgroup); 2108 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2109 } 2110 2111 /** 2112 * lock_page_memcg - lock a page->mem_cgroup binding 2113 * @page: the page 2114 * 2115 * This function protects unlocked LRU pages from being moved to 2116 * another cgroup. 2117 * 2118 * It ensures lifetime of the returned memcg. Caller is responsible 2119 * for the lifetime of the page; __unlock_page_memcg() is available 2120 * when @page might get freed inside the locked section. 2121 */ 2122 struct mem_cgroup *lock_page_memcg(struct page *page) 2123 { 2124 struct page *head = compound_head(page); /* rmap on tail pages */ 2125 struct mem_cgroup *memcg; 2126 unsigned long flags; 2127 2128 /* 2129 * The RCU lock is held throughout the transaction. The fast 2130 * path can get away without acquiring the memcg->move_lock 2131 * because page moving starts with an RCU grace period. 2132 * 2133 * The RCU lock also protects the memcg from being freed when 2134 * the page state that is going to change is the only thing 2135 * preventing the page itself from being freed. E.g. writeback 2136 * doesn't hold a page reference and relies on PG_writeback to 2137 * keep off truncation, migration and so forth. 2138 */ 2139 rcu_read_lock(); 2140 2141 if (mem_cgroup_disabled()) 2142 return NULL; 2143 again: 2144 memcg = head->mem_cgroup; 2145 if (unlikely(!memcg)) 2146 return NULL; 2147 2148 if (atomic_read(&memcg->moving_account) <= 0) 2149 return memcg; 2150 2151 spin_lock_irqsave(&memcg->move_lock, flags); 2152 if (memcg != head->mem_cgroup) { 2153 spin_unlock_irqrestore(&memcg->move_lock, flags); 2154 goto again; 2155 } 2156 2157 /* 2158 * When charge migration first begins, we can have locked and 2159 * unlocked page stat updates happening concurrently. Track 2160 * the task who has the lock for unlock_page_memcg(). 2161 */ 2162 memcg->move_lock_task = current; 2163 memcg->move_lock_flags = flags; 2164 2165 return memcg; 2166 } 2167 EXPORT_SYMBOL(lock_page_memcg); 2168 2169 /** 2170 * __unlock_page_memcg - unlock and unpin a memcg 2171 * @memcg: the memcg 2172 * 2173 * Unlock and unpin a memcg returned by lock_page_memcg(). 
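 *
 * Typical pairing (illustrative sketch, not a real call site):
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state that the per-memcg statistics depend on ...
 *	__unlock_page_memcg(memcg);
 *
 * The second half is safe even if @page was freed inside the section.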
2174 */ 2175 void __unlock_page_memcg(struct mem_cgroup *memcg) 2176 { 2177 if (memcg && memcg->move_lock_task == current) { 2178 unsigned long flags = memcg->move_lock_flags; 2179 2180 memcg->move_lock_task = NULL; 2181 memcg->move_lock_flags = 0; 2182 2183 spin_unlock_irqrestore(&memcg->move_lock, flags); 2184 } 2185 2186 rcu_read_unlock(); 2187 } 2188 2189 /** 2190 * unlock_page_memcg - unlock a page->mem_cgroup binding 2191 * @page: the page 2192 */ 2193 void unlock_page_memcg(struct page *page) 2194 { 2195 struct page *head = compound_head(page); 2196 2197 __unlock_page_memcg(head->mem_cgroup); 2198 } 2199 EXPORT_SYMBOL(unlock_page_memcg); 2200 2201 struct memcg_stock_pcp { 2202 struct mem_cgroup *cached; /* this never be root cgroup */ 2203 unsigned int nr_pages; 2204 2205 #ifdef CONFIG_MEMCG_KMEM 2206 struct obj_cgroup *cached_objcg; 2207 unsigned int nr_bytes; 2208 #endif 2209 2210 struct work_struct work; 2211 unsigned long flags; 2212 #define FLUSHING_CACHED_CHARGE 0 2213 }; 2214 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2215 static DEFINE_MUTEX(percpu_charge_mutex); 2216 2217 #ifdef CONFIG_MEMCG_KMEM 2218 static void drain_obj_stock(struct memcg_stock_pcp *stock); 2219 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2220 struct mem_cgroup *root_memcg); 2221 2222 #else 2223 static inline void drain_obj_stock(struct memcg_stock_pcp *stock) 2224 { 2225 } 2226 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2227 struct mem_cgroup *root_memcg) 2228 { 2229 return false; 2230 } 2231 #endif 2232 2233 /** 2234 * consume_stock: Try to consume stocked charge on this cpu. 2235 * @memcg: memcg to consume from. 2236 * @nr_pages: how many pages to charge. 2237 * 2238 * The charges will only happen if @memcg matches the current cpu's memcg 2239 * stock, and at least @nr_pages are available in that stock. Failure to 2240 * service an allocation will refill the stock. 2241 * 2242 * returns true if successful, false otherwise. 2243 */ 2244 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2245 { 2246 struct memcg_stock_pcp *stock; 2247 unsigned long flags; 2248 bool ret = false; 2249 2250 if (nr_pages > MEMCG_CHARGE_BATCH) 2251 return ret; 2252 2253 local_irq_save(flags); 2254 2255 stock = this_cpu_ptr(&memcg_stock); 2256 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2257 stock->nr_pages -= nr_pages; 2258 ret = true; 2259 } 2260 2261 local_irq_restore(flags); 2262 2263 return ret; 2264 } 2265 2266 /* 2267 * Returns stocks cached in percpu and reset cached information. 2268 */ 2269 static void drain_stock(struct memcg_stock_pcp *stock) 2270 { 2271 struct mem_cgroup *old = stock->cached; 2272 2273 if (!old) 2274 return; 2275 2276 if (stock->nr_pages) { 2277 page_counter_uncharge(&old->memory, stock->nr_pages); 2278 if (do_memsw_account()) 2279 page_counter_uncharge(&old->memsw, stock->nr_pages); 2280 stock->nr_pages = 0; 2281 } 2282 2283 css_put(&old->css); 2284 stock->cached = NULL; 2285 } 2286 2287 static void drain_local_stock(struct work_struct *dummy) 2288 { 2289 struct memcg_stock_pcp *stock; 2290 unsigned long flags; 2291 2292 /* 2293 * The only protection from memory hotplug vs. 
drain_stock races is 2294 * that we always operate on local CPU stock here with IRQ disabled 2295 */ 2296 local_irq_save(flags); 2297 2298 stock = this_cpu_ptr(&memcg_stock); 2299 drain_obj_stock(stock); 2300 drain_stock(stock); 2301 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2302 2303 local_irq_restore(flags); 2304 } 2305 2306 /* 2307 * Cache charges(val) to local per_cpu area. 2308 * This will be consumed by consume_stock() function, later. 2309 */ 2310 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2311 { 2312 struct memcg_stock_pcp *stock; 2313 unsigned long flags; 2314 2315 local_irq_save(flags); 2316 2317 stock = this_cpu_ptr(&memcg_stock); 2318 if (stock->cached != memcg) { /* reset if necessary */ 2319 drain_stock(stock); 2320 css_get(&memcg->css); 2321 stock->cached = memcg; 2322 } 2323 stock->nr_pages += nr_pages; 2324 2325 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2326 drain_stock(stock); 2327 2328 local_irq_restore(flags); 2329 } 2330 2331 /* 2332 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2333 * of the hierarchy under it. 2334 */ 2335 static void drain_all_stock(struct mem_cgroup *root_memcg) 2336 { 2337 int cpu, curcpu; 2338 2339 /* If someone's already draining, avoid adding running more workers. */ 2340 if (!mutex_trylock(&percpu_charge_mutex)) 2341 return; 2342 /* 2343 * Notify other cpus that system-wide "drain" is running 2344 * We do not care about races with the cpu hotplug because cpu down 2345 * as well as workers from this path always operate on the local 2346 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2347 */ 2348 curcpu = get_cpu(); 2349 for_each_online_cpu(cpu) { 2350 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2351 struct mem_cgroup *memcg; 2352 bool flush = false; 2353 2354 rcu_read_lock(); 2355 memcg = stock->cached; 2356 if (memcg && stock->nr_pages && 2357 mem_cgroup_is_descendant(memcg, root_memcg)) 2358 flush = true; 2359 if (obj_stock_flush_required(stock, root_memcg)) 2360 flush = true; 2361 rcu_read_unlock(); 2362 2363 if (flush && 2364 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2365 if (cpu == curcpu) 2366 drain_local_stock(&stock->work); 2367 else 2368 schedule_work_on(cpu, &stock->work); 2369 } 2370 } 2371 put_cpu(); 2372 mutex_unlock(&percpu_charge_mutex); 2373 } 2374 2375 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2376 { 2377 struct memcg_stock_pcp *stock; 2378 struct mem_cgroup *memcg, *mi; 2379 2380 stock = &per_cpu(memcg_stock, cpu); 2381 drain_stock(stock); 2382 2383 for_each_mem_cgroup(memcg) { 2384 int i; 2385 2386 for (i = 0; i < MEMCG_NR_STAT; i++) { 2387 int nid; 2388 long x; 2389 2390 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2391 if (x) 2392 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2393 atomic_long_add(x, &memcg->vmstats[i]); 2394 2395 if (i >= NR_VM_NODE_STAT_ITEMS) 2396 continue; 2397 2398 for_each_node(nid) { 2399 struct mem_cgroup_per_node *pn; 2400 2401 pn = mem_cgroup_nodeinfo(memcg, nid); 2402 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2403 if (x) 2404 do { 2405 atomic_long_add(x, &pn->lruvec_stat[i]); 2406 } while ((pn = parent_nodeinfo(pn, nid))); 2407 } 2408 } 2409 2410 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2411 long x; 2412 2413 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2414 if (x) 2415 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2416 atomic_long_add(x, &memcg->vmevents[i]); 2417 } 2418 } 2419 2420 return 0; 2421 } 2422 2423 static unsigned long 
reclaim_high(struct mem_cgroup *memcg, 2424 unsigned int nr_pages, 2425 gfp_t gfp_mask) 2426 { 2427 unsigned long nr_reclaimed = 0; 2428 2429 do { 2430 unsigned long pflags; 2431 2432 if (page_counter_read(&memcg->memory) <= 2433 READ_ONCE(memcg->memory.high)) 2434 continue; 2435 2436 memcg_memory_event(memcg, MEMCG_HIGH); 2437 2438 psi_memstall_enter(&pflags); 2439 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2440 gfp_mask, true); 2441 psi_memstall_leave(&pflags); 2442 } while ((memcg = parent_mem_cgroup(memcg)) && 2443 !mem_cgroup_is_root(memcg)); 2444 2445 return nr_reclaimed; 2446 } 2447 2448 static void high_work_func(struct work_struct *work) 2449 { 2450 struct mem_cgroup *memcg; 2451 2452 memcg = container_of(work, struct mem_cgroup, high_work); 2453 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2454 } 2455 2456 /* 2457 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2458 * long enough to cause a significant slowdown in most cases, while still 2459 * allowing diagnostics and tracing to proceed without becoming stuck. 2460 */ 2461 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2462 2463 /* 2464 * When calculating the delay, we use these on either side of the exponentiation to 2465 * maintain precision and scale to a reasonable number of jiffies (see the table 2466 * below). 2467 * 2468 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2469 * overage ratio to a delay. 2470 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2471 * proposed penalty in order to reduce it to a reasonable number of jiffies, and 2472 * to produce a reasonable delay curve. 2473 * 2474 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2475 * reasonable delay curve compared to precision-adjusted overage, not 2476 * penalising heavily at first, but still making sure that growth beyond the 2477 * limit penalises misbehaving cgroups by slowing them down exponentially.
For 2478 * example, with a high of 100 megabytes: 2479 * 2480 * +-------+------------------------+ 2481 * | usage | time to allocate in ms | 2482 * +-------+------------------------+ 2483 * | 100M | 0 | 2484 * | 101M | 6 | 2485 * | 102M | 25 | 2486 * | 103M | 57 | 2487 * | 104M | 102 | 2488 * | 105M | 159 | 2489 * | 106M | 230 | 2490 * | 107M | 313 | 2491 * | 108M | 409 | 2492 * | 109M | 518 | 2493 * | 110M | 639 | 2494 * | 111M | 774 | 2495 * | 112M | 921 | 2496 * | 113M | 1081 | 2497 * | 114M | 1254 | 2498 * | 115M | 1439 | 2499 * | 116M | 1638 | 2500 * | 117M | 1849 | 2501 * | 118M | 2000 | 2502 * | 119M | 2000 | 2503 * | 120M | 2000 | 2504 * +-------+------------------------+ 2505 */ 2506 #define MEMCG_DELAY_PRECISION_SHIFT 20 2507 #define MEMCG_DELAY_SCALING_SHIFT 14 2508 2509 static u64 calculate_overage(unsigned long usage, unsigned long high) 2510 { 2511 u64 overage; 2512 2513 if (usage <= high) 2514 return 0; 2515 2516 /* 2517 * Prevent division by 0 in overage calculation by acting as if 2518 * it was a threshold of 1 page 2519 */ 2520 high = max(high, 1UL); 2521 2522 overage = usage - high; 2523 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2524 return div64_u64(overage, high); 2525 } 2526 2527 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2528 { 2529 u64 overage, max_overage = 0; 2530 2531 do { 2532 overage = calculate_overage(page_counter_read(&memcg->memory), 2533 READ_ONCE(memcg->memory.high)); 2534 max_overage = max(overage, max_overage); 2535 } while ((memcg = parent_mem_cgroup(memcg)) && 2536 !mem_cgroup_is_root(memcg)); 2537 2538 return max_overage; 2539 } 2540 2541 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2542 { 2543 u64 overage, max_overage = 0; 2544 2545 do { 2546 overage = calculate_overage(page_counter_read(&memcg->swap), 2547 READ_ONCE(memcg->swap.high)); 2548 if (overage) 2549 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2550 max_overage = max(overage, max_overage); 2551 } while ((memcg = parent_mem_cgroup(memcg)) && 2552 !mem_cgroup_is_root(memcg)); 2553 2554 return max_overage; 2555 } 2556 2557 /* 2558 * Get the number of jiffies that we should penalise a mischievous cgroup which 2559 * is exceeding its memory.high by checking both it and its ancestors. 2560 */ 2561 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2562 unsigned int nr_pages, 2563 u64 max_overage) 2564 { 2565 unsigned long penalty_jiffies; 2566 2567 if (!max_overage) 2568 return 0; 2569 2570 /* 2571 * We use overage compared to memory.high to calculate the number of 2572 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2573 * fairly lenient on small overages, and increasingly harsh when the 2574 * memcg in question makes it clear that it has no intention of stopping 2575 * its crazy behaviour, so we exponentially increase the delay based on 2576 * overage amount. 2577 */ 2578 penalty_jiffies = max_overage * max_overage * HZ; 2579 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2580 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2581 2582 /* 2583 * Factor in the task's own contribution to the overage, such that four 2584 * N-sized allocations are throttled approximately the same as one 2585 * 4N-sized allocation. 2586 * 2587 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2588 * larger the current charge batch is than that.
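 *
 * Worked example (illustrative, not from the original source; it matches
 * the 104M row of the table above): with memory.high at 100M and usage
 * at 104M,
 *
 *	overage         = ((104M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M
 *	                ~= 41943
 *	penalty_jiffies = (41943 * 41943 * HZ)
 *	                  >> MEMCG_DELAY_PRECISION_SHIFT
 *	                  >> MEMCG_DELAY_SCALING_SHIFT
 *	                ~= HZ / 10, i.e. roughly 102ms.
 *
 * A task whose own charges amount to a quarter of MEMCG_CHARGE_BATCH is
 * then put to sleep for roughly a quarter of that.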
2589 */ 2590 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2591 } 2592 2593 /* 2594 * Scheduled by try_charge() to be executed from the userland return path 2595 * and reclaims memory over the high limit. 2596 */ 2597 void mem_cgroup_handle_over_high(void) 2598 { 2599 unsigned long penalty_jiffies; 2600 unsigned long pflags; 2601 unsigned long nr_reclaimed; 2602 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2603 int nr_retries = MAX_RECLAIM_RETRIES; 2604 struct mem_cgroup *memcg; 2605 bool in_retry = false; 2606 2607 if (likely(!nr_pages)) 2608 return; 2609 2610 memcg = get_mem_cgroup_from_mm(current->mm); 2611 current->memcg_nr_pages_over_high = 0; 2612 2613 retry_reclaim: 2614 /* 2615 * The allocating task should reclaim at least the batch size, but for 2616 * subsequent retries we only want to do what's necessary to prevent oom 2617 * or breaching resource isolation. 2618 * 2619 * This is distinct from memory.max or page allocator behaviour because 2620 * memory.high is currently batched, whereas memory.max and the page 2621 * allocator run every time an allocation is made. 2622 */ 2623 nr_reclaimed = reclaim_high(memcg, 2624 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2625 GFP_KERNEL); 2626 2627 /* 2628 * memory.high is breached and reclaim is unable to keep up. Throttle 2629 * allocators proactively to slow down excessive growth. 2630 */ 2631 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2632 mem_find_max_overage(memcg)); 2633 2634 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2635 swap_find_max_overage(memcg)); 2636 2637 /* 2638 * Clamp the max delay per usermode return so as to still keep the 2639 * application moving forwards and also permit diagnostics, albeit 2640 * extremely slowly. 2641 */ 2642 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2643 2644 /* 2645 * Don't sleep if the amount of jiffies this memcg owes us is so low 2646 * that it's not even worth doing, in an attempt to be nice to those who 2647 * go only a small amount over their memory.high value and maybe haven't 2648 * been aggressively reclaimed enough yet. 2649 */ 2650 if (penalty_jiffies <= HZ / 100) 2651 goto out; 2652 2653 /* 2654 * If reclaim is making forward progress but we're still over 2655 * memory.high, we want to encourage that rather than doing allocator 2656 * throttling. 2657 */ 2658 if (nr_reclaimed || nr_retries--) { 2659 in_retry = true; 2660 goto retry_reclaim; 2661 } 2662 2663 /* 2664 * If we exit early, we're guaranteed to die (since 2665 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2666 * need to account for any ill-begotten jiffies to pay them off later. 
2667 */ 2668 psi_memstall_enter(&pflags); 2669 schedule_timeout_killable(penalty_jiffies); 2670 psi_memstall_leave(&pflags); 2671 2672 out: 2673 css_put(&memcg->css); 2674 } 2675 2676 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2677 unsigned int nr_pages) 2678 { 2679 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2680 int nr_retries = MAX_RECLAIM_RETRIES; 2681 struct mem_cgroup *mem_over_limit; 2682 struct page_counter *counter; 2683 enum oom_status oom_status; 2684 unsigned long nr_reclaimed; 2685 bool may_swap = true; 2686 bool drained = false; 2687 unsigned long pflags; 2688 2689 if (mem_cgroup_is_root(memcg)) 2690 return 0; 2691 retry: 2692 if (consume_stock(memcg, nr_pages)) 2693 return 0; 2694 2695 if (!do_memsw_account() || 2696 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2697 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2698 goto done_restock; 2699 if (do_memsw_account()) 2700 page_counter_uncharge(&memcg->memsw, batch); 2701 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2702 } else { 2703 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2704 may_swap = false; 2705 } 2706 2707 if (batch > nr_pages) { 2708 batch = nr_pages; 2709 goto retry; 2710 } 2711 2712 /* 2713 * Memcg doesn't have a dedicated reserve for atomic 2714 * allocations. But like the global atomic pool, we need to 2715 * put the burden of reclaim on regular allocation requests 2716 * and let these go through as privileged allocations. 2717 */ 2718 if (gfp_mask & __GFP_ATOMIC) 2719 goto force; 2720 2721 /* 2722 * Unlike in global OOM situations, memcg is not in a physical 2723 * memory shortage. Allow dying and OOM-killed tasks to 2724 * bypass the last charges so that they can exit quickly and 2725 * free their memory. 2726 */ 2727 if (unlikely(should_force_charge())) 2728 goto force; 2729 2730 /* 2731 * Prevent unbounded recursion when reclaim operations need to 2732 * allocate memory. This might exceed the limits temporarily, 2733 * but we prefer facilitating memory reclaim and getting back 2734 * under the limit over triggering OOM kills in these cases. 2735 */ 2736 if (unlikely(current->flags & PF_MEMALLOC)) 2737 goto force; 2738 2739 if (unlikely(task_in_memcg_oom(current))) 2740 goto nomem; 2741 2742 if (!gfpflags_allow_blocking(gfp_mask)) 2743 goto nomem; 2744 2745 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2746 2747 psi_memstall_enter(&pflags); 2748 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2749 gfp_mask, may_swap); 2750 psi_memstall_leave(&pflags); 2751 2752 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2753 goto retry; 2754 2755 if (!drained) { 2756 drain_all_stock(mem_over_limit); 2757 drained = true; 2758 goto retry; 2759 } 2760 2761 if (gfp_mask & __GFP_NORETRY) 2762 goto nomem; 2763 /* 2764 * Even though the limit is exceeded at this point, reclaim 2765 * may have been able to free some pages. Retry the charge 2766 * before killing the task. 2767 * 2768 * Only for regular pages, though: huge pages are rather 2769 * unlikely to succeed so close to the limit, and we fall back 2770 * to regular pages anyway in case of failure. 2771 */ 2772 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2773 goto retry; 2774 /* 2775 * At task move, charge accounts can be doubly counted. So, it's 2776 * better to wait until the end of task_move if something is going on. 
2777 */ 2778 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2779 goto retry; 2780 2781 if (nr_retries--) 2782 goto retry; 2783 2784 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2785 goto nomem; 2786 2787 if (gfp_mask & __GFP_NOFAIL) 2788 goto force; 2789 2790 if (fatal_signal_pending(current)) 2791 goto force; 2792 2793 /* 2794 * keep retrying as long as the memcg oom killer is able to make 2795 * a forward progress or bypass the charge if the oom killer 2796 * couldn't make any progress. 2797 */ 2798 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2799 get_order(nr_pages * PAGE_SIZE)); 2800 switch (oom_status) { 2801 case OOM_SUCCESS: 2802 nr_retries = MAX_RECLAIM_RETRIES; 2803 goto retry; 2804 case OOM_FAILED: 2805 goto force; 2806 default: 2807 goto nomem; 2808 } 2809 nomem: 2810 if (!(gfp_mask & __GFP_NOFAIL)) 2811 return -ENOMEM; 2812 force: 2813 /* 2814 * The allocation either can't fail or will lead to more memory 2815 * being freed very soon. Allow memory usage go over the limit 2816 * temporarily by force charging it. 2817 */ 2818 page_counter_charge(&memcg->memory, nr_pages); 2819 if (do_memsw_account()) 2820 page_counter_charge(&memcg->memsw, nr_pages); 2821 2822 return 0; 2823 2824 done_restock: 2825 if (batch > nr_pages) 2826 refill_stock(memcg, batch - nr_pages); 2827 2828 /* 2829 * If the hierarchy is above the normal consumption range, schedule 2830 * reclaim on returning to userland. We can perform reclaim here 2831 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2832 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2833 * not recorded as it most likely matches current's and won't 2834 * change in the meantime. As high limit is checked again before 2835 * reclaim, the cost of mismatch is negligible. 2836 */ 2837 do { 2838 bool mem_high, swap_high; 2839 2840 mem_high = page_counter_read(&memcg->memory) > 2841 READ_ONCE(memcg->memory.high); 2842 swap_high = page_counter_read(&memcg->swap) > 2843 READ_ONCE(memcg->swap.high); 2844 2845 /* Don't bother a random interrupted task */ 2846 if (in_interrupt()) { 2847 if (mem_high) { 2848 schedule_work(&memcg->high_work); 2849 break; 2850 } 2851 continue; 2852 } 2853 2854 if (mem_high || swap_high) { 2855 /* 2856 * The allocating tasks in this cgroup will need to do 2857 * reclaim or be throttled to prevent further growth 2858 * of the memory or swap footprints. 2859 * 2860 * Target some best-effort fairness between the tasks, 2861 * and distribute reclaim work and delay penalties 2862 * based on how much each task is actually allocating. 
2863 */ 2864 current->memcg_nr_pages_over_high += batch; 2865 set_notify_resume(current); 2866 break; 2867 } 2868 } while ((memcg = parent_mem_cgroup(memcg))); 2869 2870 return 0; 2871 } 2872 2873 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2874 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2875 { 2876 if (mem_cgroup_is_root(memcg)) 2877 return; 2878 2879 page_counter_uncharge(&memcg->memory, nr_pages); 2880 if (do_memsw_account()) 2881 page_counter_uncharge(&memcg->memsw, nr_pages); 2882 } 2883 #endif 2884 2885 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2886 { 2887 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2888 /* 2889 * Any of the following ensures page->mem_cgroup stability: 2890 * 2891 * - the page lock 2892 * - LRU isolation 2893 * - lock_page_memcg() 2894 * - exclusive reference 2895 */ 2896 page->mem_cgroup = memcg; 2897 } 2898 2899 #ifdef CONFIG_MEMCG_KMEM 2900 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2901 gfp_t gfp) 2902 { 2903 unsigned int objects = objs_per_slab_page(s, page); 2904 void *vec; 2905 2906 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2907 page_to_nid(page)); 2908 if (!vec) 2909 return -ENOMEM; 2910 2911 if (cmpxchg(&page->obj_cgroups, NULL, 2912 (struct obj_cgroup **) ((unsigned long)vec | 0x1UL))) 2913 kfree(vec); 2914 else 2915 kmemleak_not_leak(vec); 2916 2917 return 0; 2918 } 2919 2920 /* 2921 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2922 * 2923 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2924 * cgroup_mutex, etc. 2925 */ 2926 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2927 { 2928 struct page *page; 2929 2930 if (mem_cgroup_disabled()) 2931 return NULL; 2932 2933 page = virt_to_head_page(p); 2934 2935 /* 2936 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer 2937 * or a pointer to obj_cgroup vector. In the latter case the lowest 2938 * bit of the pointer is set. 2939 * The page->mem_cgroup pointer can be asynchronously changed 2940 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed 2941 * from a valid memcg pointer to objcg vector or back. 2942 */ 2943 if (!page->mem_cgroup) 2944 return NULL; 2945 2946 /* 2947 * Slab objects are accounted individually, not per-page. 2948 * Memcg membership data for each individual object is saved in 2949 * the page->obj_cgroups. 
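 *
 * For example (illustrative walk-through of the lookup below): a slab
 * page has the lowest bit of page->mem_cgroup set; page_obj_cgroups()
 * masks that bit off to recover the obj_cgroup vector, and
 * obj_to_index() converts the object address into the slot to read.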
2950 */ 2951 if (page_has_obj_cgroups(page)) { 2952 struct obj_cgroup *objcg; 2953 unsigned int off; 2954 2955 off = obj_to_index(page->slab_cache, page, p); 2956 objcg = page_obj_cgroups(page)[off]; 2957 if (objcg) 2958 return obj_cgroup_memcg(objcg); 2959 2960 return NULL; 2961 } 2962 2963 /* All other pages use page->mem_cgroup */ 2964 return page->mem_cgroup; 2965 } 2966 2967 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2968 { 2969 struct obj_cgroup *objcg = NULL; 2970 struct mem_cgroup *memcg; 2971 2972 if (memcg_kmem_bypass()) 2973 return NULL; 2974 2975 rcu_read_lock(); 2976 if (unlikely(active_memcg())) 2977 memcg = active_memcg(); 2978 else 2979 memcg = mem_cgroup_from_task(current); 2980 2981 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2982 objcg = rcu_dereference(memcg->objcg); 2983 if (objcg && obj_cgroup_tryget(objcg)) 2984 break; 2985 } 2986 rcu_read_unlock(); 2987 2988 return objcg; 2989 } 2990 2991 static int memcg_alloc_cache_id(void) 2992 { 2993 int id, size; 2994 int err; 2995 2996 id = ida_simple_get(&memcg_cache_ida, 2997 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2998 if (id < 0) 2999 return id; 3000 3001 if (id < memcg_nr_cache_ids) 3002 return id; 3003 3004 /* 3005 * There's no space for the new id in memcg_caches arrays, 3006 * so we have to grow them. 3007 */ 3008 down_write(&memcg_cache_ids_sem); 3009 3010 size = 2 * (id + 1); 3011 if (size < MEMCG_CACHES_MIN_SIZE) 3012 size = MEMCG_CACHES_MIN_SIZE; 3013 else if (size > MEMCG_CACHES_MAX_SIZE) 3014 size = MEMCG_CACHES_MAX_SIZE; 3015 3016 err = memcg_update_all_list_lrus(size); 3017 if (!err) 3018 memcg_nr_cache_ids = size; 3019 3020 up_write(&memcg_cache_ids_sem); 3021 3022 if (err) { 3023 ida_simple_remove(&memcg_cache_ida, id); 3024 return err; 3025 } 3026 return id; 3027 } 3028 3029 static void memcg_free_cache_id(int id) 3030 { 3031 ida_simple_remove(&memcg_cache_ida, id); 3032 } 3033 3034 /** 3035 * __memcg_kmem_charge: charge a number of kernel pages to a memcg 3036 * @memcg: memory cgroup to charge 3037 * @gfp: reclaim mode 3038 * @nr_pages: number of pages to charge 3039 * 3040 * Returns 0 on success, an error code on failure. 3041 */ 3042 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, 3043 unsigned int nr_pages) 3044 { 3045 struct page_counter *counter; 3046 int ret; 3047 3048 ret = try_charge(memcg, gfp, nr_pages); 3049 if (ret) 3050 return ret; 3051 3052 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3053 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3054 3055 /* 3056 * Enforce __GFP_NOFAIL allocation because callers are not 3057 * prepared to see failures and likely do not have any failure 3058 * handling code. 
3059 */ 3060 if (gfp & __GFP_NOFAIL) { 3061 page_counter_charge(&memcg->kmem, nr_pages); 3062 return 0; 3063 } 3064 cancel_charge(memcg, nr_pages); 3065 return -ENOMEM; 3066 } 3067 return 0; 3068 } 3069 3070 /** 3071 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg 3072 * @memcg: memcg to uncharge 3073 * @nr_pages: number of pages to uncharge 3074 */ 3075 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 3076 { 3077 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 3078 page_counter_uncharge(&memcg->kmem, nr_pages); 3079 3080 page_counter_uncharge(&memcg->memory, nr_pages); 3081 if (do_memsw_account()) 3082 page_counter_uncharge(&memcg->memsw, nr_pages); 3083 } 3084 3085 /** 3086 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3087 * @page: page to charge 3088 * @gfp: reclaim mode 3089 * @order: allocation order 3090 * 3091 * Returns 0 on success, an error code on failure. 3092 */ 3093 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3094 { 3095 struct mem_cgroup *memcg; 3096 int ret = 0; 3097 3098 memcg = get_mem_cgroup_from_current(); 3099 if (memcg && !mem_cgroup_is_root(memcg)) { 3100 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); 3101 if (!ret) { 3102 page->mem_cgroup = memcg; 3103 __SetPageKmemcg(page); 3104 return 0; 3105 } 3106 css_put(&memcg->css); 3107 } 3108 return ret; 3109 } 3110 3111 /** 3112 * __memcg_kmem_uncharge_page: uncharge a kmem page 3113 * @page: page to uncharge 3114 * @order: allocation order 3115 */ 3116 void __memcg_kmem_uncharge_page(struct page *page, int order) 3117 { 3118 struct mem_cgroup *memcg = page->mem_cgroup; 3119 unsigned int nr_pages = 1 << order; 3120 3121 if (!memcg) 3122 return; 3123 3124 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3125 __memcg_kmem_uncharge(memcg, nr_pages); 3126 page->mem_cgroup = NULL; 3127 css_put(&memcg->css); 3128 3129 /* slab pages do not have PageKmemcg flag set */ 3130 if (PageKmemcg(page)) 3131 __ClearPageKmemcg(page); 3132 } 3133 3134 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3135 { 3136 struct memcg_stock_pcp *stock; 3137 unsigned long flags; 3138 bool ret = false; 3139 3140 local_irq_save(flags); 3141 3142 stock = this_cpu_ptr(&memcg_stock); 3143 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3144 stock->nr_bytes -= nr_bytes; 3145 ret = true; 3146 } 3147 3148 local_irq_restore(flags); 3149 3150 return ret; 3151 } 3152 3153 static void drain_obj_stock(struct memcg_stock_pcp *stock) 3154 { 3155 struct obj_cgroup *old = stock->cached_objcg; 3156 3157 if (!old) 3158 return; 3159 3160 if (stock->nr_bytes) { 3161 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3162 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3163 3164 if (nr_pages) { 3165 rcu_read_lock(); 3166 __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages); 3167 rcu_read_unlock(); 3168 } 3169 3170 /* 3171 * The leftover is flushed to the centralized per-memcg value. 3172 * On the next attempt to refill obj stock it will be moved 3173 * to a per-cpu stock (probably, on an other CPU), see 3174 * refill_obj_stock(). 3175 * 3176 * How often it's flushed is a trade-off between the memory 3177 * limit enforcement accuracy and potential CPU contention, 3178 * so it might be changed in the future. 
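 *
 * For example (illustrative, assuming 4K pages): with 5000 bytes
 * stocked, one whole page is uncharged above and the remaining 904
 * bytes are added to old->nr_charged_bytes below, to be picked up by a
 * later refill_obj_stock().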
3179 */ 3180 atomic_add(nr_bytes, &old->nr_charged_bytes); 3181 stock->nr_bytes = 0; 3182 } 3183 3184 obj_cgroup_put(old); 3185 stock->cached_objcg = NULL; 3186 } 3187 3188 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3189 struct mem_cgroup *root_memcg) 3190 { 3191 struct mem_cgroup *memcg; 3192 3193 if (stock->cached_objcg) { 3194 memcg = obj_cgroup_memcg(stock->cached_objcg); 3195 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3196 return true; 3197 } 3198 3199 return false; 3200 } 3201 3202 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3203 { 3204 struct memcg_stock_pcp *stock; 3205 unsigned long flags; 3206 3207 local_irq_save(flags); 3208 3209 stock = this_cpu_ptr(&memcg_stock); 3210 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3211 drain_obj_stock(stock); 3212 obj_cgroup_get(objcg); 3213 stock->cached_objcg = objcg; 3214 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); 3215 } 3216 stock->nr_bytes += nr_bytes; 3217 3218 if (stock->nr_bytes > PAGE_SIZE) 3219 drain_obj_stock(stock); 3220 3221 local_irq_restore(flags); 3222 } 3223 3224 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3225 { 3226 struct mem_cgroup *memcg; 3227 unsigned int nr_pages, nr_bytes; 3228 int ret; 3229 3230 if (consume_obj_stock(objcg, size)) 3231 return 0; 3232 3233 /* 3234 * In theory, memcg->nr_charged_bytes can have enough 3235 * pre-charged bytes to satisfy the allocation. However, 3236 * flushing memcg->nr_charged_bytes requires two atomic 3237 * operations, and memcg->nr_charged_bytes can't be big, 3238 * so it's better to ignore it and try grab some new pages. 3239 * memcg->nr_charged_bytes will be flushed in 3240 * refill_obj_stock(), called from this function or 3241 * independently later. 3242 */ 3243 rcu_read_lock(); 3244 memcg = obj_cgroup_memcg(objcg); 3245 css_get(&memcg->css); 3246 rcu_read_unlock(); 3247 3248 nr_pages = size >> PAGE_SHIFT; 3249 nr_bytes = size & (PAGE_SIZE - 1); 3250 3251 if (nr_bytes) 3252 nr_pages += 1; 3253 3254 ret = __memcg_kmem_charge(memcg, gfp, nr_pages); 3255 if (!ret && nr_bytes) 3256 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); 3257 3258 css_put(&memcg->css); 3259 return ret; 3260 } 3261 3262 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3263 { 3264 refill_obj_stock(objcg, size); 3265 } 3266 3267 #endif /* CONFIG_MEMCG_KMEM */ 3268 3269 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3270 3271 /* 3272 * Because tail pages are not marked as "used", set it. We're under 3273 * pgdat->lru_lock and migration entries setup in all page mappings. 3274 */ 3275 void mem_cgroup_split_huge_fixup(struct page *head) 3276 { 3277 struct mem_cgroup *memcg = head->mem_cgroup; 3278 int i; 3279 3280 if (mem_cgroup_disabled()) 3281 return; 3282 3283 for (i = 1; i < HPAGE_PMD_NR; i++) { 3284 css_get(&memcg->css); 3285 head[i].mem_cgroup = memcg; 3286 } 3287 } 3288 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3289 3290 #ifdef CONFIG_MEMCG_SWAP 3291 /** 3292 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3293 * @entry: swap entry to be moved 3294 * @from: mem_cgroup which the entry is moved from 3295 * @to: mem_cgroup which the entry is moved to 3296 * 3297 * It succeeds only when the swap_cgroup's record for this entry is the same 3298 * as the mem_cgroup's id of @from. 3299 * 3300 * Returns 0 on success, -EINVAL on failure. 
3301 * 3302 * The caller must have charged to @to, IOW, called page_counter_charge() about 3303 * both res and memsw, and called css_get(). 3304 */ 3305 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3306 struct mem_cgroup *from, struct mem_cgroup *to) 3307 { 3308 unsigned short old_id, new_id; 3309 3310 old_id = mem_cgroup_id(from); 3311 new_id = mem_cgroup_id(to); 3312 3313 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3314 mod_memcg_state(from, MEMCG_SWAP, -1); 3315 mod_memcg_state(to, MEMCG_SWAP, 1); 3316 return 0; 3317 } 3318 return -EINVAL; 3319 } 3320 #else 3321 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3322 struct mem_cgroup *from, struct mem_cgroup *to) 3323 { 3324 return -EINVAL; 3325 } 3326 #endif 3327 3328 static DEFINE_MUTEX(memcg_max_mutex); 3329 3330 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3331 unsigned long max, bool memsw) 3332 { 3333 bool enlarge = false; 3334 bool drained = false; 3335 int ret; 3336 bool limits_invariant; 3337 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3338 3339 do { 3340 if (signal_pending(current)) { 3341 ret = -EINTR; 3342 break; 3343 } 3344 3345 mutex_lock(&memcg_max_mutex); 3346 /* 3347 * Make sure that the new limit (memsw or memory limit) doesn't 3348 * break our basic invariant rule memory.max <= memsw.max. 3349 */ 3350 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3351 max <= memcg->memsw.max; 3352 if (!limits_invariant) { 3353 mutex_unlock(&memcg_max_mutex); 3354 ret = -EINVAL; 3355 break; 3356 } 3357 if (max > counter->max) 3358 enlarge = true; 3359 ret = page_counter_set_max(counter, max); 3360 mutex_unlock(&memcg_max_mutex); 3361 3362 if (!ret) 3363 break; 3364 3365 if (!drained) { 3366 drain_all_stock(memcg); 3367 drained = true; 3368 continue; 3369 } 3370 3371 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3372 GFP_KERNEL, !memsw)) { 3373 ret = -EBUSY; 3374 break; 3375 } 3376 } while (true); 3377 3378 if (!ret && enlarge) 3379 memcg_oom_recover(memcg); 3380 3381 return ret; 3382 } 3383 3384 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3385 gfp_t gfp_mask, 3386 unsigned long *total_scanned) 3387 { 3388 unsigned long nr_reclaimed = 0; 3389 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3390 unsigned long reclaimed; 3391 int loop = 0; 3392 struct mem_cgroup_tree_per_node *mctz; 3393 unsigned long excess; 3394 unsigned long nr_scanned; 3395 3396 if (order > 0) 3397 return 0; 3398 3399 mctz = soft_limit_tree_node(pgdat->node_id); 3400 3401 /* 3402 * Do not even bother to check the largest node if the root 3403 * is empty. Do it lockless to prevent lock bouncing. Races 3404 * are acceptable as soft limit is best effort anyway. 
3405 */ 3406 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3407 return 0; 3408 3409 /* 3410 * This loop can run for a while, especially if mem_cgroups continuously 3411 * keep exceeding their soft limit and putting the system under 3412 * pressure 3413 */ 3414 do { 3415 if (next_mz) 3416 mz = next_mz; 3417 else 3418 mz = mem_cgroup_largest_soft_limit_node(mctz); 3419 if (!mz) 3420 break; 3421 3422 nr_scanned = 0; 3423 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3424 gfp_mask, &nr_scanned); 3425 nr_reclaimed += reclaimed; 3426 *total_scanned += nr_scanned; 3427 spin_lock_irq(&mctz->lock); 3428 __mem_cgroup_remove_exceeded(mz, mctz); 3429 3430 /* 3431 * If we failed to reclaim anything from this memory cgroup 3432 * it is time to move on to the next cgroup 3433 */ 3434 next_mz = NULL; 3435 if (!reclaimed) 3436 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3437 3438 excess = soft_limit_excess(mz->memcg); 3439 /* 3440 * One school of thought says that we should not add 3441 * back the node to the tree if reclaim returns 0. 3442 * But our reclaim could return 0 simply because, due 3443 * to priority, we are exposing a smaller subset of 3444 * memory to reclaim from. Consider this as a longer 3445 * term TODO. 3446 */ 3447 /* If excess == 0, no tree ops */ 3448 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3449 spin_unlock_irq(&mctz->lock); 3450 css_put(&mz->memcg->css); 3451 loop++; 3452 /* 3453 * Could not reclaim anything and there are no more 3454 * mem cgroups to try or we seem to be looping without 3455 * reclaiming anything. 3456 */ 3457 if (!nr_reclaimed && 3458 (next_mz == NULL || 3459 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3460 break; 3461 } while (!nr_reclaimed); 3462 if (next_mz) 3463 css_put(&next_mz->memcg->css); 3464 return nr_reclaimed; 3465 } 3466 3467 /* 3468 * Test whether @memcg has children, dead or alive. Note that this 3469 * function doesn't care whether @memcg has use_hierarchy enabled and 3470 * returns %true if there are child csses according to the cgroup 3471 * hierarchy. Testing use_hierarchy is the caller's responsibility. 3472 */ 3473 static inline bool memcg_has_children(struct mem_cgroup *memcg) 3474 { 3475 bool ret; 3476 3477 rcu_read_lock(); 3478 ret = css_next_child(NULL, &memcg->css); 3479 rcu_read_unlock(); 3480 return ret; 3481 } 3482 3483 /* 3484 * Reclaims as many pages from the given memcg as possible. 3485 * 3486 * Caller is responsible for holding a css reference on @memcg.
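 *
 * On cgroup1 this is reached by writing to the memory.force_empty file,
 * e.g. (illustrative path):
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty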
3487 */ 3488 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3489 { 3490 int nr_retries = MAX_RECLAIM_RETRIES; 3491 3492 /* we call try-to-free pages to make this cgroup empty */ 3493 lru_add_drain_all(); 3494 3495 drain_all_stock(memcg); 3496 3497 /* try to free all pages in this cgroup */ 3498 while (nr_retries && page_counter_read(&memcg->memory)) { 3499 int progress; 3500 3501 if (signal_pending(current)) 3502 return -EINTR; 3503 3504 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3505 GFP_KERNEL, true); 3506 if (!progress) { 3507 nr_retries--; 3508 /* maybe some writeback is necessary */ 3509 congestion_wait(BLK_RW_ASYNC, HZ/10); 3510 } 3511 3512 } 3513 3514 return 0; 3515 } 3516 3517 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3518 char *buf, size_t nbytes, 3519 loff_t off) 3520 { 3521 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3522 3523 if (mem_cgroup_is_root(memcg)) 3524 return -EINVAL; 3525 return mem_cgroup_force_empty(memcg) ?: nbytes; 3526 } 3527 3528 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3529 struct cftype *cft) 3530 { 3531 return mem_cgroup_from_css(css)->use_hierarchy; 3532 } 3533 3534 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3535 struct cftype *cft, u64 val) 3536 { 3537 int retval = 0; 3538 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3539 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 3540 3541 if (memcg->use_hierarchy == val) 3542 return 0; 3543 3544 /* 3545 * If parent's use_hierarchy is set, we can't make any modifications 3546 * in the child subtrees. If it is unset, then the change can 3547 * occur, provided the current cgroup has no children. 3548 * 3549 * For the root cgroup, parent_memcg is NULL; we allow the value to be 3550 * set if there are no children.
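 *
 * For example (illustrative): changing the flag on a cgroup that already
 * has children fails with -EBUSY, and changing it anywhere below a parent
 * that has use_hierarchy set fails with -EINVAL.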
3551 */ 3552 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3553 (val == 1 || val == 0)) { 3554 if (!memcg_has_children(memcg)) 3555 memcg->use_hierarchy = val; 3556 else 3557 retval = -EBUSY; 3558 } else 3559 retval = -EINVAL; 3560 3561 return retval; 3562 } 3563 3564 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3565 { 3566 unsigned long val; 3567 3568 if (mem_cgroup_is_root(memcg)) { 3569 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3570 memcg_page_state(memcg, NR_ANON_MAPPED); 3571 if (swap) 3572 val += memcg_page_state(memcg, MEMCG_SWAP); 3573 } else { 3574 if (!swap) 3575 val = page_counter_read(&memcg->memory); 3576 else 3577 val = page_counter_read(&memcg->memsw); 3578 } 3579 return val; 3580 } 3581 3582 enum { 3583 RES_USAGE, 3584 RES_LIMIT, 3585 RES_MAX_USAGE, 3586 RES_FAILCNT, 3587 RES_SOFT_LIMIT, 3588 }; 3589 3590 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3591 struct cftype *cft) 3592 { 3593 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3594 struct page_counter *counter; 3595 3596 switch (MEMFILE_TYPE(cft->private)) { 3597 case _MEM: 3598 counter = &memcg->memory; 3599 break; 3600 case _MEMSWAP: 3601 counter = &memcg->memsw; 3602 break; 3603 case _KMEM: 3604 counter = &memcg->kmem; 3605 break; 3606 case _TCP: 3607 counter = &memcg->tcpmem; 3608 break; 3609 default: 3610 BUG(); 3611 } 3612 3613 switch (MEMFILE_ATTR(cft->private)) { 3614 case RES_USAGE: 3615 if (counter == &memcg->memory) 3616 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3617 if (counter == &memcg->memsw) 3618 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3619 return (u64)page_counter_read(counter) * PAGE_SIZE; 3620 case RES_LIMIT: 3621 return (u64)counter->max * PAGE_SIZE; 3622 case RES_MAX_USAGE: 3623 return (u64)counter->watermark * PAGE_SIZE; 3624 case RES_FAILCNT: 3625 return counter->failcnt; 3626 case RES_SOFT_LIMIT: 3627 return (u64)memcg->soft_limit * PAGE_SIZE; 3628 default: 3629 BUG(); 3630 } 3631 } 3632 3633 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) 3634 { 3635 unsigned long stat[MEMCG_NR_STAT] = {0}; 3636 struct mem_cgroup *mi; 3637 int node, cpu, i; 3638 3639 for_each_online_cpu(cpu) 3640 for (i = 0; i < MEMCG_NR_STAT; i++) 3641 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); 3642 3643 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3644 for (i = 0; i < MEMCG_NR_STAT; i++) 3645 atomic_long_add(stat[i], &mi->vmstats[i]); 3646 3647 for_each_node(node) { 3648 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3649 struct mem_cgroup_per_node *pi; 3650 3651 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3652 stat[i] = 0; 3653 3654 for_each_online_cpu(cpu) 3655 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3656 stat[i] += per_cpu( 3657 pn->lruvec_stat_cpu->count[i], cpu); 3658 3659 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) 3660 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3661 atomic_long_add(stat[i], &pi->lruvec_stat[i]); 3662 } 3663 } 3664 3665 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) 3666 { 3667 unsigned long events[NR_VM_EVENT_ITEMS]; 3668 struct mem_cgroup *mi; 3669 int cpu, i; 3670 3671 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3672 events[i] = 0; 3673 3674 for_each_online_cpu(cpu) 3675 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3676 events[i] += per_cpu(memcg->vmstats_percpu->events[i], 3677 cpu); 3678 3679 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3680 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3681 atomic_long_add(events[i], &mi->vmevents[i]); 
3682 } 3683 3684 #ifdef CONFIG_MEMCG_KMEM 3685 static int memcg_online_kmem(struct mem_cgroup *memcg) 3686 { 3687 struct obj_cgroup *objcg; 3688 int memcg_id; 3689 3690 if (cgroup_memory_nokmem) 3691 return 0; 3692 3693 BUG_ON(memcg->kmemcg_id >= 0); 3694 BUG_ON(memcg->kmem_state); 3695 3696 memcg_id = memcg_alloc_cache_id(); 3697 if (memcg_id < 0) 3698 return memcg_id; 3699 3700 objcg = obj_cgroup_alloc(); 3701 if (!objcg) { 3702 memcg_free_cache_id(memcg_id); 3703 return -ENOMEM; 3704 } 3705 objcg->memcg = memcg; 3706 rcu_assign_pointer(memcg->objcg, objcg); 3707 3708 static_branch_enable(&memcg_kmem_enabled_key); 3709 3710 /* 3711 * A memory cgroup is considered kmem-online as soon as it gets 3712 * kmemcg_id. Setting the id after enabling static branching will 3713 * guarantee no one starts accounting before all call sites are 3714 * patched. 3715 */ 3716 memcg->kmemcg_id = memcg_id; 3717 memcg->kmem_state = KMEM_ONLINE; 3718 3719 return 0; 3720 } 3721 3722 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3723 { 3724 struct cgroup_subsys_state *css; 3725 struct mem_cgroup *parent, *child; 3726 int kmemcg_id; 3727 3728 if (memcg->kmem_state != KMEM_ONLINE) 3729 return; 3730 3731 memcg->kmem_state = KMEM_ALLOCATED; 3732 3733 parent = parent_mem_cgroup(memcg); 3734 if (!parent) 3735 parent = root_mem_cgroup; 3736 3737 memcg_reparent_objcgs(memcg, parent); 3738 3739 kmemcg_id = memcg->kmemcg_id; 3740 BUG_ON(kmemcg_id < 0); 3741 3742 /* 3743 * Change kmemcg_id of this cgroup and all its descendants to the 3744 * parent's id, and then move all entries from this cgroup's list_lrus 3745 * to ones of the parent. After we have finished, all list_lrus 3746 * corresponding to this cgroup are guaranteed to remain empty. The 3747 * ordering is imposed by list_lru_node->lock taken by 3748 * memcg_drain_all_list_lrus(). 3749 */ 3750 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3751 css_for_each_descendant_pre(css, &memcg->css) { 3752 child = mem_cgroup_from_css(css); 3753 BUG_ON(child->kmemcg_id != kmemcg_id); 3754 child->kmemcg_id = parent->kmemcg_id; 3755 if (!memcg->use_hierarchy) 3756 break; 3757 } 3758 rcu_read_unlock(); 3759 3760 memcg_drain_all_list_lrus(kmemcg_id, parent); 3761 3762 memcg_free_cache_id(kmemcg_id); 3763 } 3764 3765 static void memcg_free_kmem(struct mem_cgroup *memcg) 3766 { 3767 /* css_alloc() failed, offlining didn't happen */ 3768 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3769 memcg_offline_kmem(memcg); 3770 } 3771 #else 3772 static int memcg_online_kmem(struct mem_cgroup *memcg) 3773 { 3774 return 0; 3775 } 3776 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3777 { 3778 } 3779 static void memcg_free_kmem(struct mem_cgroup *memcg) 3780 { 3781 } 3782 #endif /* CONFIG_MEMCG_KMEM */ 3783 3784 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3785 unsigned long max) 3786 { 3787 int ret; 3788 3789 mutex_lock(&memcg_max_mutex); 3790 ret = page_counter_set_max(&memcg->kmem, max); 3791 mutex_unlock(&memcg_max_mutex); 3792 return ret; 3793 } 3794 3795 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3796 { 3797 int ret; 3798 3799 mutex_lock(&memcg_max_mutex); 3800 3801 ret = page_counter_set_max(&memcg->tcpmem, max); 3802 if (ret) 3803 goto out; 3804 3805 if (!memcg->tcpmem_active) { 3806 /* 3807 * The active flag needs to be written after the static_key 3808 * update. This is what guarantees that the socket activation 3809 * function is the last one to run. 
See mem_cgroup_sk_alloc() 3810 * for details, and note that we don't mark any socket as 3811 * belonging to this memcg until that flag is up. 3812 * 3813 * We need to do this, because static_keys will span multiple 3814 * sites, but we can't control their order. If we mark a socket 3815 * as accounted, but the accounting functions are not patched in 3816 * yet, we'll lose accounting. 3817 * 3818 * We never race with the readers in mem_cgroup_sk_alloc(), 3819 * because when this value change, the code to process it is not 3820 * patched in yet. 3821 */ 3822 static_branch_inc(&memcg_sockets_enabled_key); 3823 memcg->tcpmem_active = true; 3824 } 3825 out: 3826 mutex_unlock(&memcg_max_mutex); 3827 return ret; 3828 } 3829 3830 /* 3831 * The user of this function is... 3832 * RES_LIMIT. 3833 */ 3834 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3835 char *buf, size_t nbytes, loff_t off) 3836 { 3837 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3838 unsigned long nr_pages; 3839 int ret; 3840 3841 buf = strstrip(buf); 3842 ret = page_counter_memparse(buf, "-1", &nr_pages); 3843 if (ret) 3844 return ret; 3845 3846 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3847 case RES_LIMIT: 3848 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3849 ret = -EINVAL; 3850 break; 3851 } 3852 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3853 case _MEM: 3854 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3855 break; 3856 case _MEMSWAP: 3857 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3858 break; 3859 case _KMEM: 3860 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. " 3861 "Please report your usecase to linux-mm@kvack.org if you " 3862 "depend on this functionality.\n"); 3863 ret = memcg_update_kmem_max(memcg, nr_pages); 3864 break; 3865 case _TCP: 3866 ret = memcg_update_tcp_max(memcg, nr_pages); 3867 break; 3868 } 3869 break; 3870 case RES_SOFT_LIMIT: 3871 memcg->soft_limit = nr_pages; 3872 ret = 0; 3873 break; 3874 } 3875 return ret ?: nbytes; 3876 } 3877 3878 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3879 size_t nbytes, loff_t off) 3880 { 3881 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3882 struct page_counter *counter; 3883 3884 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3885 case _MEM: 3886 counter = &memcg->memory; 3887 break; 3888 case _MEMSWAP: 3889 counter = &memcg->memsw; 3890 break; 3891 case _KMEM: 3892 counter = &memcg->kmem; 3893 break; 3894 case _TCP: 3895 counter = &memcg->tcpmem; 3896 break; 3897 default: 3898 BUG(); 3899 } 3900 3901 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3902 case RES_MAX_USAGE: 3903 page_counter_reset_watermark(counter); 3904 break; 3905 case RES_FAILCNT: 3906 counter->failcnt = 0; 3907 break; 3908 default: 3909 BUG(); 3910 } 3911 3912 return nbytes; 3913 } 3914 3915 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3916 struct cftype *cft) 3917 { 3918 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3919 } 3920 3921 #ifdef CONFIG_MMU 3922 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3923 struct cftype *cft, u64 val) 3924 { 3925 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3926 3927 if (val & ~MOVE_MASK) 3928 return -EINVAL; 3929 3930 /* 3931 * No kind of locking is needed in here, because ->can_attach() will 3932 * check this value once in the beginning of the process, and then carry 3933 * on with stale data. 
This means that changes to this value will only 3934 * affect task migrations starting after the change. 3935 */ 3936 memcg->move_charge_at_immigrate = val; 3937 return 0; 3938 } 3939 #else 3940 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3941 struct cftype *cft, u64 val) 3942 { 3943 return -ENOSYS; 3944 } 3945 #endif 3946 3947 #ifdef CONFIG_NUMA 3948 3949 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3950 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3951 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3952 3953 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3954 int nid, unsigned int lru_mask, bool tree) 3955 { 3956 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3957 unsigned long nr = 0; 3958 enum lru_list lru; 3959 3960 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3961 3962 for_each_lru(lru) { 3963 if (!(BIT(lru) & lru_mask)) 3964 continue; 3965 if (tree) 3966 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3967 else 3968 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3969 } 3970 return nr; 3971 } 3972 3973 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3974 unsigned int lru_mask, 3975 bool tree) 3976 { 3977 unsigned long nr = 0; 3978 enum lru_list lru; 3979 3980 for_each_lru(lru) { 3981 if (!(BIT(lru) & lru_mask)) 3982 continue; 3983 if (tree) 3984 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3985 else 3986 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3987 } 3988 return nr; 3989 } 3990 3991 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3992 { 3993 struct numa_stat { 3994 const char *name; 3995 unsigned int lru_mask; 3996 }; 3997 3998 static const struct numa_stat stats[] = { 3999 { "total", LRU_ALL }, 4000 { "file", LRU_ALL_FILE }, 4001 { "anon", LRU_ALL_ANON }, 4002 { "unevictable", BIT(LRU_UNEVICTABLE) }, 4003 }; 4004 const struct numa_stat *stat; 4005 int nid; 4006 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4007 4008 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4009 seq_printf(m, "%s=%lu", stat->name, 4010 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4011 false)); 4012 for_each_node_state(nid, N_MEMORY) 4013 seq_printf(m, " N%d=%lu", nid, 4014 mem_cgroup_node_nr_lru_pages(memcg, nid, 4015 stat->lru_mask, false)); 4016 seq_putc(m, '\n'); 4017 } 4018 4019 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4020 4021 seq_printf(m, "hierarchical_%s=%lu", stat->name, 4022 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4023 true)); 4024 for_each_node_state(nid, N_MEMORY) 4025 seq_printf(m, " N%d=%lu", nid, 4026 mem_cgroup_node_nr_lru_pages(memcg, nid, 4027 stat->lru_mask, true)); 4028 seq_putc(m, '\n'); 4029 } 4030 4031 return 0; 4032 } 4033 #endif /* CONFIG_NUMA */ 4034 4035 static const unsigned int memcg1_stats[] = { 4036 NR_FILE_PAGES, 4037 NR_ANON_MAPPED, 4038 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4039 NR_ANON_THPS, 4040 #endif 4041 NR_SHMEM, 4042 NR_FILE_MAPPED, 4043 NR_FILE_DIRTY, 4044 NR_WRITEBACK, 4045 MEMCG_SWAP, 4046 }; 4047 4048 static const char *const memcg1_stat_names[] = { 4049 "cache", 4050 "rss", 4051 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4052 "rss_huge", 4053 #endif 4054 "shmem", 4055 "mapped_file", 4056 "dirty", 4057 "writeback", 4058 "swap", 4059 }; 4060 4061 /* Universal VM events cgroup1 shows, original sort order */ 4062 static const unsigned int memcg1_events[] = { 4063 PGPGIN, 4064 PGPGOUT, 4065 PGFAULT, 4066 PGMAJFAULT, 4067 }; 4068 4069 static int 
memcg_stat_show(struct seq_file *m, void *v) 4070 { 4071 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4072 unsigned long memory, memsw; 4073 struct mem_cgroup *mi; 4074 unsigned int i; 4075 4076 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4077 4078 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4079 unsigned long nr; 4080 4081 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4082 continue; 4083 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4084 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4085 if (memcg1_stats[i] == NR_ANON_THPS) 4086 nr *= HPAGE_PMD_NR; 4087 #endif 4088 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4089 } 4090 4091 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4092 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4093 memcg_events_local(memcg, memcg1_events[i])); 4094 4095 for (i = 0; i < NR_LRU_LISTS; i++) 4096 seq_printf(m, "%s %lu\n", lru_list_name(i), 4097 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4098 PAGE_SIZE); 4099 4100 /* Hierarchical information */ 4101 memory = memsw = PAGE_COUNTER_MAX; 4102 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4103 memory = min(memory, READ_ONCE(mi->memory.max)); 4104 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4105 } 4106 seq_printf(m, "hierarchical_memory_limit %llu\n", 4107 (u64)memory * PAGE_SIZE); 4108 if (do_memsw_account()) 4109 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4110 (u64)memsw * PAGE_SIZE); 4111 4112 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4113 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4114 continue; 4115 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4116 (u64)memcg_page_state(memcg, memcg1_stats[i]) * 4117 PAGE_SIZE); 4118 } 4119 4120 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4121 seq_printf(m, "total_%s %llu\n", 4122 vm_event_name(memcg1_events[i]), 4123 (u64)memcg_events(memcg, memcg1_events[i])); 4124 4125 for (i = 0; i < NR_LRU_LISTS; i++) 4126 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4127 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4128 PAGE_SIZE); 4129 4130 #ifdef CONFIG_DEBUG_VM 4131 { 4132 pg_data_t *pgdat; 4133 struct mem_cgroup_per_node *mz; 4134 unsigned long anon_cost = 0; 4135 unsigned long file_cost = 0; 4136 4137 for_each_online_pgdat(pgdat) { 4138 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 4139 4140 anon_cost += mz->lruvec.anon_cost; 4141 file_cost += mz->lruvec.file_cost; 4142 } 4143 seq_printf(m, "anon_cost %lu\n", anon_cost); 4144 seq_printf(m, "file_cost %lu\n", file_cost); 4145 } 4146 #endif 4147 4148 return 0; 4149 } 4150 4151 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4152 struct cftype *cft) 4153 { 4154 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4155 4156 return mem_cgroup_swappiness(memcg); 4157 } 4158 4159 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4160 struct cftype *cft, u64 val) 4161 { 4162 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4163 4164 if (val > 100) 4165 return -EINVAL; 4166 4167 if (css->parent) 4168 memcg->swappiness = val; 4169 else 4170 vm_swappiness = val; 4171 4172 return 0; 4173 } 4174 4175 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4176 { 4177 struct mem_cgroup_threshold_ary *t; 4178 unsigned long usage; 4179 int i; 4180 4181 rcu_read_lock(); 4182 if (!swap) 4183 t = rcu_dereference(memcg->thresholds.primary); 4184 else 4185 t = rcu_dereference(memcg->memsw_thresholds.primary); 4186 4187 if (!t) 4188 goto unlock; 4189 
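/*
 * Illustrative walk-through (editor's example, values not from the
 * original source): with thresholds registered at 4M, 8M and 16M and
 * usage last seen between 8M and 16M (current_threshold pointing at
 * 8M), a drop to 3M makes the backward walk below signal the 8M and
 * 4M eventfds, while a rise to 20M makes the forward walk signal the
 * 16M one.
 */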
4190 usage = mem_cgroup_usage(memcg, swap); 4191 4192 /* 4193 * current_threshold points to threshold just below or equal to usage. 4194 * If it's not true, a threshold was crossed after last 4195 * call of __mem_cgroup_threshold(). 4196 */ 4197 i = t->current_threshold; 4198 4199 /* 4200 * Iterate backward over array of thresholds starting from 4201 * current_threshold and check if a threshold is crossed. 4202 * If none of thresholds below usage is crossed, we read 4203 * only one element of the array here. 4204 */ 4205 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4206 eventfd_signal(t->entries[i].eventfd, 1); 4207 4208 /* i = current_threshold + 1 */ 4209 i++; 4210 4211 /* 4212 * Iterate forward over array of thresholds starting from 4213 * current_threshold+1 and check if a threshold is crossed. 4214 * If none of thresholds above usage is crossed, we read 4215 * only one element of the array here. 4216 */ 4217 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4218 eventfd_signal(t->entries[i].eventfd, 1); 4219 4220 /* Update current_threshold */ 4221 t->current_threshold = i - 1; 4222 unlock: 4223 rcu_read_unlock(); 4224 } 4225 4226 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4227 { 4228 while (memcg) { 4229 __mem_cgroup_threshold(memcg, false); 4230 if (do_memsw_account()) 4231 __mem_cgroup_threshold(memcg, true); 4232 4233 memcg = parent_mem_cgroup(memcg); 4234 } 4235 } 4236 4237 static int compare_thresholds(const void *a, const void *b) 4238 { 4239 const struct mem_cgroup_threshold *_a = a; 4240 const struct mem_cgroup_threshold *_b = b; 4241 4242 if (_a->threshold > _b->threshold) 4243 return 1; 4244 4245 if (_a->threshold < _b->threshold) 4246 return -1; 4247 4248 return 0; 4249 } 4250 4251 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4252 { 4253 struct mem_cgroup_eventfd_list *ev; 4254 4255 spin_lock(&memcg_oom_lock); 4256 4257 list_for_each_entry(ev, &memcg->oom_notify, list) 4258 eventfd_signal(ev->eventfd, 1); 4259 4260 spin_unlock(&memcg_oom_lock); 4261 return 0; 4262 } 4263 4264 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4265 { 4266 struct mem_cgroup *iter; 4267 4268 for_each_mem_cgroup_tree(iter, memcg) 4269 mem_cgroup_oom_notify_cb(iter); 4270 } 4271 4272 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4273 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4274 { 4275 struct mem_cgroup_thresholds *thresholds; 4276 struct mem_cgroup_threshold_ary *new; 4277 unsigned long threshold; 4278 unsigned long usage; 4279 int i, size, ret; 4280 4281 ret = page_counter_memparse(args, "-1", &threshold); 4282 if (ret) 4283 return ret; 4284 4285 mutex_lock(&memcg->thresholds_lock); 4286 4287 if (type == _MEM) { 4288 thresholds = &memcg->thresholds; 4289 usage = mem_cgroup_usage(memcg, false); 4290 } else if (type == _MEMSWAP) { 4291 thresholds = &memcg->memsw_thresholds; 4292 usage = mem_cgroup_usage(memcg, true); 4293 } else 4294 BUG(); 4295 4296 /* Check if a threshold crossed before adding a new one */ 4297 if (thresholds->primary) 4298 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4299 4300 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4301 4302 /* Allocate memory for new array of thresholds */ 4303 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4304 if (!new) { 4305 ret = -ENOMEM; 4306 goto unlock; 4307 } 4308 new->size = size; 4309 4310 /* Copy thresholds (if any) to new array */ 4311 if (thresholds->primary) 4312 memcpy(new->entries, thresholds->primary->entries, 4313 flex_array_size(new, entries, size - 1)); 4314 4315 /* Add new threshold */ 4316 new->entries[size - 1].eventfd = eventfd; 4317 new->entries[size - 1].threshold = threshold; 4318 4319 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4320 sort(new->entries, size, sizeof(*new->entries), 4321 compare_thresholds, NULL); 4322 4323 /* Find current threshold */ 4324 new->current_threshold = -1; 4325 for (i = 0; i < size; i++) { 4326 if (new->entries[i].threshold <= usage) { 4327 /* 4328 * new->current_threshold will not be used until 4329 * rcu_assign_pointer(), so it's safe to increment 4330 * it here. 4331 */ 4332 ++new->current_threshold; 4333 } else 4334 break; 4335 } 4336 4337 /* Free old spare buffer and save old primary buffer as spare */ 4338 kfree(thresholds->spare); 4339 thresholds->spare = thresholds->primary; 4340 4341 rcu_assign_pointer(thresholds->primary, new); 4342 4343 /* To be sure that nobody uses thresholds */ 4344 synchronize_rcu(); 4345 4346 unlock: 4347 mutex_unlock(&memcg->thresholds_lock); 4348 4349 return ret; 4350 } 4351 4352 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4353 struct eventfd_ctx *eventfd, const char *args) 4354 { 4355 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4356 } 4357 4358 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4359 struct eventfd_ctx *eventfd, const char *args) 4360 { 4361 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4362 } 4363 4364 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4365 struct eventfd_ctx *eventfd, enum res_type type) 4366 { 4367 struct mem_cgroup_thresholds *thresholds; 4368 struct mem_cgroup_threshold_ary *new; 4369 unsigned long usage; 4370 int i, j, size, entries; 4371 4372 mutex_lock(&memcg->thresholds_lock); 4373 4374 if (type == _MEM) { 4375 thresholds = &memcg->thresholds; 4376 usage = mem_cgroup_usage(memcg, false); 4377 } else if (type == _MEMSWAP) { 4378 thresholds = &memcg->memsw_thresholds; 4379 usage = mem_cgroup_usage(memcg, true); 4380 } else 4381 BUG(); 4382 4383 if (!thresholds->primary) 4384 goto unlock; 4385 4386 /* Check if a threshold crossed before removing */ 4387 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4388 4389 /* Calculate new number of threshold */ 4390 size = entries = 0; 4391 for (i = 0; i < thresholds->primary->size; i++) { 4392 if (thresholds->primary->entries[i].eventfd != eventfd) 4393 size++; 4394 else 4395 entries++; 4396 } 4397 4398 new = thresholds->spare; 4399 4400 /* If no items related to eventfd have been cleared, nothing to do */ 4401 if (!entries) 4402 goto unlock; 4403 4404 /* Set thresholds array to NULL if we don't have thresholds */ 4405 if (!size) { 4406 kfree(new); 4407 new = NULL; 4408 goto swap_buffers; 4409 } 4410 4411 new->size = size; 4412 4413 /* Copy thresholds and find current threshold */ 4414 new->current_threshold = -1; 4415 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4416 if (thresholds->primary->entries[i].eventfd == eventfd) 4417 continue; 4418 4419 new->entries[j] = thresholds->primary->entries[i]; 4420 if 
(new->entries[j].threshold <= usage) { 4421 /* 4422 * new->current_threshold will not be used 4423 * until rcu_assign_pointer(), so it's safe to increment 4424 * it here. 4425 */ 4426 ++new->current_threshold; 4427 } 4428 j++; 4429 } 4430 4431 swap_buffers: 4432 /* Swap primary and spare array */ 4433 thresholds->spare = thresholds->primary; 4434 4435 rcu_assign_pointer(thresholds->primary, new); 4436 4437 /* To be sure that nobody uses thresholds */ 4438 synchronize_rcu(); 4439 4440 /* If all events are unregistered, free the spare array */ 4441 if (!new) { 4442 kfree(thresholds->spare); 4443 thresholds->spare = NULL; 4444 } 4445 unlock: 4446 mutex_unlock(&memcg->thresholds_lock); 4447 } 4448 4449 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4450 struct eventfd_ctx *eventfd) 4451 { 4452 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4453 } 4454 4455 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4456 struct eventfd_ctx *eventfd) 4457 { 4458 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4459 } 4460 4461 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4462 struct eventfd_ctx *eventfd, const char *args) 4463 { 4464 struct mem_cgroup_eventfd_list *event; 4465 4466 event = kmalloc(sizeof(*event), GFP_KERNEL); 4467 if (!event) 4468 return -ENOMEM; 4469 4470 spin_lock(&memcg_oom_lock); 4471 4472 event->eventfd = eventfd; 4473 list_add(&event->list, &memcg->oom_notify); 4474 4475 /* already in OOM ? */ 4476 if (memcg->under_oom) 4477 eventfd_signal(eventfd, 1); 4478 spin_unlock(&memcg_oom_lock); 4479 4480 return 0; 4481 } 4482 4483 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4484 struct eventfd_ctx *eventfd) 4485 { 4486 struct mem_cgroup_eventfd_list *ev, *tmp; 4487 4488 spin_lock(&memcg_oom_lock); 4489 4490 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4491 if (ev->eventfd == eventfd) { 4492 list_del(&ev->list); 4493 kfree(ev); 4494 } 4495 } 4496 4497 spin_unlock(&memcg_oom_lock); 4498 } 4499 4500 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4501 { 4502 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4503 4504 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4505 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4506 seq_printf(sf, "oom_kill %lu\n", 4507 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4508 return 0; 4509 } 4510 4511 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4512 struct cftype *cft, u64 val) 4513 { 4514 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4515 4516 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4517 if (!css->parent || !((val == 0) || (val == 1))) 4518 return -EINVAL; 4519 4520 memcg->oom_kill_disable = val; 4521 if (!val) 4522 memcg_oom_recover(memcg); 4523 4524 return 0; 4525 } 4526 4527 #ifdef CONFIG_CGROUP_WRITEBACK 4528 4529 #include <trace/events/writeback.h> 4530 4531 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4532 { 4533 return wb_domain_init(&memcg->cgwb_domain, gfp); 4534 } 4535 4536 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4537 { 4538 wb_domain_exit(&memcg->cgwb_domain); 4539 } 4540 4541 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4542 { 4543 wb_domain_size_changed(&memcg->cgwb_domain); 4544 } 4545 4546 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4547 { 4548 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4549 4550 if (!memcg->css.parent) 4551 return NULL; 4552 4553 return &memcg->cgwb_domain; 4554 } 4555 4556 /* 4557 * idx can be of type enum memcg_stat_item or node_stat_item. 4558 * Keep in sync with memcg_exact_page(). 4559 */ 4560 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 4561 { 4562 long x = atomic_long_read(&memcg->vmstats[idx]); 4563 int cpu; 4564 4565 for_each_online_cpu(cpu) 4566 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 4567 if (x < 0) 4568 x = 0; 4569 return x; 4570 } 4571 4572 /** 4573 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4574 * @wb: bdi_writeback in question 4575 * @pfilepages: out parameter for number of file pages 4576 * @pheadroom: out parameter for number of allocatable pages according to memcg 4577 * @pdirty: out parameter for number of dirty pages 4578 * @pwriteback: out parameter for number of pages under writeback 4579 * 4580 * Determine the numbers of file, headroom, dirty, and writeback pages in 4581 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4582 * is a bit more involved. 4583 * 4584 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4585 * headroom is calculated as the lowest headroom of itself and the 4586 * ancestors. Note that this doesn't consider the actual amount of 4587 * available memory in the system. The caller should further cap 4588 * *@pheadroom accordingly. 4589 */ 4590 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4591 unsigned long *pheadroom, unsigned long *pdirty, 4592 unsigned long *pwriteback) 4593 { 4594 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4595 struct mem_cgroup *parent; 4596 4597 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4598 4599 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4600 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4601 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4602 *pheadroom = PAGE_COUNTER_MAX; 4603 4604 while ((parent = parent_mem_cgroup(memcg))) { 4605 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4606 READ_ONCE(memcg->memory.high)); 4607 unsigned long used = page_counter_read(&memcg->memory); 4608 4609 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4610 memcg = parent; 4611 } 4612 } 4613 4614 /* 4615 * Foreign dirty flushing 4616 * 4617 * There's an inherent mismatch between memcg and writeback. The former 4618 * trackes ownership per-page while the latter per-inode. This was a 4619 * deliberate design decision because honoring per-page ownership in the 4620 * writeback path is complicated, may lead to higher CPU and IO overheads 4621 * and deemed unnecessary given that write-sharing an inode across 4622 * different cgroups isn't a common use-case. 4623 * 4624 * Combined with inode majority-writer ownership switching, this works well 4625 * enough in most cases but there are some pathological cases. For 4626 * example, let's say there are two cgroups A and B which keep writing to 4627 * different but confined parts of the same inode. B owns the inode and 4628 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4629 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4630 * triggering background writeback. A will be slowed down without a way to 4631 * make writeback of the dirty pages happen. 
4632 * 4633 * Conditions like the above can lead to a cgroup getting repeatedly and 4634 * severely throttled after making some progress after each 4635 * dirty_expire_interval while the underlying IO device is almost 4636 * completely idle. 4637 * 4638 * Solving this problem completely requires matching the ownership tracking 4639 * granularities between memcg and writeback in either direction. However, 4640 * the more egregious behaviors can be avoided by simply remembering the 4641 * most recent foreign dirtying events and initiating remote flushes on 4642 * them when local writeback isn't enough to keep the memory clean enough. 4643 * 4644 * The following two functions implement such a mechanism. When a foreign 4645 * page - a page whose memcg and writeback ownerships don't match - is 4646 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4647 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4648 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4649 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4650 * foreign bdi_writebacks which haven't expired. Both the numbers of 4651 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4652 * limited to MEMCG_CGWB_FRN_CNT. 4653 * 4654 * The mechanism only remembers IDs and doesn't hold any object references. 4655 * As being wrong occasionally doesn't matter, updates and accesses to the 4656 * records are lockless and racy. 4657 */ 4658 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, 4659 struct bdi_writeback *wb) 4660 { 4661 struct mem_cgroup *memcg = page->mem_cgroup; 4662 struct memcg_cgwb_frn *frn; 4663 u64 now = get_jiffies_64(); 4664 u64 oldest_at = now; 4665 int oldest = -1; 4666 int i; 4667 4668 trace_track_foreign_dirty(page, wb); 4669 4670 /* 4671 * Pick the slot to use. If there is already a slot for @wb, keep 4672 * using it. If not, replace the oldest one which isn't being 4673 * written out. 4674 */ 4675 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4676 frn = &memcg->cgwb_frn[i]; 4677 if (frn->bdi_id == wb->bdi->id && 4678 frn->memcg_id == wb->memcg_css->id) 4679 break; 4680 if (time_before64(frn->at, oldest_at) && 4681 atomic_read(&frn->done.cnt) == 1) { 4682 oldest = i; 4683 oldest_at = frn->at; 4684 } 4685 } 4686 4687 if (i < MEMCG_CGWB_FRN_CNT) { 4688 /* 4689 * Re-using an existing one. Update timestamp lazily to 4690 * avoid making the cacheline hot. We want them to be 4691 * reasonably up-to-date and significantly shorter than 4692 * dirty_expire_interval as that's what expires the record. 4693 * Use the shorter of 1s and dirty_expire_interval / 8.
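 *
 * For illustration (editor's example, assuming the default
 * dirty_expire_interval of 3000 centisecs, i.e. 30 seconds):
 * msecs_to_jiffies(3000 * 10) / 8 is roughly 3.75 seconds, so
 * update_intv below ends up clamped to HZ, i.e. one second.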
4694 */ 4695 unsigned long update_intv = 4696 min_t(unsigned long, HZ, 4697 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4698 4699 if (time_before64(frn->at, now - update_intv)) 4700 frn->at = now; 4701 } else if (oldest >= 0) { 4702 /* replace the oldest free one */ 4703 frn = &memcg->cgwb_frn[oldest]; 4704 frn->bdi_id = wb->bdi->id; 4705 frn->memcg_id = wb->memcg_css->id; 4706 frn->at = now; 4707 } 4708 } 4709 4710 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4711 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4712 { 4713 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4714 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4715 u64 now = jiffies_64; 4716 int i; 4717 4718 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4719 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4720 4721 /* 4722 * If the record is older than dirty_expire_interval, 4723 * writeback on it has already started. No need to kick it 4724 * off again. Also, don't start a new one if there's 4725 * already one in flight. 4726 */ 4727 if (time_after64(frn->at, now - intv) && 4728 atomic_read(&frn->done.cnt) == 1) { 4729 frn->at = 0; 4730 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4731 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4732 WB_REASON_FOREIGN_FLUSH, 4733 &frn->done); 4734 } 4735 } 4736 } 4737 4738 #else /* CONFIG_CGROUP_WRITEBACK */ 4739 4740 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4741 { 4742 return 0; 4743 } 4744 4745 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4746 { 4747 } 4748 4749 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4750 { 4751 } 4752 4753 #endif /* CONFIG_CGROUP_WRITEBACK */ 4754 4755 /* 4756 * DO NOT USE IN NEW FILES. 4757 * 4758 * "cgroup.event_control" implementation. 4759 * 4760 * This is way over-engineered. It tries to support fully configurable 4761 * events for each user. Such level of flexibility is completely 4762 * unnecessary especially in the light of the planned unified hierarchy. 4763 * 4764 * Please deprecate this and replace with something simpler if at all 4765 * possible. 4766 */ 4767 4768 /* 4769 * Unregister event and free resources. 4770 * 4771 * Gets called from workqueue. 4772 */ 4773 static void memcg_event_remove(struct work_struct *work) 4774 { 4775 struct mem_cgroup_event *event = 4776 container_of(work, struct mem_cgroup_event, remove); 4777 struct mem_cgroup *memcg = event->memcg; 4778 4779 remove_wait_queue(event->wqh, &event->wait); 4780 4781 event->unregister_event(memcg, event->eventfd); 4782 4783 /* Notify userspace the event is going away. */ 4784 eventfd_signal(event->eventfd, 1); 4785 4786 eventfd_ctx_put(event->eventfd); 4787 kfree(event); 4788 css_put(&memcg->css); 4789 } 4790 4791 /* 4792 * Gets called on EPOLLHUP on eventfd when user closes it. 4793 * 4794 * Called with wqh->lock held and interrupts disabled. 4795 */ 4796 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4797 int sync, void *key) 4798 { 4799 struct mem_cgroup_event *event = 4800 container_of(wait, struct mem_cgroup_event, wait); 4801 struct mem_cgroup *memcg = event->memcg; 4802 __poll_t flags = key_to_poll(key); 4803 4804 if (flags & EPOLLHUP) { 4805 /* 4806 * If the event has been detached at cgroup removal, we 4807 * can simply return knowing the other side will cleanup 4808 * for us. 
4809 * 4810 * We can't race against event freeing since the other 4811 * side will require wqh->lock via remove_wait_queue(), 4812 * which we hold. 4813 */ 4814 spin_lock(&memcg->event_list_lock); 4815 if (!list_empty(&event->list)) { 4816 list_del_init(&event->list); 4817 /* 4818 * We are in atomic context, but cgroup_event_remove() 4819 * may sleep, so we have to call it in workqueue. 4820 */ 4821 schedule_work(&event->remove); 4822 } 4823 spin_unlock(&memcg->event_list_lock); 4824 } 4825 4826 return 0; 4827 } 4828 4829 static void memcg_event_ptable_queue_proc(struct file *file, 4830 wait_queue_head_t *wqh, poll_table *pt) 4831 { 4832 struct mem_cgroup_event *event = 4833 container_of(pt, struct mem_cgroup_event, pt); 4834 4835 event->wqh = wqh; 4836 add_wait_queue(wqh, &event->wait); 4837 } 4838 4839 /* 4840 * DO NOT USE IN NEW FILES. 4841 * 4842 * Parse input and register new cgroup event handler. 4843 * 4844 * Input must be in format '<event_fd> <control_fd> <args>'. 4845 * Interpretation of args is defined by control file implementation. 4846 */ 4847 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4848 char *buf, size_t nbytes, loff_t off) 4849 { 4850 struct cgroup_subsys_state *css = of_css(of); 4851 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4852 struct mem_cgroup_event *event; 4853 struct cgroup_subsys_state *cfile_css; 4854 unsigned int efd, cfd; 4855 struct fd efile; 4856 struct fd cfile; 4857 const char *name; 4858 char *endp; 4859 int ret; 4860 4861 buf = strstrip(buf); 4862 4863 efd = simple_strtoul(buf, &endp, 10); 4864 if (*endp != ' ') 4865 return -EINVAL; 4866 buf = endp + 1; 4867 4868 cfd = simple_strtoul(buf, &endp, 10); 4869 if ((*endp != ' ') && (*endp != '\0')) 4870 return -EINVAL; 4871 buf = endp + 1; 4872 4873 event = kzalloc(sizeof(*event), GFP_KERNEL); 4874 if (!event) 4875 return -ENOMEM; 4876 4877 event->memcg = memcg; 4878 INIT_LIST_HEAD(&event->list); 4879 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4880 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4881 INIT_WORK(&event->remove, memcg_event_remove); 4882 4883 efile = fdget(efd); 4884 if (!efile.file) { 4885 ret = -EBADF; 4886 goto out_kfree; 4887 } 4888 4889 event->eventfd = eventfd_ctx_fileget(efile.file); 4890 if (IS_ERR(event->eventfd)) { 4891 ret = PTR_ERR(event->eventfd); 4892 goto out_put_efile; 4893 } 4894 4895 cfile = fdget(cfd); 4896 if (!cfile.file) { 4897 ret = -EBADF; 4898 goto out_put_eventfd; 4899 } 4900 4901 /* the process need read permission on control file */ 4902 /* AV: shouldn't we check that it's been opened for read instead? */ 4903 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4904 if (ret < 0) 4905 goto out_put_cfile; 4906 4907 /* 4908 * Determine the event callbacks and set them in @event. This used 4909 * to be done via struct cftype but cgroup core no longer knows 4910 * about these events. The following is crude but the whole thing 4911 * is for compatibility anyway. 4912 * 4913 * DO NOT ADD NEW FILES. 
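 *
 * Purely illustrative userspace sketch (editor's addition; the file
 * names, fd variables and the 50M threshold are made-up examples):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int ctl = open("cgroup.event_control", O_WRONLY);
 *	dprintf(ctl, "%d %d 50M", efd, cfd);
 *
 * after which a read() on efd completes once usage crosses 50M.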
4914 */ 4915 name = cfile.file->f_path.dentry->d_name.name; 4916 4917 if (!strcmp(name, "memory.usage_in_bytes")) { 4918 event->register_event = mem_cgroup_usage_register_event; 4919 event->unregister_event = mem_cgroup_usage_unregister_event; 4920 } else if (!strcmp(name, "memory.oom_control")) { 4921 event->register_event = mem_cgroup_oom_register_event; 4922 event->unregister_event = mem_cgroup_oom_unregister_event; 4923 } else if (!strcmp(name, "memory.pressure_level")) { 4924 event->register_event = vmpressure_register_event; 4925 event->unregister_event = vmpressure_unregister_event; 4926 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4927 event->register_event = memsw_cgroup_usage_register_event; 4928 event->unregister_event = memsw_cgroup_usage_unregister_event; 4929 } else { 4930 ret = -EINVAL; 4931 goto out_put_cfile; 4932 } 4933 4934 /* 4935 * Verify @cfile should belong to @css. Also, remaining events are 4936 * automatically removed on cgroup destruction but the removal is 4937 * asynchronous, so take an extra ref on @css. 4938 */ 4939 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4940 &memory_cgrp_subsys); 4941 ret = -EINVAL; 4942 if (IS_ERR(cfile_css)) 4943 goto out_put_cfile; 4944 if (cfile_css != css) { 4945 css_put(cfile_css); 4946 goto out_put_cfile; 4947 } 4948 4949 ret = event->register_event(memcg, event->eventfd, buf); 4950 if (ret) 4951 goto out_put_css; 4952 4953 vfs_poll(efile.file, &event->pt); 4954 4955 spin_lock(&memcg->event_list_lock); 4956 list_add(&event->list, &memcg->event_list); 4957 spin_unlock(&memcg->event_list_lock); 4958 4959 fdput(cfile); 4960 fdput(efile); 4961 4962 return nbytes; 4963 4964 out_put_css: 4965 css_put(css); 4966 out_put_cfile: 4967 fdput(cfile); 4968 out_put_eventfd: 4969 eventfd_ctx_put(event->eventfd); 4970 out_put_efile: 4971 fdput(efile); 4972 out_kfree: 4973 kfree(event); 4974 4975 return ret; 4976 } 4977 4978 static struct cftype mem_cgroup_legacy_files[] = { 4979 { 4980 .name = "usage_in_bytes", 4981 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4982 .read_u64 = mem_cgroup_read_u64, 4983 }, 4984 { 4985 .name = "max_usage_in_bytes", 4986 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4987 .write = mem_cgroup_reset, 4988 .read_u64 = mem_cgroup_read_u64, 4989 }, 4990 { 4991 .name = "limit_in_bytes", 4992 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4993 .write = mem_cgroup_write, 4994 .read_u64 = mem_cgroup_read_u64, 4995 }, 4996 { 4997 .name = "soft_limit_in_bytes", 4998 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4999 .write = mem_cgroup_write, 5000 .read_u64 = mem_cgroup_read_u64, 5001 }, 5002 { 5003 .name = "failcnt", 5004 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5005 .write = mem_cgroup_reset, 5006 .read_u64 = mem_cgroup_read_u64, 5007 }, 5008 { 5009 .name = "stat", 5010 .seq_show = memcg_stat_show, 5011 }, 5012 { 5013 .name = "force_empty", 5014 .write = mem_cgroup_force_empty_write, 5015 }, 5016 { 5017 .name = "use_hierarchy", 5018 .write_u64 = mem_cgroup_hierarchy_write, 5019 .read_u64 = mem_cgroup_hierarchy_read, 5020 }, 5021 { 5022 .name = "cgroup.event_control", /* XXX: for compat */ 5023 .write = memcg_write_event_control, 5024 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 5025 }, 5026 { 5027 .name = "swappiness", 5028 .read_u64 = mem_cgroup_swappiness_read, 5029 .write_u64 = mem_cgroup_swappiness_write, 5030 }, 5031 { 5032 .name = "move_charge_at_immigrate", 5033 .read_u64 = mem_cgroup_move_charge_read, 5034 .write_u64 = mem_cgroup_move_charge_write, 5035 
}, 5036 { 5037 .name = "oom_control", 5038 .seq_show = mem_cgroup_oom_control_read, 5039 .write_u64 = mem_cgroup_oom_control_write, 5040 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 5041 }, 5042 { 5043 .name = "pressure_level", 5044 }, 5045 #ifdef CONFIG_NUMA 5046 { 5047 .name = "numa_stat", 5048 .seq_show = memcg_numa_stat_show, 5049 }, 5050 #endif 5051 { 5052 .name = "kmem.limit_in_bytes", 5053 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5054 .write = mem_cgroup_write, 5055 .read_u64 = mem_cgroup_read_u64, 5056 }, 5057 { 5058 .name = "kmem.usage_in_bytes", 5059 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5060 .read_u64 = mem_cgroup_read_u64, 5061 }, 5062 { 5063 .name = "kmem.failcnt", 5064 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5065 .write = mem_cgroup_reset, 5066 .read_u64 = mem_cgroup_read_u64, 5067 }, 5068 { 5069 .name = "kmem.max_usage_in_bytes", 5070 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5071 .write = mem_cgroup_reset, 5072 .read_u64 = mem_cgroup_read_u64, 5073 }, 5074 #if defined(CONFIG_MEMCG_KMEM) && \ 5075 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5076 { 5077 .name = "kmem.slabinfo", 5078 .seq_show = memcg_slab_show, 5079 }, 5080 #endif 5081 { 5082 .name = "kmem.tcp.limit_in_bytes", 5083 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5084 .write = mem_cgroup_write, 5085 .read_u64 = mem_cgroup_read_u64, 5086 }, 5087 { 5088 .name = "kmem.tcp.usage_in_bytes", 5089 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5090 .read_u64 = mem_cgroup_read_u64, 5091 }, 5092 { 5093 .name = "kmem.tcp.failcnt", 5094 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5095 .write = mem_cgroup_reset, 5096 .read_u64 = mem_cgroup_read_u64, 5097 }, 5098 { 5099 .name = "kmem.tcp.max_usage_in_bytes", 5100 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5101 .write = mem_cgroup_reset, 5102 .read_u64 = mem_cgroup_read_u64, 5103 }, 5104 { }, /* terminate */ 5105 }; 5106 5107 /* 5108 * Private memory cgroup IDR 5109 * 5110 * Swap-out records and page cache shadow entries need to store memcg 5111 * references in constrained space, so we maintain an ID space that is 5112 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5113 * memory-controlled cgroups to 64k. 5114 * 5115 * However, there usually are many references to the offline CSS after 5116 * the cgroup has been destroyed, such as page cache or reclaimable 5117 * slab objects, that don't need to hang on to the ID. We want to keep 5118 * those dead CSS from occupying IDs, or we might quickly exhaust the 5119 * relatively small ID space and prevent the creation of new cgroups 5120 * even when there are much fewer than 64k cgroups - possibly none. 5121 * 5122 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5123 * be freed and recycled when it's no longer needed, which is usually 5124 * when the CSS is offlined. 5125 * 5126 * The only exception to that are records of swapped out tmpfs/shmem 5127 * pages that need to be attributed to live ancestors on swapin. But 5128 * those references are manageable from userspace. 
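 *
 * For example (editor's note), a swap-out record holds a reference on
 * the memcg ID for as long as the swap entry exists, so the charge can
 * still be attributed after the cgroup has been deleted, and the ID is
 * only recycled once the last such record is dropped.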
5129 */ 5130 5131 static DEFINE_IDR(mem_cgroup_idr); 5132 5133 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5134 { 5135 if (memcg->id.id > 0) { 5136 idr_remove(&mem_cgroup_idr, memcg->id.id); 5137 memcg->id.id = 0; 5138 } 5139 } 5140 5141 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5142 unsigned int n) 5143 { 5144 refcount_add(n, &memcg->id.ref); 5145 } 5146 5147 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5148 { 5149 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5150 mem_cgroup_id_remove(memcg); 5151 5152 /* Memcg ID pins CSS */ 5153 css_put(&memcg->css); 5154 } 5155 } 5156 5157 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5158 { 5159 mem_cgroup_id_put_many(memcg, 1); 5160 } 5161 5162 /** 5163 * mem_cgroup_from_id - look up a memcg from a memcg id 5164 * @id: the memcg id to look up 5165 * 5166 * Caller must hold rcu_read_lock(). 5167 */ 5168 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5169 { 5170 WARN_ON_ONCE(!rcu_read_lock_held()); 5171 return idr_find(&mem_cgroup_idr, id); 5172 } 5173 5174 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5175 { 5176 struct mem_cgroup_per_node *pn; 5177 int tmp = node; 5178 /* 5179 * This routine is called against possible nodes. 5180 * But it's BUG to call kmalloc() against offline node. 5181 * 5182 * TODO: this routine can waste much memory for nodes which will 5183 * never be onlined. It's better to use memory hotplug callback 5184 * function. 5185 */ 5186 if (!node_state(node, N_NORMAL_MEMORY)) 5187 tmp = -1; 5188 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5189 if (!pn) 5190 return 1; 5191 5192 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5193 GFP_KERNEL_ACCOUNT); 5194 if (!pn->lruvec_stat_local) { 5195 kfree(pn); 5196 return 1; 5197 } 5198 5199 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat, 5200 GFP_KERNEL_ACCOUNT); 5201 if (!pn->lruvec_stat_cpu) { 5202 free_percpu(pn->lruvec_stat_local); 5203 kfree(pn); 5204 return 1; 5205 } 5206 5207 lruvec_init(&pn->lruvec); 5208 pn->usage_in_excess = 0; 5209 pn->on_tree = false; 5210 pn->memcg = memcg; 5211 5212 memcg->nodeinfo[node] = pn; 5213 return 0; 5214 } 5215 5216 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5217 { 5218 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5219 5220 if (!pn) 5221 return; 5222 5223 free_percpu(pn->lruvec_stat_cpu); 5224 free_percpu(pn->lruvec_stat_local); 5225 kfree(pn); 5226 } 5227 5228 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5229 { 5230 int node; 5231 5232 for_each_node(node) 5233 free_mem_cgroup_per_node_info(memcg, node); 5234 free_percpu(memcg->vmstats_percpu); 5235 free_percpu(memcg->vmstats_local); 5236 kfree(memcg); 5237 } 5238 5239 static void mem_cgroup_free(struct mem_cgroup *memcg) 5240 { 5241 memcg_wb_domain_exit(memcg); 5242 /* 5243 * Flush percpu vmstats and vmevents to guarantee the value correctness 5244 * on parent's and all ancestor levels. 
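 * Otherwise the per-CPU deltas still sitting in the soon-to-be-freed
 * percpu areas would be lost, leaving the ancestors' aggregated
 * counters permanently skewed (editor's note).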
5245 */ 5246 memcg_flush_percpu_vmstats(memcg); 5247 memcg_flush_percpu_vmevents(memcg); 5248 __mem_cgroup_free(memcg); 5249 } 5250 5251 static struct mem_cgroup *mem_cgroup_alloc(void) 5252 { 5253 struct mem_cgroup *memcg; 5254 unsigned int size; 5255 int node; 5256 int __maybe_unused i; 5257 long error = -ENOMEM; 5258 5259 size = sizeof(struct mem_cgroup); 5260 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5261 5262 memcg = kzalloc(size, GFP_KERNEL); 5263 if (!memcg) 5264 return ERR_PTR(error); 5265 5266 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5267 1, MEM_CGROUP_ID_MAX, 5268 GFP_KERNEL); 5269 if (memcg->id.id < 0) { 5270 error = memcg->id.id; 5271 goto fail; 5272 } 5273 5274 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5275 GFP_KERNEL_ACCOUNT); 5276 if (!memcg->vmstats_local) 5277 goto fail; 5278 5279 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5280 GFP_KERNEL_ACCOUNT); 5281 if (!memcg->vmstats_percpu) 5282 goto fail; 5283 5284 for_each_node(node) 5285 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5286 goto fail; 5287 5288 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5289 goto fail; 5290 5291 INIT_WORK(&memcg->high_work, high_work_func); 5292 INIT_LIST_HEAD(&memcg->oom_notify); 5293 mutex_init(&memcg->thresholds_lock); 5294 spin_lock_init(&memcg->move_lock); 5295 vmpressure_init(&memcg->vmpressure); 5296 INIT_LIST_HEAD(&memcg->event_list); 5297 spin_lock_init(&memcg->event_list_lock); 5298 memcg->socket_pressure = jiffies; 5299 #ifdef CONFIG_MEMCG_KMEM 5300 memcg->kmemcg_id = -1; 5301 INIT_LIST_HEAD(&memcg->objcg_list); 5302 #endif 5303 #ifdef CONFIG_CGROUP_WRITEBACK 5304 INIT_LIST_HEAD(&memcg->cgwb_list); 5305 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5306 memcg->cgwb_frn[i].done = 5307 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5308 #endif 5309 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5310 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5311 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5312 memcg->deferred_split_queue.split_queue_len = 0; 5313 #endif 5314 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5315 return memcg; 5316 fail: 5317 mem_cgroup_id_remove(memcg); 5318 __mem_cgroup_free(memcg); 5319 return ERR_PTR(error); 5320 } 5321 5322 static struct cgroup_subsys_state * __ref 5323 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5324 { 5325 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5326 struct mem_cgroup *memcg, *old_memcg; 5327 long error = -ENOMEM; 5328 5329 old_memcg = set_active_memcg(parent); 5330 memcg = mem_cgroup_alloc(); 5331 set_active_memcg(old_memcg); 5332 if (IS_ERR(memcg)) 5333 return ERR_CAST(memcg); 5334 5335 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5336 memcg->soft_limit = PAGE_COUNTER_MAX; 5337 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5338 if (parent) { 5339 memcg->swappiness = mem_cgroup_swappiness(parent); 5340 memcg->oom_kill_disable = parent->oom_kill_disable; 5341 } 5342 if (parent && parent->use_hierarchy) { 5343 memcg->use_hierarchy = true; 5344 page_counter_init(&memcg->memory, &parent->memory); 5345 page_counter_init(&memcg->swap, &parent->swap); 5346 page_counter_init(&memcg->kmem, &parent->kmem); 5347 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5348 } else { 5349 page_counter_init(&memcg->memory, NULL); 5350 page_counter_init(&memcg->swap, NULL); 5351 page_counter_init(&memcg->kmem, NULL); 5352 page_counter_init(&memcg->tcpmem, NULL); 5353 /* 5354 * Deeper hierachy with use_hierarchy 
== false doesn't make 5355 * much sense so let cgroup subsystem know about this 5356 * unfortunate state in our controller. 5357 */ 5358 if (parent != root_mem_cgroup) 5359 memory_cgrp_subsys.broken_hierarchy = true; 5360 } 5361 5362 /* The following stuff does not apply to the root */ 5363 if (!parent) { 5364 root_mem_cgroup = memcg; 5365 return &memcg->css; 5366 } 5367 5368 error = memcg_online_kmem(memcg); 5369 if (error) 5370 goto fail; 5371 5372 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5373 static_branch_inc(&memcg_sockets_enabled_key); 5374 5375 return &memcg->css; 5376 fail: 5377 mem_cgroup_id_remove(memcg); 5378 mem_cgroup_free(memcg); 5379 return ERR_PTR(error); 5380 } 5381 5382 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5383 { 5384 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5385 5386 /* 5387 * A memcg must be visible for memcg_expand_shrinker_maps() 5388 * by the time the maps are allocated. So, we allocate maps 5389 * here, when for_each_mem_cgroup() can't skip it. 5390 */ 5391 if (memcg_alloc_shrinker_maps(memcg)) { 5392 mem_cgroup_id_remove(memcg); 5393 return -ENOMEM; 5394 } 5395 5396 /* Online state pins memcg ID, memcg ID pins CSS */ 5397 refcount_set(&memcg->id.ref, 1); 5398 css_get(css); 5399 return 0; 5400 } 5401 5402 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5403 { 5404 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5405 struct mem_cgroup_event *event, *tmp; 5406 5407 /* 5408 * Unregister events and notify userspace. 5409 * Notify userspace about cgroup removing only after rmdir of cgroup 5410 * directory to avoid race between userspace and kernelspace. 5411 */ 5412 spin_lock(&memcg->event_list_lock); 5413 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5414 list_del_init(&event->list); 5415 schedule_work(&event->remove); 5416 } 5417 spin_unlock(&memcg->event_list_lock); 5418 5419 page_counter_set_min(&memcg->memory, 0); 5420 page_counter_set_low(&memcg->memory, 0); 5421 5422 memcg_offline_kmem(memcg); 5423 wb_memcg_offline(memcg); 5424 5425 drain_all_stock(memcg); 5426 5427 mem_cgroup_id_put(memcg); 5428 } 5429 5430 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5431 { 5432 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5433 5434 invalidate_reclaim_iterators(memcg); 5435 } 5436 5437 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5438 { 5439 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5440 int __maybe_unused i; 5441 5442 #ifdef CONFIG_CGROUP_WRITEBACK 5443 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5444 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5445 #endif 5446 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5447 static_branch_dec(&memcg_sockets_enabled_key); 5448 5449 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5450 static_branch_dec(&memcg_sockets_enabled_key); 5451 5452 vmpressure_cleanup(&memcg->vmpressure); 5453 cancel_work_sync(&memcg->high_work); 5454 mem_cgroup_remove_from_trees(memcg); 5455 memcg_free_shrinker_maps(memcg); 5456 memcg_free_kmem(memcg); 5457 mem_cgroup_free(memcg); 5458 } 5459 5460 /** 5461 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5462 * @css: the target css 5463 * 5464 * Reset the states of the mem_cgroup associated with @css. This is 5465 * invoked when the userland requests disabling on the default hierarchy 5466 * but the memcg is pinned through dependency. 
The memcg should stop 5467 * applying policies and should revert to the vanilla state as it may be 5468 * made visible again. 5469 * 5470 * The current implementation only resets the essential configurations. 5471 * This needs to be expanded to cover all the visible parts. 5472 */ 5473 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5474 { 5475 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5476 5477 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5478 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5479 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5480 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5481 page_counter_set_min(&memcg->memory, 0); 5482 page_counter_set_low(&memcg->memory, 0); 5483 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5484 memcg->soft_limit = PAGE_COUNTER_MAX; 5485 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5486 memcg_wb_domain_size_changed(memcg); 5487 } 5488 5489 #ifdef CONFIG_MMU 5490 /* Handlers for move charge at task migration. */ 5491 static int mem_cgroup_do_precharge(unsigned long count) 5492 { 5493 int ret; 5494 5495 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5496 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5497 if (!ret) { 5498 mc.precharge += count; 5499 return ret; 5500 } 5501 5502 /* Try charges one by one with reclaim, but do not retry */ 5503 while (count--) { 5504 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5505 if (ret) 5506 return ret; 5507 mc.precharge++; 5508 cond_resched(); 5509 } 5510 return 0; 5511 } 5512 5513 union mc_target { 5514 struct page *page; 5515 swp_entry_t ent; 5516 }; 5517 5518 enum mc_target_type { 5519 MC_TARGET_NONE = 0, 5520 MC_TARGET_PAGE, 5521 MC_TARGET_SWAP, 5522 MC_TARGET_DEVICE, 5523 }; 5524 5525 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5526 unsigned long addr, pte_t ptent) 5527 { 5528 struct page *page = vm_normal_page(vma, addr, ptent); 5529 5530 if (!page || !page_mapped(page)) 5531 return NULL; 5532 if (PageAnon(page)) { 5533 if (!(mc.flags & MOVE_ANON)) 5534 return NULL; 5535 } else { 5536 if (!(mc.flags & MOVE_FILE)) 5537 return NULL; 5538 } 5539 if (!get_page_unless_zero(page)) 5540 return NULL; 5541 5542 return page; 5543 } 5544 5545 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5546 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5547 pte_t ptent, swp_entry_t *entry) 5548 { 5549 struct page *page = NULL; 5550 swp_entry_t ent = pte_to_swp_entry(ptent); 5551 5552 if (!(mc.flags & MOVE_ANON)) 5553 return NULL; 5554 5555 /* 5556 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5557 * a device and because they are not accessible by CPU they are store 5558 * as special swap entry in the CPU page table. 5559 */ 5560 if (is_device_private_entry(ent)) { 5561 page = device_private_entry_to_page(ent); 5562 /* 5563 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5564 * a refcount of 1 when free (unlike normal page) 5565 */ 5566 if (!page_ref_add_unless(page, 1, 1)) 5567 return NULL; 5568 return page; 5569 } 5570 5571 if (non_swap_entry(ent)) 5572 return NULL; 5573 5574 /* 5575 * Because lookup_swap_cache() updates some statistics counter, 5576 * we call find_get_page() with swapper_space directly. 
5577 */ 5578 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5579 entry->val = ent.val; 5580 5581 return page; 5582 } 5583 #else 5584 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5585 pte_t ptent, swp_entry_t *entry) 5586 { 5587 return NULL; 5588 } 5589 #endif 5590 5591 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5592 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5593 { 5594 if (!vma->vm_file) /* anonymous vma */ 5595 return NULL; 5596 if (!(mc.flags & MOVE_FILE)) 5597 return NULL; 5598 5599 /* page is moved even if it's not RSS of this task(page-faulted). */ 5600 /* shmem/tmpfs may report page out on swap: account for that too. */ 5601 return find_get_incore_page(vma->vm_file->f_mapping, 5602 linear_page_index(vma, addr)); 5603 } 5604 5605 /** 5606 * mem_cgroup_move_account - move account of the page 5607 * @page: the page 5608 * @compound: charge the page as compound or small page 5609 * @from: mem_cgroup which the page is moved from. 5610 * @to: mem_cgroup which the page is moved to. @from != @to. 5611 * 5612 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5613 * 5614 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5615 * from old cgroup. 5616 */ 5617 static int mem_cgroup_move_account(struct page *page, 5618 bool compound, 5619 struct mem_cgroup *from, 5620 struct mem_cgroup *to) 5621 { 5622 struct lruvec *from_vec, *to_vec; 5623 struct pglist_data *pgdat; 5624 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5625 int ret; 5626 5627 VM_BUG_ON(from == to); 5628 VM_BUG_ON_PAGE(PageLRU(page), page); 5629 VM_BUG_ON(compound && !PageTransHuge(page)); 5630 5631 /* 5632 * Prevent mem_cgroup_migrate() from looking at 5633 * page->mem_cgroup of its source page while we change it. 5634 */ 5635 ret = -EBUSY; 5636 if (!trylock_page(page)) 5637 goto out; 5638 5639 ret = -EINVAL; 5640 if (page->mem_cgroup != from) 5641 goto out_unlock; 5642 5643 pgdat = page_pgdat(page); 5644 from_vec = mem_cgroup_lruvec(from, pgdat); 5645 to_vec = mem_cgroup_lruvec(to, pgdat); 5646 5647 lock_page_memcg(page); 5648 5649 if (PageAnon(page)) { 5650 if (page_mapped(page)) { 5651 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5652 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5653 if (PageTransHuge(page)) { 5654 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5655 -nr_pages); 5656 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5657 nr_pages); 5658 } 5659 5660 } 5661 } else { 5662 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5663 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5664 5665 if (PageSwapBacked(page)) { 5666 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5667 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5668 } 5669 5670 if (page_mapped(page)) { 5671 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5672 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5673 } 5674 5675 if (PageDirty(page)) { 5676 struct address_space *mapping = page_mapping(page); 5677 5678 if (mapping_can_writeback(mapping)) { 5679 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5680 -nr_pages); 5681 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5682 nr_pages); 5683 } 5684 } 5685 } 5686 5687 if (PageWriteback(page)) { 5688 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5689 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5690 } 5691 5692 /* 5693 * All state has been migrated, let's switch to the new memcg. 
5694 * 5695 * It is safe to change page->mem_cgroup here because the page 5696 * is referenced, charged, isolated, and locked: we can't race 5697 * with (un)charging, migration, LRU putback, or anything else 5698 * that would rely on a stable page->mem_cgroup. 5699 * 5700 * Note that lock_page_memcg is a memcg lock, not a page lock, 5701 * to save space. As soon as we switch page->mem_cgroup to a 5702 * new memcg that isn't locked, the above state can change 5703 * concurrently again. Make sure we're truly done with it. 5704 */ 5705 smp_mb(); 5706 5707 css_get(&to->css); 5708 css_put(&from->css); 5709 5710 page->mem_cgroup = to; 5711 5712 __unlock_page_memcg(from); 5713 5714 ret = 0; 5715 5716 local_irq_disable(); 5717 mem_cgroup_charge_statistics(to, page, nr_pages); 5718 memcg_check_events(to, page); 5719 mem_cgroup_charge_statistics(from, page, -nr_pages); 5720 memcg_check_events(from, page); 5721 local_irq_enable(); 5722 out_unlock: 5723 unlock_page(page); 5724 out: 5725 return ret; 5726 } 5727 5728 /** 5729 * get_mctgt_type - get target type of moving charge 5730 * @vma: the vma the pte to be checked belongs to 5731 * @addr: the address corresponding to the pte to be checked 5732 * @ptent: the pte to be checked 5733 * @target: pointer where the target page or swap entry will be stored (can be NULL) 5734 * 5735 * Returns 5736 * 0 (MC_TARGET_NONE): if the pte is not a target for move charge. 5737 * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5738 * move charge. If @target is not NULL, the page is stored in target->page 5739 * with an extra refcount taken (callers should handle it). 5740 * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5741 * target for charge migration. If @target is not NULL, the entry is stored 5742 * in target->ent. 5743 * 3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE 5744 * (so a ZONE_DEVICE page and thus not on the lru). 5745 * For now such a page is charged like a regular page would be, as for all 5746 * intents and purposes it is just special memory taking the place of a 5747 * regular page. 5748 * 5749 * See Documentation/vm/hmm.rst and include/linux/hmm.h 5750 * 5751 * Called with pte lock held. 5752 */ 5753 5754 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5755 unsigned long addr, pte_t ptent, union mc_target *target) 5756 { 5757 struct page *page = NULL; 5758 enum mc_target_type ret = MC_TARGET_NONE; 5759 swp_entry_t ent = { .val = 0 }; 5760 5761 if (pte_present(ptent)) 5762 page = mc_handle_present_pte(vma, addr, ptent); 5763 else if (is_swap_pte(ptent)) 5764 page = mc_handle_swap_pte(vma, ptent, &ent); 5765 else if (pte_none(ptent)) 5766 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5767 5768 if (!page && !ent.val) 5769 return ret; 5770 if (page) { 5771 /* 5772 * Do only a loose check w/o serialization. 5773 * mem_cgroup_move_account() checks the page is valid or 5774 * not under LRU exclusion. 5775 */ 5776 if (page->mem_cgroup == mc.from) { 5777 ret = MC_TARGET_PAGE; 5778 if (is_device_private_page(page)) 5779 ret = MC_TARGET_DEVICE; 5780 if (target) 5781 target->page = page; 5782 } 5783 if (!ret || !target) 5784 put_page(page); 5785 } 5786 /* 5787 * There is a swap entry and a page doesn't exist or isn't charged. 5788 * But we cannot move a tail page of a THP.
5789 */ 5790 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5791 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5792 ret = MC_TARGET_SWAP; 5793 if (target) 5794 target->ent = ent; 5795 } 5796 return ret; 5797 } 5798 5799 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5800 /* 5801 * We don't consider PMD mapped swapping or file mapped pages because THP does 5802 * not support them for now. 5803 * Caller should make sure that pmd_trans_huge(pmd) is true. 5804 */ 5805 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5806 unsigned long addr, pmd_t pmd, union mc_target *target) 5807 { 5808 struct page *page = NULL; 5809 enum mc_target_type ret = MC_TARGET_NONE; 5810 5811 if (unlikely(is_swap_pmd(pmd))) { 5812 VM_BUG_ON(thp_migration_supported() && 5813 !is_pmd_migration_entry(pmd)); 5814 return ret; 5815 } 5816 page = pmd_page(pmd); 5817 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5818 if (!(mc.flags & MOVE_ANON)) 5819 return ret; 5820 if (page->mem_cgroup == mc.from) { 5821 ret = MC_TARGET_PAGE; 5822 if (target) { 5823 get_page(page); 5824 target->page = page; 5825 } 5826 } 5827 return ret; 5828 } 5829 #else 5830 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5831 unsigned long addr, pmd_t pmd, union mc_target *target) 5832 { 5833 return MC_TARGET_NONE; 5834 } 5835 #endif 5836 5837 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5838 unsigned long addr, unsigned long end, 5839 struct mm_walk *walk) 5840 { 5841 struct vm_area_struct *vma = walk->vma; 5842 pte_t *pte; 5843 spinlock_t *ptl; 5844 5845 ptl = pmd_trans_huge_lock(pmd, vma); 5846 if (ptl) { 5847 /* 5848 * Note there cannot be MC_TARGET_DEVICE for now as we do not 5849 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but 5850 * this might change. 5851 */ 5852 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5853 mc.precharge += HPAGE_PMD_NR; 5854 spin_unlock(ptl); 5855 return 0; 5856 } 5857 5858 if (pmd_trans_unstable(pmd)) 5859 return 0; 5860 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5861 for (; addr != end; pte++, addr += PAGE_SIZE) 5862 if (get_mctgt_type(vma, addr, *pte, NULL)) 5863 mc.precharge++; /* increment precharge temporarily */ 5864 pte_unmap_unlock(pte - 1, ptl); 5865 cond_resched(); 5866 5867 return 0; 5868 } 5869 5870 static const struct mm_walk_ops precharge_walk_ops = { 5871 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5872 }; 5873 5874 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5875 { 5876 unsigned long precharge; 5877 5878 mmap_read_lock(mm); 5879 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5880 mmap_read_unlock(mm); 5881 5882 precharge = mc.precharge; 5883 mc.precharge = 0; 5884 5885 return precharge; 5886 } 5887 5888 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5889 { 5890 unsigned long precharge = mem_cgroup_count_precharge(mm); 5891 5892 VM_BUG_ON(mc.moving_task); 5893 mc.moving_task = current; 5894 return mem_cgroup_do_precharge(precharge); 5895 } 5896 5897 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters.
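* Concretely: leftover precharges are cancelled against mc.to, charges already moved but not yet uncharged from mc.from are uncharged here, and for moved swap entries the memsw charge and id references on mc.from as well as the duplicate memory charge on mc.to are released.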
*/ 5898 static void __mem_cgroup_clear_mc(void) 5899 { 5900 struct mem_cgroup *from = mc.from; 5901 struct mem_cgroup *to = mc.to; 5902 5903 /* we must uncharge all the leftover precharges from mc.to */ 5904 if (mc.precharge) { 5905 cancel_charge(mc.to, mc.precharge); 5906 mc.precharge = 0; 5907 } 5908 /* 5909 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5910 * we must uncharge here. 5911 */ 5912 if (mc.moved_charge) { 5913 cancel_charge(mc.from, mc.moved_charge); 5914 mc.moved_charge = 0; 5915 } 5916 /* we must fixup refcnts and charges */ 5917 if (mc.moved_swap) { 5918 /* uncharge swap account from the old cgroup */ 5919 if (!mem_cgroup_is_root(mc.from)) 5920 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5921 5922 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5923 5924 /* 5925 * we charged both to->memory and to->memsw, so we 5926 * should uncharge to->memory. 5927 */ 5928 if (!mem_cgroup_is_root(mc.to)) 5929 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5930 5931 mc.moved_swap = 0; 5932 } 5933 memcg_oom_recover(from); 5934 memcg_oom_recover(to); 5935 wake_up_all(&mc.waitq); 5936 } 5937 5938 static void mem_cgroup_clear_mc(void) 5939 { 5940 struct mm_struct *mm = mc.mm; 5941 5942 /* 5943 * we must clear moving_task before waking up waiters at the end of 5944 * task migration. 5945 */ 5946 mc.moving_task = NULL; 5947 __mem_cgroup_clear_mc(); 5948 spin_lock(&mc.lock); 5949 mc.from = NULL; 5950 mc.to = NULL; 5951 mc.mm = NULL; 5952 spin_unlock(&mc.lock); 5953 5954 mmput(mm); 5955 } 5956 5957 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5958 { 5959 struct cgroup_subsys_state *css; 5960 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5961 struct mem_cgroup *from; 5962 struct task_struct *leader, *p; 5963 struct mm_struct *mm; 5964 unsigned long move_flags; 5965 int ret = 0; 5966 5967 /* charge immigration isn't supported on the default hierarchy */ 5968 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5969 return 0; 5970 5971 /* 5972 * Multi-process migrations only happen on the default hierarchy 5973 * where charge immigration is not used. Perform charge 5974 * immigration if @tset contains a leader and whine if there are 5975 * multiple. 5976 */ 5977 p = NULL; 5978 cgroup_taskset_for_each_leader(leader, css, tset) { 5979 WARN_ON_ONCE(p); 5980 p = leader; 5981 memcg = mem_cgroup_from_css(css); 5982 } 5983 if (!p) 5984 return 0; 5985 5986 /* 5987 * We are now committed to this value whatever it is. Changes in this 5988 * tunable will only affect upcoming migrations, not the current one. 5989 * So we need to save it and keep using it.
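* (move_flags is a bitmask of MOVE_ANON and/or MOVE_FILE; because it is read once with READ_ONCE() below, later writes to memory.move_charge_at_immigrate cannot alter what this particular migration moves.)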
5990 */ 5991 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5992 if (!move_flags) 5993 return 0; 5994 5995 from = mem_cgroup_from_task(p); 5996 5997 VM_BUG_ON(from == memcg); 5998 5999 mm = get_task_mm(p); 6000 if (!mm) 6001 return 0; 6002 /* We move charges only when we move a owner of the mm */ 6003 if (mm->owner == p) { 6004 VM_BUG_ON(mc.from); 6005 VM_BUG_ON(mc.to); 6006 VM_BUG_ON(mc.precharge); 6007 VM_BUG_ON(mc.moved_charge); 6008 VM_BUG_ON(mc.moved_swap); 6009 6010 spin_lock(&mc.lock); 6011 mc.mm = mm; 6012 mc.from = from; 6013 mc.to = memcg; 6014 mc.flags = move_flags; 6015 spin_unlock(&mc.lock); 6016 /* We set mc.moving_task later */ 6017 6018 ret = mem_cgroup_precharge_mc(mm); 6019 if (ret) 6020 mem_cgroup_clear_mc(); 6021 } else { 6022 mmput(mm); 6023 } 6024 return ret; 6025 } 6026 6027 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6028 { 6029 if (mc.to) 6030 mem_cgroup_clear_mc(); 6031 } 6032 6033 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6034 unsigned long addr, unsigned long end, 6035 struct mm_walk *walk) 6036 { 6037 int ret = 0; 6038 struct vm_area_struct *vma = walk->vma; 6039 pte_t *pte; 6040 spinlock_t *ptl; 6041 enum mc_target_type target_type; 6042 union mc_target target; 6043 struct page *page; 6044 6045 ptl = pmd_trans_huge_lock(pmd, vma); 6046 if (ptl) { 6047 if (mc.precharge < HPAGE_PMD_NR) { 6048 spin_unlock(ptl); 6049 return 0; 6050 } 6051 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6052 if (target_type == MC_TARGET_PAGE) { 6053 page = target.page; 6054 if (!isolate_lru_page(page)) { 6055 if (!mem_cgroup_move_account(page, true, 6056 mc.from, mc.to)) { 6057 mc.precharge -= HPAGE_PMD_NR; 6058 mc.moved_charge += HPAGE_PMD_NR; 6059 } 6060 putback_lru_page(page); 6061 } 6062 put_page(page); 6063 } else if (target_type == MC_TARGET_DEVICE) { 6064 page = target.page; 6065 if (!mem_cgroup_move_account(page, true, 6066 mc.from, mc.to)) { 6067 mc.precharge -= HPAGE_PMD_NR; 6068 mc.moved_charge += HPAGE_PMD_NR; 6069 } 6070 put_page(page); 6071 } 6072 spin_unlock(ptl); 6073 return 0; 6074 } 6075 6076 if (pmd_trans_unstable(pmd)) 6077 return 0; 6078 retry: 6079 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6080 for (; addr != end; addr += PAGE_SIZE) { 6081 pte_t ptent = *(pte++); 6082 bool device = false; 6083 swp_entry_t ent; 6084 6085 if (!mc.precharge) 6086 break; 6087 6088 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6089 case MC_TARGET_DEVICE: 6090 device = true; 6091 fallthrough; 6092 case MC_TARGET_PAGE: 6093 page = target.page; 6094 /* 6095 * We can have a part of the split pmd here. Moving it 6096 * can be done but it would be too convoluted so simply 6097 * ignore such a partial THP and keep it in original 6098 * memcg. There should be somebody mapping the head. 6099 */ 6100 if (PageTransCompound(page)) 6101 goto put; 6102 if (!device && isolate_lru_page(page)) 6103 goto put; 6104 if (!mem_cgroup_move_account(page, false, 6105 mc.from, mc.to)) { 6106 mc.precharge--; 6107 /* we uncharge from mc.from later. */ 6108 mc.moved_charge++; 6109 } 6110 if (!device) 6111 putback_lru_page(page); 6112 put: /* get_mctgt_type() gets the page */ 6113 put_page(page); 6114 break; 6115 case MC_TARGET_SWAP: 6116 ent = target.ent; 6117 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6118 mc.precharge--; 6119 mem_cgroup_id_get_many(mc.to, 1); 6120 /* we fixup other refcnts and charges later. 
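That happens in __mem_cgroup_clear_mc(), which releases mc.from's memsw charge and id references for mc.moved_swap and drops the duplicate memory charge on mc.to.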
*/ 6121 mc.moved_swap++; 6122 } 6123 break; 6124 default: 6125 break; 6126 } 6127 } 6128 pte_unmap_unlock(pte - 1, ptl); 6129 cond_resched(); 6130 6131 if (addr != end) { 6132 /* 6133 * We have consumed all precharges we got in can_attach(). 6134 * We try to charge one by one, but don't do any additional 6135 * charges to mc.to if we have already failed to charge once in the 6136 * attach() phase. 6137 */ 6138 ret = mem_cgroup_do_precharge(1); 6139 if (!ret) 6140 goto retry; 6141 } 6142 6143 return ret; 6144 } 6145 6146 static const struct mm_walk_ops charge_walk_ops = { 6147 .pmd_entry = mem_cgroup_move_charge_pte_range, 6148 }; 6149 6150 static void mem_cgroup_move_charge(void) 6151 { 6152 lru_add_drain_all(); 6153 /* 6154 * Signal lock_page_memcg() to take the memcg's move_lock 6155 * while we're moving its pages to another memcg. Then wait 6156 * for already started RCU-only updates to finish. 6157 */ 6158 atomic_inc(&mc.from->moving_account); 6159 synchronize_rcu(); 6160 retry: 6161 if (unlikely(!mmap_read_trylock(mc.mm))) { 6162 /* 6163 * Someone who is holding the mmap_lock might be waiting in 6164 * the waitq. So we cancel all extra charges, wake up all waiters, 6165 * and retry. Because we cancel precharges, we might not be able 6166 * to move enough charges, but moving charge is a best-effort 6167 * feature anyway, so it wouldn't be a big problem. 6168 */ 6169 __mem_cgroup_clear_mc(); 6170 cond_resched(); 6171 goto retry; 6172 } 6173 /* 6174 * When we have consumed all precharges and failed to do an 6175 * additional charge, the page walk just aborts. 6176 */ 6177 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6178 NULL); 6179 6180 mmap_read_unlock(mc.mm); 6181 atomic_dec(&mc.from->moving_account); 6182 } 6183 6184 static void mem_cgroup_move_task(void) 6185 { 6186 if (mc.to) { 6187 mem_cgroup_move_charge(); 6188 mem_cgroup_clear_mc(); 6189 } 6190 } 6191 #else /* !CONFIG_MMU */ 6192 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6193 { 6194 return 0; 6195 } 6196 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6197 { 6198 } 6199 static void mem_cgroup_move_task(void) 6200 { 6201 } 6202 #endif 6203 6204 /* 6205 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 6206 * to verify whether we're attached to the default hierarchy on each mount 6207 * attempt. 6208 */ 6209 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 6210 { 6211 /* 6212 * use_hierarchy is forced on the default hierarchy. cgroup core 6213 * guarantees that @root doesn't have any children, so turning it 6214 * on for the root memcg is enough.
6215 */ 6216 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6217 root_mem_cgroup->use_hierarchy = true; 6218 else 6219 root_mem_cgroup->use_hierarchy = false; 6220 } 6221 6222 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6223 { 6224 if (value == PAGE_COUNTER_MAX) 6225 seq_puts(m, "max\n"); 6226 else 6227 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6228 6229 return 0; 6230 } 6231 6232 static u64 memory_current_read(struct cgroup_subsys_state *css, 6233 struct cftype *cft) 6234 { 6235 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6236 6237 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6238 } 6239 6240 static int memory_min_show(struct seq_file *m, void *v) 6241 { 6242 return seq_puts_memcg_tunable(m, 6243 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6244 } 6245 6246 static ssize_t memory_min_write(struct kernfs_open_file *of, 6247 char *buf, size_t nbytes, loff_t off) 6248 { 6249 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6250 unsigned long min; 6251 int err; 6252 6253 buf = strstrip(buf); 6254 err = page_counter_memparse(buf, "max", &min); 6255 if (err) 6256 return err; 6257 6258 page_counter_set_min(&memcg->memory, min); 6259 6260 return nbytes; 6261 } 6262 6263 static int memory_low_show(struct seq_file *m, void *v) 6264 { 6265 return seq_puts_memcg_tunable(m, 6266 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6267 } 6268 6269 static ssize_t memory_low_write(struct kernfs_open_file *of, 6270 char *buf, size_t nbytes, loff_t off) 6271 { 6272 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6273 unsigned long low; 6274 int err; 6275 6276 buf = strstrip(buf); 6277 err = page_counter_memparse(buf, "max", &low); 6278 if (err) 6279 return err; 6280 6281 page_counter_set_low(&memcg->memory, low); 6282 6283 return nbytes; 6284 } 6285 6286 static int memory_high_show(struct seq_file *m, void *v) 6287 { 6288 return seq_puts_memcg_tunable(m, 6289 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6290 } 6291 6292 static ssize_t memory_high_write(struct kernfs_open_file *of, 6293 char *buf, size_t nbytes, loff_t off) 6294 { 6295 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6296 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6297 bool drained = false; 6298 unsigned long high; 6299 int err; 6300 6301 buf = strstrip(buf); 6302 err = page_counter_memparse(buf, "max", &high); 6303 if (err) 6304 return err; 6305 6306 for (;;) { 6307 unsigned long nr_pages = page_counter_read(&memcg->memory); 6308 unsigned long reclaimed; 6309 6310 if (nr_pages <= high) 6311 break; 6312 6313 if (signal_pending(current)) 6314 break; 6315 6316 if (!drained) { 6317 drain_all_stock(memcg); 6318 drained = true; 6319 continue; 6320 } 6321 6322 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6323 GFP_KERNEL, true); 6324 6325 if (!reclaimed && !nr_retries--) 6326 break; 6327 } 6328 6329 page_counter_set_high(&memcg->memory, high); 6330 6331 memcg_wb_domain_size_changed(memcg); 6332 6333 return nbytes; 6334 } 6335 6336 static int memory_max_show(struct seq_file *m, void *v) 6337 { 6338 return seq_puts_memcg_tunable(m, 6339 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6340 } 6341 6342 static ssize_t memory_max_write(struct kernfs_open_file *of, 6343 char *buf, size_t nbytes, loff_t off) 6344 { 6345 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6346 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6347 bool drained = false; 6348 unsigned long max; 6349 int err; 6350 6351 buf = strstrip(buf); 6352 err = 
page_counter_memparse(buf, "max", &max); 6353 if (err) 6354 return err; 6355 6356 xchg(&memcg->memory.max, max); 6357 6358 for (;;) { 6359 unsigned long nr_pages = page_counter_read(&memcg->memory); 6360 6361 if (nr_pages <= max) 6362 break; 6363 6364 if (signal_pending(current)) 6365 break; 6366 6367 if (!drained) { 6368 drain_all_stock(memcg); 6369 drained = true; 6370 continue; 6371 } 6372 6373 if (nr_reclaims) { 6374 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6375 GFP_KERNEL, true)) 6376 nr_reclaims--; 6377 continue; 6378 } 6379 6380 memcg_memory_event(memcg, MEMCG_OOM); 6381 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6382 break; 6383 } 6384 6385 memcg_wb_domain_size_changed(memcg); 6386 return nbytes; 6387 } 6388 6389 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6390 { 6391 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6392 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6393 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6394 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6395 seq_printf(m, "oom_kill %lu\n", 6396 atomic_long_read(&events[MEMCG_OOM_KILL])); 6397 } 6398 6399 static int memory_events_show(struct seq_file *m, void *v) 6400 { 6401 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6402 6403 __memory_events_show(m, memcg->memory_events); 6404 return 0; 6405 } 6406 6407 static int memory_events_local_show(struct seq_file *m, void *v) 6408 { 6409 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6410 6411 __memory_events_show(m, memcg->memory_events_local); 6412 return 0; 6413 } 6414 6415 static int memory_stat_show(struct seq_file *m, void *v) 6416 { 6417 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6418 char *buf; 6419 6420 buf = memory_stat_format(memcg); 6421 if (!buf) 6422 return -ENOMEM; 6423 seq_puts(m, buf); 6424 kfree(buf); 6425 return 0; 6426 } 6427 6428 #ifdef CONFIG_NUMA 6429 static int memory_numa_stat_show(struct seq_file *m, void *v) 6430 { 6431 int i; 6432 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6433 6434 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6435 int nid; 6436 6437 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6438 continue; 6439 6440 seq_printf(m, "%s", memory_stats[i].name); 6441 for_each_node_state(nid, N_MEMORY) { 6442 u64 size; 6443 struct lruvec *lruvec; 6444 6445 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6446 size = lruvec_page_state(lruvec, memory_stats[i].idx); 6447 size *= memory_stats[i].ratio; 6448 seq_printf(m, " N%d=%llu", nid, size); 6449 } 6450 seq_putc(m, '\n'); 6451 } 6452 6453 return 0; 6454 } 6455 #endif 6456 6457 static int memory_oom_group_show(struct seq_file *m, void *v) 6458 { 6459 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6460 6461 seq_printf(m, "%d\n", memcg->oom_group); 6462 6463 return 0; 6464 } 6465 6466 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6467 char *buf, size_t nbytes, loff_t off) 6468 { 6469 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6470 int ret, oom_group; 6471 6472 buf = strstrip(buf); 6473 if (!buf) 6474 return -EINVAL; 6475 6476 ret = kstrtoint(buf, 0, &oom_group); 6477 if (ret) 6478 return ret; 6479 6480 if (oom_group != 0 && oom_group != 1) 6481 return -EINVAL; 6482 6483 memcg->oom_group = oom_group; 6484 6485 return nbytes; 6486 } 6487 6488 static struct cftype memory_files[] = { 6489 { 6490 .name = "current", 6491 .flags = CFTYPE_NOT_ON_ROOT, 6492 .read_u64 = memory_current_read, 6493 }, 
6494 { 6495 .name = "min", 6496 .flags = CFTYPE_NOT_ON_ROOT, 6497 .seq_show = memory_min_show, 6498 .write = memory_min_write, 6499 }, 6500 { 6501 .name = "low", 6502 .flags = CFTYPE_NOT_ON_ROOT, 6503 .seq_show = memory_low_show, 6504 .write = memory_low_write, 6505 }, 6506 { 6507 .name = "high", 6508 .flags = CFTYPE_NOT_ON_ROOT, 6509 .seq_show = memory_high_show, 6510 .write = memory_high_write, 6511 }, 6512 { 6513 .name = "max", 6514 .flags = CFTYPE_NOT_ON_ROOT, 6515 .seq_show = memory_max_show, 6516 .write = memory_max_write, 6517 }, 6518 { 6519 .name = "events", 6520 .flags = CFTYPE_NOT_ON_ROOT, 6521 .file_offset = offsetof(struct mem_cgroup, events_file), 6522 .seq_show = memory_events_show, 6523 }, 6524 { 6525 .name = "events.local", 6526 .flags = CFTYPE_NOT_ON_ROOT, 6527 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6528 .seq_show = memory_events_local_show, 6529 }, 6530 { 6531 .name = "stat", 6532 .seq_show = memory_stat_show, 6533 }, 6534 #ifdef CONFIG_NUMA 6535 { 6536 .name = "numa_stat", 6537 .seq_show = memory_numa_stat_show, 6538 }, 6539 #endif 6540 { 6541 .name = "oom.group", 6542 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6543 .seq_show = memory_oom_group_show, 6544 .write = memory_oom_group_write, 6545 }, 6546 { } /* terminate */ 6547 }; 6548 6549 struct cgroup_subsys memory_cgrp_subsys = { 6550 .css_alloc = mem_cgroup_css_alloc, 6551 .css_online = mem_cgroup_css_online, 6552 .css_offline = mem_cgroup_css_offline, 6553 .css_released = mem_cgroup_css_released, 6554 .css_free = mem_cgroup_css_free, 6555 .css_reset = mem_cgroup_css_reset, 6556 .can_attach = mem_cgroup_can_attach, 6557 .cancel_attach = mem_cgroup_cancel_attach, 6558 .post_attach = mem_cgroup_move_task, 6559 .bind = mem_cgroup_bind, 6560 .dfl_cftypes = memory_files, 6561 .legacy_cftypes = mem_cgroup_legacy_files, 6562 .early_init = 0, 6563 }; 6564 6565 /* 6566 * This function calculates an individual cgroup's effective 6567 * protection which is derived from its own memory.min/low, its 6568 * parent's and siblings' settings, as well as the actual memory 6569 * distribution in the tree. 6570 * 6571 * The following rules apply to the effective protection values: 6572 * 6573 * 1. At the first level of reclaim, effective protection is equal to 6574 * the declared protection in memory.min and memory.low. 6575 * 6576 * 2. To enable safe delegation of the protection configuration, at 6577 * subsequent levels the effective protection is capped to the 6578 * parent's effective protection. 6579 * 6580 * 3. To make complex and dynamic subtrees easier to configure, the 6581 * user is allowed to overcommit the declared protection at a given 6582 * level. If that is the case, the parent's effective protection is 6583 * distributed to the children in proportion to how much protection 6584 * they have declared and how much of it they are utilizing. 6585 * 6586 * This makes distribution proportional, but also work-conserving: 6587 * if one cgroup claims much more protection than it uses memory, 6588 * the unused remainder is available to its siblings. 6589 * 6590 * 4. Conversely, when the declared protection is undercommitted at a 6591 * given level, the distribution of the larger parental protection 6592 * budget is NOT proportional. A cgroup's protection from a sibling 6593 * is capped to its own memory.min/low setting. 6594 * 6595 * 5. 
However, to allow protecting recursive subtrees from each other 6596 * without having to declare each individual cgroup's fixed share 6597 * of the ancestor's claim to protection, any unutilized - 6598 * "floating" - protection from up the tree is distributed in 6599 * proportion to each cgroup's *usage*. This makes the protection 6600 * neutral wrt sibling cgroups and lets them compete freely over 6601 * the shared parental protection budget, but it protects the 6602 * subtree as a whole from neighboring subtrees. 6603 * 6604 * Note that 4. and 5. are not in conflict: 4. is about protecting 6605 * against immediate siblings whereas 5. is about protecting against 6606 * neighboring subtrees. 6607 */ 6608 static unsigned long effective_protection(unsigned long usage, 6609 unsigned long parent_usage, 6610 unsigned long setting, 6611 unsigned long parent_effective, 6612 unsigned long siblings_protected) 6613 { 6614 unsigned long protected; 6615 unsigned long ep; 6616 6617 protected = min(usage, setting); 6618 /* 6619 * If all cgroups at this level combined claim and use more 6620 * protection than what the parent affords them, distribute 6621 * shares in proportion to utilization. 6622 * 6623 * We are using actual utilization rather than the statically 6624 * claimed protection in order to be work-conserving: claimed 6625 * but unused protection is available to siblings that would 6626 * otherwise get a smaller chunk than what they claimed. 6627 */ 6628 if (siblings_protected > parent_effective) 6629 return protected * parent_effective / siblings_protected; 6630 6631 /* 6632 * Ok, utilized protection of all children is within what the 6633 * parent affords them, so we know whatever this child claims 6634 * and utilizes is effectively protected. 6635 * 6636 * If there is unprotected usage beyond this value, reclaim 6637 * will apply pressure in proportion to that amount. 6638 * 6639 * If there is unutilized protection, the cgroup will be fully 6640 * shielded from reclaim, but we do return a smaller value for 6641 * protection than what the group could enjoy in theory. This 6642 * is okay. With the overcommit distribution above, effective 6643 * protection is always dependent on how memory is actually 6644 * consumed among the siblings anyway. 6645 */ 6646 ep = protected; 6647 6648 /* 6649 * If the children aren't claiming (all of) the protection 6650 * afforded to them by the parent, distribute the remainder in 6651 * proportion to the (unprotected) memory of each cgroup. That 6652 * way, cgroups that aren't explicitly prioritized wrt each 6653 * other compete freely over the allowance, but they are 6654 * collectively protected from neighboring trees. 6655 * 6656 * We're using unprotected memory for the weight so that if 6657 * some cgroups DO claim explicit protection, we don't protect 6658 * the same bytes twice. 6659 * 6660 * Check both usage and parent_usage against the respective 6661 * protected values. One should imply the other, but they 6662 * aren't read atomically - make sure the division is sane.
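* For example, with made-up numbers: parent_effective = 100, siblings_protected = 60, parent_usage = 200, usage = 50 and protected = 10 yield unclaimed = (100 - 60) * (50 - 10) / (200 - 60) = 11 pages (integer division), which are added on top of ep below.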
6663 */ 6664 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6665 return ep; 6666 if (parent_effective > siblings_protected && 6667 parent_usage > siblings_protected && 6668 usage > protected) { 6669 unsigned long unclaimed; 6670 6671 unclaimed = parent_effective - siblings_protected; 6672 unclaimed *= usage - protected; 6673 unclaimed /= parent_usage - siblings_protected; 6674 6675 ep += unclaimed; 6676 } 6677 6678 return ep; 6679 } 6680 6681 /** 6682 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 6683 * @root: the top ancestor of the sub-tree being checked 6684 * @memcg: the memory cgroup to check 6685 * 6686 * WARNING: This function is not stateless! It can only be used as part 6687 * of a top-down tree iteration, not for isolated queries. 6688 */ 6689 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6690 struct mem_cgroup *memcg) 6691 { 6692 unsigned long usage, parent_usage; 6693 struct mem_cgroup *parent; 6694 6695 if (mem_cgroup_disabled()) 6696 return; 6697 6698 if (!root) 6699 root = root_mem_cgroup; 6700 6701 /* 6702 * Effective values of the reclaim targets are ignored so they 6703 * can be stale. Have a look at mem_cgroup_protection for more 6704 * details. 6705 * TODO: calculation should be more robust so that we do not need 6706 * that special casing. 6707 */ 6708 if (memcg == root) 6709 return; 6710 6711 usage = page_counter_read(&memcg->memory); 6712 if (!usage) 6713 return; 6714 6715 parent = parent_mem_cgroup(memcg); 6716 /* No parent means a non-hierarchical mode on v1 memcg */ 6717 if (!parent) 6718 return; 6719 6720 if (parent == root) { 6721 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6722 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6723 return; 6724 } 6725 6726 parent_usage = page_counter_read(&parent->memory); 6727 6728 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6729 READ_ONCE(memcg->memory.min), 6730 READ_ONCE(parent->memory.emin), 6731 atomic_long_read(&parent->memory.children_min_usage))); 6732 6733 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6734 READ_ONCE(memcg->memory.low), 6735 READ_ONCE(parent->memory.elow), 6736 atomic_long_read(&parent->memory.children_low_usage))); 6737 } 6738 6739 /** 6740 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6741 * @page: page to charge 6742 * @mm: mm context of the victim 6743 * @gfp_mask: reclaim mode 6744 * 6745 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6746 * pages according to @gfp_mask if necessary. 6747 * 6748 * Returns 0 on success. Otherwise, an error code is returned. 6749 */ 6750 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6751 { 6752 unsigned int nr_pages = thp_nr_pages(page); 6753 struct mem_cgroup *memcg = NULL; 6754 int ret = 0; 6755 6756 if (mem_cgroup_disabled()) 6757 goto out; 6758 6759 if (PageSwapCache(page)) { 6760 swp_entry_t ent = { .val = page_private(page), }; 6761 unsigned short id; 6762 6763 /* 6764 * Every swap fault against a single page tries to charge the 6765 * page, bail as early as possible. shmem_unuse() encounters 6766 * already charged pages, too. page->mem_cgroup is protected 6767 * by the page lock, which serializes swap cache removal, which 6768 * in turn serializes uncharging.
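* (If the memcg recorded for the swap entry has been offlined in the meantime, css_tryget_online() below fails and we fall back to charging the memcg of the faulting @mm instead.)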
6769 */ 6770 VM_BUG_ON_PAGE(!PageLocked(page), page); 6771 if (compound_head(page)->mem_cgroup) 6772 goto out; 6773 6774 id = lookup_swap_cgroup_id(ent); 6775 rcu_read_lock(); 6776 memcg = mem_cgroup_from_id(id); 6777 if (memcg && !css_tryget_online(&memcg->css)) 6778 memcg = NULL; 6779 rcu_read_unlock(); 6780 } 6781 6782 if (!memcg) 6783 memcg = get_mem_cgroup_from_mm(mm); 6784 6785 ret = try_charge(memcg, gfp_mask, nr_pages); 6786 if (ret) 6787 goto out_put; 6788 6789 css_get(&memcg->css); 6790 commit_charge(page, memcg); 6791 6792 local_irq_disable(); 6793 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6794 memcg_check_events(memcg, page); 6795 local_irq_enable(); 6796 6797 if (PageSwapCache(page)) { 6798 swp_entry_t entry = { .val = page_private(page) }; 6799 /* 6800 * The swap entry might not get freed for a long time, 6801 * let's not wait for it. The page already received a 6802 * memory+swap charge, drop the swap entry duplicate. 6803 */ 6804 mem_cgroup_uncharge_swap(entry, nr_pages); 6805 } 6806 6807 out_put: 6808 css_put(&memcg->css); 6809 out: 6810 return ret; 6811 } 6812 6813 struct uncharge_gather { 6814 struct mem_cgroup *memcg; 6815 unsigned long nr_pages; 6816 unsigned long pgpgout; 6817 unsigned long nr_kmem; 6818 struct page *dummy_page; 6819 }; 6820 6821 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6822 { 6823 memset(ug, 0, sizeof(*ug)); 6824 } 6825 6826 static void uncharge_batch(const struct uncharge_gather *ug) 6827 { 6828 unsigned long flags; 6829 6830 if (!mem_cgroup_is_root(ug->memcg)) { 6831 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); 6832 if (do_memsw_account()) 6833 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); 6834 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6835 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6836 memcg_oom_recover(ug->memcg); 6837 } 6838 6839 local_irq_save(flags); 6840 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6841 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); 6842 memcg_check_events(ug->memcg, ug->dummy_page); 6843 local_irq_restore(flags); 6844 6845 /* drop reference from uncharge_page */ 6846 css_put(&ug->memcg->css); 6847 } 6848 6849 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6850 { 6851 unsigned long nr_pages; 6852 6853 VM_BUG_ON_PAGE(PageLRU(page), page); 6854 6855 if (!page->mem_cgroup) 6856 return; 6857 6858 /* 6859 * Nobody should be changing or seriously looking at 6860 * page->mem_cgroup at this point, we have fully 6861 * exclusive access to the page. 6862 */ 6863 6864 if (ug->memcg != page->mem_cgroup) { 6865 if (ug->memcg) { 6866 uncharge_batch(ug); 6867 uncharge_gather_clear(ug); 6868 } 6869 ug->memcg = page->mem_cgroup; 6870 6871 /* pairs with css_put in uncharge_batch */ 6872 css_get(&ug->memcg->css); 6873 } 6874 6875 nr_pages = compound_nr(page); 6876 ug->nr_pages += nr_pages; 6877 6878 if (!PageKmemcg(page)) { 6879 ug->pgpgout++; 6880 } else { 6881 ug->nr_kmem += nr_pages; 6882 __ClearPageKmemcg(page); 6883 } 6884 6885 ug->dummy_page = page; 6886 page->mem_cgroup = NULL; 6887 css_put(&ug->memcg->css); 6888 } 6889 6890 static void uncharge_list(struct list_head *page_list) 6891 { 6892 struct uncharge_gather ug; 6893 struct list_head *next; 6894 6895 uncharge_gather_clear(&ug); 6896 6897 /* 6898 * Note that the list can be a single page->lru; hence the 6899 * do-while loop instead of a simple list_for_each_entry(). 
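* uncharge_page() gathers consecutive pages that belong to the same memcg into the uncharge_gather and flushes it via uncharge_batch() whenever the memcg changes, so a largely homogeneous list needs only a few counter updates.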
6900 */ 6901 next = page_list->next; 6902 do { 6903 struct page *page; 6904 6905 page = list_entry(next, struct page, lru); 6906 next = page->lru.next; 6907 6908 uncharge_page(page, &ug); 6909 } while (next != page_list); 6910 6911 if (ug.memcg) 6912 uncharge_batch(&ug); 6913 } 6914 6915 /** 6916 * mem_cgroup_uncharge - uncharge a page 6917 * @page: page to uncharge 6918 * 6919 * Uncharge a page previously charged with mem_cgroup_charge(). 6920 */ 6921 void mem_cgroup_uncharge(struct page *page) 6922 { 6923 struct uncharge_gather ug; 6924 6925 if (mem_cgroup_disabled()) 6926 return; 6927 6928 /* Don't touch page->lru of any random page, pre-check: */ 6929 if (!page->mem_cgroup) 6930 return; 6931 6932 uncharge_gather_clear(&ug); 6933 uncharge_page(page, &ug); 6934 uncharge_batch(&ug); 6935 } 6936 6937 /** 6938 * mem_cgroup_uncharge_list - uncharge a list of page 6939 * @page_list: list of pages to uncharge 6940 * 6941 * Uncharge a list of pages previously charged with 6942 * mem_cgroup_charge(). 6943 */ 6944 void mem_cgroup_uncharge_list(struct list_head *page_list) 6945 { 6946 if (mem_cgroup_disabled()) 6947 return; 6948 6949 if (!list_empty(page_list)) 6950 uncharge_list(page_list); 6951 } 6952 6953 /** 6954 * mem_cgroup_migrate - charge a page's replacement 6955 * @oldpage: currently circulating page 6956 * @newpage: replacement page 6957 * 6958 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6959 * be uncharged upon free. 6960 * 6961 * Both pages must be locked, @newpage->mapping must be set up. 6962 */ 6963 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6964 { 6965 struct mem_cgroup *memcg; 6966 unsigned int nr_pages; 6967 unsigned long flags; 6968 6969 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6970 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6971 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6972 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6973 newpage); 6974 6975 if (mem_cgroup_disabled()) 6976 return; 6977 6978 /* Page cache replacement: new page already charged? */ 6979 if (newpage->mem_cgroup) 6980 return; 6981 6982 /* Swapcache readahead pages can get replaced before being charged */ 6983 memcg = oldpage->mem_cgroup; 6984 if (!memcg) 6985 return; 6986 6987 /* Force-charge the new page. The old one will be freed soon */ 6988 nr_pages = thp_nr_pages(newpage); 6989 6990 page_counter_charge(&memcg->memory, nr_pages); 6991 if (do_memsw_account()) 6992 page_counter_charge(&memcg->memsw, nr_pages); 6993 6994 css_get(&memcg->css); 6995 commit_charge(newpage, memcg); 6996 6997 local_irq_save(flags); 6998 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 6999 memcg_check_events(memcg, newpage); 7000 local_irq_restore(flags); 7001 } 7002 7003 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7004 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7005 7006 void mem_cgroup_sk_alloc(struct sock *sk) 7007 { 7008 struct mem_cgroup *memcg; 7009 7010 if (!mem_cgroup_sockets_enabled) 7011 return; 7012 7013 /* Do not associate the sock with unrelated interrupted task's memcg. 
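* Such sockets simply keep sk->sk_memcg == NULL and their memory is not accounted to any cgroup.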
*/ 7014 if (in_interrupt()) 7015 return; 7016 7017 rcu_read_lock(); 7018 memcg = mem_cgroup_from_task(current); 7019 if (memcg == root_mem_cgroup) 7020 goto out; 7021 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7022 goto out; 7023 if (css_tryget(&memcg->css)) 7024 sk->sk_memcg = memcg; 7025 out: 7026 rcu_read_unlock(); 7027 } 7028 7029 void mem_cgroup_sk_free(struct sock *sk) 7030 { 7031 if (sk->sk_memcg) 7032 css_put(&sk->sk_memcg->css); 7033 } 7034 7035 /** 7036 * mem_cgroup_charge_skmem - charge socket memory 7037 * @memcg: memcg to charge 7038 * @nr_pages: number of pages to charge 7039 * 7040 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7041 * @memcg's configured limit, %false if the charge had to be forced. 7042 */ 7043 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7044 { 7045 gfp_t gfp_mask = GFP_KERNEL; 7046 7047 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7048 struct page_counter *fail; 7049 7050 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7051 memcg->tcpmem_pressure = 0; 7052 return true; 7053 } 7054 page_counter_charge(&memcg->tcpmem, nr_pages); 7055 memcg->tcpmem_pressure = 1; 7056 return false; 7057 } 7058 7059 /* Don't block in the packet receive path */ 7060 if (in_softirq()) 7061 gfp_mask = GFP_NOWAIT; 7062 7063 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7064 7065 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7066 return true; 7067 7068 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7069 return false; 7070 } 7071 7072 /** 7073 * mem_cgroup_uncharge_skmem - uncharge socket memory 7074 * @memcg: memcg to uncharge 7075 * @nr_pages: number of pages to uncharge 7076 */ 7077 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7078 { 7079 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7080 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7081 return; 7082 } 7083 7084 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7085 7086 refill_stock(memcg, nr_pages); 7087 } 7088 7089 static int __init cgroup_memory(char *s) 7090 { 7091 char *token; 7092 7093 while ((token = strsep(&s, ",")) != NULL) { 7094 if (!*token) 7095 continue; 7096 if (!strcmp(token, "nosocket")) 7097 cgroup_memory_nosocket = true; 7098 if (!strcmp(token, "nokmem")) 7099 cgroup_memory_nokmem = true; 7100 } 7101 return 0; 7102 } 7103 __setup("cgroup.memory=", cgroup_memory); 7104 7105 /* 7106 * subsys_initcall() for memory controller. 7107 * 7108 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7109 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7110 * basically everything that doesn't depend on a specific mem_cgroup structure 7111 * should be initialized from here. 7112 */ 7113 static int __init mem_cgroup_init(void) 7114 { 7115 int cpu, node; 7116 7117 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7118 memcg_hotplug_cpu_dead); 7119 7120 for_each_possible_cpu(cpu) 7121 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7122 drain_local_stock); 7123 7124 for_each_node(node) { 7125 struct mem_cgroup_tree_per_node *rtpn; 7126 7127 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7128 node_online(node) ? 
node : NUMA_NO_NODE); 7129 7130 rtpn->rb_root = RB_ROOT; 7131 rtpn->rb_rightmost = NULL; 7132 spin_lock_init(&rtpn->lock); 7133 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7134 } 7135 7136 return 0; 7137 } 7138 subsys_initcall(mem_cgroup_init); 7139 7140 #ifdef CONFIG_MEMCG_SWAP 7141 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7142 { 7143 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7144 /* 7145 * The root cgroup cannot be destroyed, so its refcount must 7146 * always be >= 1. 7147 */ 7148 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7149 VM_BUG_ON(1); 7150 break; 7151 } 7152 memcg = parent_mem_cgroup(memcg); 7153 if (!memcg) 7154 memcg = root_mem_cgroup; 7155 } 7156 return memcg; 7157 } 7158 7159 /** 7160 * mem_cgroup_swapout - transfer a memsw charge to swap 7161 * @page: page whose memsw charge to transfer 7162 * @entry: swap entry to move the charge to 7163 * 7164 * Transfer the memsw charge of @page to @entry. 7165 */ 7166 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7167 { 7168 struct mem_cgroup *memcg, *swap_memcg; 7169 unsigned int nr_entries; 7170 unsigned short oldid; 7171 7172 VM_BUG_ON_PAGE(PageLRU(page), page); 7173 VM_BUG_ON_PAGE(page_count(page), page); 7174 7175 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7176 return; 7177 7178 memcg = page->mem_cgroup; 7179 7180 /* Readahead page, never charged */ 7181 if (!memcg) 7182 return; 7183 7184 /* 7185 * In case the memcg owning these pages has been offlined and doesn't 7186 * have an ID allocated to it anymore, charge the closest online 7187 * ancestor for the swap instead and transfer the memory+swap charge. 7188 */ 7189 swap_memcg = mem_cgroup_id_get_online(memcg); 7190 nr_entries = thp_nr_pages(page); 7191 /* Get references for the tail pages, too */ 7192 if (nr_entries > 1) 7193 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7194 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7195 nr_entries); 7196 VM_BUG_ON_PAGE(oldid, page); 7197 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7198 7199 page->mem_cgroup = NULL; 7200 7201 if (!mem_cgroup_is_root(memcg)) 7202 page_counter_uncharge(&memcg->memory, nr_entries); 7203 7204 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7205 if (!mem_cgroup_is_root(swap_memcg)) 7206 page_counter_charge(&swap_memcg->memsw, nr_entries); 7207 page_counter_uncharge(&memcg->memsw, nr_entries); 7208 } 7209 7210 /* 7211 * Interrupts should be disabled here because the caller holds the 7212 * i_pages lock which is taken with interrupts-off. It is 7213 * important here to have the interrupts disabled because it is the 7214 * only synchronisation we have for updating the per-CPU variables. 7215 */ 7216 VM_BUG_ON(!irqs_disabled()); 7217 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7218 memcg_check_events(memcg, page); 7219 7220 css_put(&memcg->css); 7221 } 7222 7223 /** 7224 * mem_cgroup_try_charge_swap - try charging swap space for a page 7225 * @page: page being added to swap 7226 * @entry: swap entry to charge 7227 * 7228 * Try to charge @page's memcg for the swap space at @entry. 7229 * 7230 * Returns 0 on success, -ENOMEM on failure.
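* Note that this is a no-op on the legacy (v1) hierarchy; there, swap is accounted against memsw at swapout time in mem_cgroup_swapout() instead.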
7231 */ 7232 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7233 { 7234 unsigned int nr_pages = thp_nr_pages(page); 7235 struct page_counter *counter; 7236 struct mem_cgroup *memcg; 7237 unsigned short oldid; 7238 7239 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7240 return 0; 7241 7242 memcg = page->mem_cgroup; 7243 7244 /* Readahead page, never charged */ 7245 if (!memcg) 7246 return 0; 7247 7248 if (!entry.val) { 7249 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7250 return 0; 7251 } 7252 7253 memcg = mem_cgroup_id_get_online(memcg); 7254 7255 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7256 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7257 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7258 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7259 mem_cgroup_id_put(memcg); 7260 return -ENOMEM; 7261 } 7262 7263 /* Get references for the tail pages, too */ 7264 if (nr_pages > 1) 7265 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7266 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7267 VM_BUG_ON_PAGE(oldid, page); 7268 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7269 7270 return 0; 7271 } 7272 7273 /** 7274 * mem_cgroup_uncharge_swap - uncharge swap space 7275 * @entry: swap entry to uncharge 7276 * @nr_pages: the amount of swap space to uncharge 7277 */ 7278 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7279 { 7280 struct mem_cgroup *memcg; 7281 unsigned short id; 7282 7283 id = swap_cgroup_record(entry, 0, nr_pages); 7284 rcu_read_lock(); 7285 memcg = mem_cgroup_from_id(id); 7286 if (memcg) { 7287 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7288 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7289 page_counter_uncharge(&memcg->swap, nr_pages); 7290 else 7291 page_counter_uncharge(&memcg->memsw, nr_pages); 7292 } 7293 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7294 mem_cgroup_id_put_many(memcg, nr_pages); 7295 } 7296 rcu_read_unlock(); 7297 } 7298 7299 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7300 { 7301 long nr_swap_pages = get_nr_swap_pages(); 7302 7303 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7304 return nr_swap_pages; 7305 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7306 nr_swap_pages = min_t(long, nr_swap_pages, 7307 READ_ONCE(memcg->swap.max) - 7308 page_counter_read(&memcg->swap)); 7309 return nr_swap_pages; 7310 } 7311 7312 bool mem_cgroup_swap_full(struct page *page) 7313 { 7314 struct mem_cgroup *memcg; 7315 7316 VM_BUG_ON_PAGE(!PageLocked(page), page); 7317 7318 if (vm_swap_full()) 7319 return true; 7320 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7321 return false; 7322 7323 memcg = page->mem_cgroup; 7324 if (!memcg) 7325 return false; 7326 7327 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7328 unsigned long usage = page_counter_read(&memcg->swap); 7329 7330 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7331 usage * 2 >= READ_ONCE(memcg->swap.max)) 7332 return true; 7333 } 7334 7335 return false; 7336 } 7337 7338 static int __init setup_swap_account(char *s) 7339 { 7340 if (!strcmp(s, "1")) 7341 cgroup_memory_noswap = 0; 7342 else if (!strcmp(s, "0")) 7343 cgroup_memory_noswap = 1; 7344 return 1; 7345 } 7346 __setup("swapaccount=", setup_swap_account); 7347 7348 static u64 swap_current_read(struct cgroup_subsys_state *css, 7349 struct cftype *cft) 7350 { 7351 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7352 7353 return 
(u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7354 } 7355 7356 static int swap_high_show(struct seq_file *m, void *v) 7357 { 7358 return seq_puts_memcg_tunable(m, 7359 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7360 } 7361 7362 static ssize_t swap_high_write(struct kernfs_open_file *of, 7363 char *buf, size_t nbytes, loff_t off) 7364 { 7365 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7366 unsigned long high; 7367 int err; 7368 7369 buf = strstrip(buf); 7370 err = page_counter_memparse(buf, "max", &high); 7371 if (err) 7372 return err; 7373 7374 page_counter_set_high(&memcg->swap, high); 7375 7376 return nbytes; 7377 } 7378 7379 static int swap_max_show(struct seq_file *m, void *v) 7380 { 7381 return seq_puts_memcg_tunable(m, 7382 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7383 } 7384 7385 static ssize_t swap_max_write(struct kernfs_open_file *of, 7386 char *buf, size_t nbytes, loff_t off) 7387 { 7388 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7389 unsigned long max; 7390 int err; 7391 7392 buf = strstrip(buf); 7393 err = page_counter_memparse(buf, "max", &max); 7394 if (err) 7395 return err; 7396 7397 xchg(&memcg->swap.max, max); 7398 7399 return nbytes; 7400 } 7401 7402 static int swap_events_show(struct seq_file *m, void *v) 7403 { 7404 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7405 7406 seq_printf(m, "high %lu\n", 7407 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7408 seq_printf(m, "max %lu\n", 7409 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7410 seq_printf(m, "fail %lu\n", 7411 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7412 7413 return 0; 7414 } 7415 7416 static struct cftype swap_files[] = { 7417 { 7418 .name = "swap.current", 7419 .flags = CFTYPE_NOT_ON_ROOT, 7420 .read_u64 = swap_current_read, 7421 }, 7422 { 7423 .name = "swap.high", 7424 .flags = CFTYPE_NOT_ON_ROOT, 7425 .seq_show = swap_high_show, 7426 .write = swap_high_write, 7427 }, 7428 { 7429 .name = "swap.max", 7430 .flags = CFTYPE_NOT_ON_ROOT, 7431 .seq_show = swap_max_show, 7432 .write = swap_max_write, 7433 }, 7434 { 7435 .name = "swap.events", 7436 .flags = CFTYPE_NOT_ON_ROOT, 7437 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7438 .seq_show = swap_events_show, 7439 }, 7440 { } /* terminate */ 7441 }; 7442 7443 static struct cftype memsw_files[] = { 7444 { 7445 .name = "memsw.usage_in_bytes", 7446 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7447 .read_u64 = mem_cgroup_read_u64, 7448 }, 7449 { 7450 .name = "memsw.max_usage_in_bytes", 7451 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7452 .write = mem_cgroup_reset, 7453 .read_u64 = mem_cgroup_read_u64, 7454 }, 7455 { 7456 .name = "memsw.limit_in_bytes", 7457 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7458 .write = mem_cgroup_write, 7459 .read_u64 = mem_cgroup_read_u64, 7460 }, 7461 { 7462 .name = "memsw.failcnt", 7463 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7464 .write = mem_cgroup_reset, 7465 .read_u64 = mem_cgroup_read_u64, 7466 }, 7467 { }, /* terminate */ 7468 }; 7469 7470 /* 7471 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7472 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7473 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7474 * boot parameter. This may result in premature OOPS inside 7475 * mem_cgroup_get_nr_swap_pages() function in corner cases. 
7476 */ 7477 static int __init mem_cgroup_swap_init(void) 7478 { 7479 /* No memory control -> no swap control */ 7480 if (mem_cgroup_disabled()) 7481 cgroup_memory_noswap = true; 7482 7483 if (cgroup_memory_noswap) 7484 return 0; 7485 7486 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7487 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7488 7489 return 0; 7490 } 7491 core_initcall(mem_cgroup_swap_init); 7492 7493 #endif /* CONFIG_MEMCG_SWAP */ 7494
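/* Illustrative boot-time usage of the parameters parsed above, e.g. "cgroup.memory=nosocket,nokmem swapaccount=0" on the kernel command line, which disables socket and kernel memory accounting and turns swap accounting off. */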