1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 */ 24 25 #include <linux/page_counter.h> 26 #include <linux/memcontrol.h> 27 #include <linux/cgroup.h> 28 #include <linux/mm.h> 29 #include <linux/sched/mm.h> 30 #include <linux/shmem_fs.h> 31 #include <linux/hugetlb.h> 32 #include <linux/pagemap.h> 33 #include <linux/vm_event_item.h> 34 #include <linux/smp.h> 35 #include <linux/page-flags.h> 36 #include <linux/backing-dev.h> 37 #include <linux/bit_spinlock.h> 38 #include <linux/rcupdate.h> 39 #include <linux/limits.h> 40 #include <linux/export.h> 41 #include <linux/mutex.h> 42 #include <linux/rbtree.h> 43 #include <linux/slab.h> 44 #include <linux/swap.h> 45 #include <linux/swapops.h> 46 #include <linux/spinlock.h> 47 #include <linux/eventfd.h> 48 #include <linux/poll.h> 49 #include <linux/sort.h> 50 #include <linux/fs.h> 51 #include <linux/seq_file.h> 52 #include <linux/vmpressure.h> 53 #include <linux/mm_inline.h> 54 #include <linux/swap_cgroup.h> 55 #include <linux/cpu.h> 56 #include <linux/oom.h> 57 #include <linux/lockdep.h> 58 #include <linux/file.h> 59 #include <linux/tracehook.h> 60 #include "internal.h" 61 #include <net/sock.h> 62 #include <net/ip.h> 63 #include "slab.h" 64 65 #include <linux/uaccess.h> 66 67 #include <trace/events/vmscan.h> 68 69 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 70 EXPORT_SYMBOL(memory_cgrp_subsys); 71 72 struct mem_cgroup *root_mem_cgroup __read_mostly; 73 74 #define MEM_CGROUP_RECLAIM_RETRIES 5 75 76 /* Socket memory accounting disabled? */ 77 static bool cgroup_memory_nosocket; 78 79 /* Kernel memory accounting disabled? 
*/ 80 static bool cgroup_memory_nokmem; 81 82 /* Whether the swap controller is active */ 83 #ifdef CONFIG_MEMCG_SWAP 84 int do_swap_account __read_mostly; 85 #else 86 #define do_swap_account 0 87 #endif 88 89 /* Whether legacy memory+swap accounting is active */ 90 static bool do_memsw_account(void) 91 { 92 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account; 93 } 94 95 static const char *const mem_cgroup_lru_names[] = { 96 "inactive_anon", 97 "active_anon", 98 "inactive_file", 99 "active_file", 100 "unevictable", 101 }; 102 103 #define THRESHOLDS_EVENTS_TARGET 128 104 #define SOFTLIMIT_EVENTS_TARGET 1024 105 #define NUMAINFO_EVENTS_TARGET 1024 106 107 /* 108 * Cgroups above their limits are maintained in a RB-Tree, independent of 109 * their hierarchy representation 110 */ 111 112 struct mem_cgroup_tree_per_node { 113 struct rb_root rb_root; 114 struct rb_node *rb_rightmost; 115 spinlock_t lock; 116 }; 117 118 struct mem_cgroup_tree { 119 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 120 }; 121 122 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 123 124 /* for OOM */ 125 struct mem_cgroup_eventfd_list { 126 struct list_head list; 127 struct eventfd_ctx *eventfd; 128 }; 129 130 /* 131 * cgroup_event represents events which userspace want to receive. 132 */ 133 struct mem_cgroup_event { 134 /* 135 * memcg which the event belongs to. 136 */ 137 struct mem_cgroup *memcg; 138 /* 139 * eventfd to signal userspace about the event. 140 */ 141 struct eventfd_ctx *eventfd; 142 /* 143 * Each of these stored in a list by the cgroup. 144 */ 145 struct list_head list; 146 /* 147 * register_event() callback will be used to add new userspace 148 * waiter for changes related to this event. Use eventfd_signal() 149 * on eventfd to send notification to userspace. 150 */ 151 int (*register_event)(struct mem_cgroup *memcg, 152 struct eventfd_ctx *eventfd, const char *args); 153 /* 154 * unregister_event() callback will be called when userspace closes 155 * the eventfd or on cgroup removing. This callback must be set, 156 * if you want provide notification functionality. 157 */ 158 void (*unregister_event)(struct mem_cgroup *memcg, 159 struct eventfd_ctx *eventfd); 160 /* 161 * All fields below needed to unregister event when 162 * userspace closes eventfd. 163 */ 164 poll_table pt; 165 wait_queue_head_t *wqh; 166 wait_queue_entry_t wait; 167 struct work_struct remove; 168 }; 169 170 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 171 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 172 173 /* Stuffs for move charges at task migration. */ 174 /* 175 * Types of charges to be moved. 176 */ 177 #define MOVE_ANON 0x1U 178 #define MOVE_FILE 0x2U 179 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 180 181 /* "mc" and its members are protected by cgroup_mutex */ 182 static struct move_charge_struct { 183 spinlock_t lock; /* for from, to */ 184 struct mm_struct *mm; 185 struct mem_cgroup *from; 186 struct mem_cgroup *to; 187 unsigned long flags; 188 unsigned long precharge; 189 unsigned long moved_charge; 190 unsigned long moved_swap; 191 struct task_struct *moving_task; /* a task moving charges */ 192 wait_queue_head_t waitq; /* a waitq for other context */ 193 } mc = { 194 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 195 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 196 }; 197 198 /* 199 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 200 * limit reclaim to prevent infinite loops, if they ever occur. 
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 * it works better in sparse environments, where we have a lot of memcgs
 * but only a few of them kmem-limited. Also, if we had, for instance, 200
 * memcgs and none but the 200th was kmem-limited, we would still need a
 * 200-entry array for it.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. On a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is not strictly necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
301 */ 302 #define MEMCG_CACHES_MIN_SIZE 4 303 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 304 305 /* 306 * A lot of the calls to the cache allocation functions are expected to be 307 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are 308 * conditional to this static branch, we'll have to allow modules that does 309 * kmem_cache_alloc and the such to see this symbol as well 310 */ 311 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 312 EXPORT_SYMBOL(memcg_kmem_enabled_key); 313 314 struct workqueue_struct *memcg_kmem_cache_wq; 315 316 static int memcg_shrinker_map_size; 317 static DEFINE_MUTEX(memcg_shrinker_map_mutex); 318 319 static void memcg_free_shrinker_map_rcu(struct rcu_head *head) 320 { 321 kvfree(container_of(head, struct memcg_shrinker_map, rcu)); 322 } 323 324 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, 325 int size, int old_size) 326 { 327 struct memcg_shrinker_map *new, *old; 328 int nid; 329 330 lockdep_assert_held(&memcg_shrinker_map_mutex); 331 332 for_each_node(nid) { 333 old = rcu_dereference_protected( 334 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); 335 /* Not yet online memcg */ 336 if (!old) 337 return 0; 338 339 new = kvmalloc(sizeof(*new) + size, GFP_KERNEL); 340 if (!new) 341 return -ENOMEM; 342 343 /* Set all old bits, clear all new bits */ 344 memset(new->map, (int)0xff, old_size); 345 memset((void *)new->map + old_size, 0, size - old_size); 346 347 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); 348 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); 349 } 350 351 return 0; 352 } 353 354 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) 355 { 356 struct mem_cgroup_per_node *pn; 357 struct memcg_shrinker_map *map; 358 int nid; 359 360 if (mem_cgroup_is_root(memcg)) 361 return; 362 363 for_each_node(nid) { 364 pn = mem_cgroup_nodeinfo(memcg, nid); 365 map = rcu_dereference_protected(pn->shrinker_map, true); 366 if (map) 367 kvfree(map); 368 rcu_assign_pointer(pn->shrinker_map, NULL); 369 } 370 } 371 372 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) 373 { 374 struct memcg_shrinker_map *map; 375 int nid, size, ret = 0; 376 377 if (mem_cgroup_is_root(memcg)) 378 return 0; 379 380 mutex_lock(&memcg_shrinker_map_mutex); 381 size = memcg_shrinker_map_size; 382 for_each_node(nid) { 383 map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); 384 if (!map) { 385 memcg_free_shrinker_maps(memcg); 386 ret = -ENOMEM; 387 break; 388 } 389 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); 390 } 391 mutex_unlock(&memcg_shrinker_map_mutex); 392 393 return ret; 394 } 395 396 int memcg_expand_shrinker_maps(int new_id) 397 { 398 int size, old_size, ret = 0; 399 struct mem_cgroup *memcg; 400 401 size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); 402 old_size = memcg_shrinker_map_size; 403 if (size <= old_size) 404 return 0; 405 406 mutex_lock(&memcg_shrinker_map_mutex); 407 if (!root_mem_cgroup) 408 goto unlock; 409 410 for_each_mem_cgroup(memcg) { 411 if (mem_cgroup_is_root(memcg)) 412 continue; 413 ret = memcg_expand_one_shrinker_map(memcg, size, old_size); 414 if (ret) 415 goto unlock; 416 } 417 unlock: 418 if (!ret) 419 memcg_shrinker_map_size = size; 420 mutex_unlock(&memcg_shrinker_map_mutex); 421 return ret; 422 } 423 424 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) 425 { 426 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { 427 struct memcg_shrinker_map *map; 428 429 rcu_read_lock(); 430 map = 
rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); 431 /* Pairs with smp mb in shrink_slab() */ 432 smp_mb__before_atomic(); 433 set_bit(shrinker_id, map->map); 434 rcu_read_unlock(); 435 } 436 } 437 438 #else /* CONFIG_MEMCG_KMEM */ 439 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) 440 { 441 return 0; 442 } 443 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { } 444 #endif /* CONFIG_MEMCG_KMEM */ 445 446 /** 447 * mem_cgroup_css_from_page - css of the memcg associated with a page 448 * @page: page of interest 449 * 450 * If memcg is bound to the default hierarchy, css of the memcg associated 451 * with @page is returned. The returned css remains associated with @page 452 * until it is released. 453 * 454 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 455 * is returned. 456 */ 457 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 458 { 459 struct mem_cgroup *memcg; 460 461 memcg = page->mem_cgroup; 462 463 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 464 memcg = root_mem_cgroup; 465 466 return &memcg->css; 467 } 468 469 /** 470 * page_cgroup_ino - return inode number of the memcg a page is charged to 471 * @page: the page 472 * 473 * Look up the closest online ancestor of the memory cgroup @page is charged to 474 * and return its inode number or 0 if @page is not charged to any cgroup. It 475 * is safe to call this function without holding a reference to @page. 476 * 477 * Note, this function is inherently racy, because there is nothing to prevent 478 * the cgroup inode from getting torn down and potentially reallocated a moment 479 * after page_cgroup_ino() returns, so it only should be used by callers that 480 * do not care (such as procfs interfaces). 
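 *
 * Illustrative sketch (not an in-tree caller): a procfs-style show method
 * could report the owning cgroup's inode number roughly like this, where
 * @m is a hypothetical seq_file supplied by that interface:
 *
 *	ino_t ino = page_cgroup_ino(page);
 *
 *	if (ino)
 *		seq_printf(m, "memcg inode: %lu\n", (unsigned long)ino);
 *	else
 *		seq_puts(m, "page is not charged to any cgroup\n");
 *
 * Since the result may already be stale by the time it is printed, it is
 * only suitable for such informational output.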
481 */ 482 ino_t page_cgroup_ino(struct page *page) 483 { 484 struct mem_cgroup *memcg; 485 unsigned long ino = 0; 486 487 rcu_read_lock(); 488 memcg = READ_ONCE(page->mem_cgroup); 489 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 490 memcg = parent_mem_cgroup(memcg); 491 if (memcg) 492 ino = cgroup_ino(memcg->css.cgroup); 493 rcu_read_unlock(); 494 return ino; 495 } 496 497 static struct mem_cgroup_per_node * 498 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 499 { 500 int nid = page_to_nid(page); 501 502 return memcg->nodeinfo[nid]; 503 } 504 505 static struct mem_cgroup_tree_per_node * 506 soft_limit_tree_node(int nid) 507 { 508 return soft_limit_tree.rb_tree_per_node[nid]; 509 } 510 511 static struct mem_cgroup_tree_per_node * 512 soft_limit_tree_from_page(struct page *page) 513 { 514 int nid = page_to_nid(page); 515 516 return soft_limit_tree.rb_tree_per_node[nid]; 517 } 518 519 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 520 struct mem_cgroup_tree_per_node *mctz, 521 unsigned long new_usage_in_excess) 522 { 523 struct rb_node **p = &mctz->rb_root.rb_node; 524 struct rb_node *parent = NULL; 525 struct mem_cgroup_per_node *mz_node; 526 bool rightmost = true; 527 528 if (mz->on_tree) 529 return; 530 531 mz->usage_in_excess = new_usage_in_excess; 532 if (!mz->usage_in_excess) 533 return; 534 while (*p) { 535 parent = *p; 536 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 537 tree_node); 538 if (mz->usage_in_excess < mz_node->usage_in_excess) { 539 p = &(*p)->rb_left; 540 rightmost = false; 541 } 542 543 /* 544 * We can't avoid mem cgroups that are over their soft 545 * limit by the same amount 546 */ 547 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 548 p = &(*p)->rb_right; 549 } 550 551 if (rightmost) 552 mctz->rb_rightmost = &mz->tree_node; 553 554 rb_link_node(&mz->tree_node, parent, p); 555 rb_insert_color(&mz->tree_node, &mctz->rb_root); 556 mz->on_tree = true; 557 } 558 559 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 560 struct mem_cgroup_tree_per_node *mctz) 561 { 562 if (!mz->on_tree) 563 return; 564 565 if (&mz->tree_node == mctz->rb_rightmost) 566 mctz->rb_rightmost = rb_prev(&mz->tree_node); 567 568 rb_erase(&mz->tree_node, &mctz->rb_root); 569 mz->on_tree = false; 570 } 571 572 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 573 struct mem_cgroup_tree_per_node *mctz) 574 { 575 unsigned long flags; 576 577 spin_lock_irqsave(&mctz->lock, flags); 578 __mem_cgroup_remove_exceeded(mz, mctz); 579 spin_unlock_irqrestore(&mctz->lock, flags); 580 } 581 582 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 583 { 584 unsigned long nr_pages = page_counter_read(&memcg->memory); 585 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 586 unsigned long excess = 0; 587 588 if (nr_pages > soft_limit) 589 excess = nr_pages - soft_limit; 590 591 return excess; 592 } 593 594 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 595 { 596 unsigned long excess; 597 struct mem_cgroup_per_node *mz; 598 struct mem_cgroup_tree_per_node *mctz; 599 600 mctz = soft_limit_tree_from_page(page); 601 if (!mctz) 602 return; 603 /* 604 * Necessary to update all ancestors when hierarchy is used. 605 * because their event counter is not touched. 
606 */ 607 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 608 mz = mem_cgroup_page_nodeinfo(memcg, page); 609 excess = soft_limit_excess(memcg); 610 /* 611 * We have to update the tree if mz is on RB-tree or 612 * mem is over its softlimit. 613 */ 614 if (excess || mz->on_tree) { 615 unsigned long flags; 616 617 spin_lock_irqsave(&mctz->lock, flags); 618 /* if on-tree, remove it */ 619 if (mz->on_tree) 620 __mem_cgroup_remove_exceeded(mz, mctz); 621 /* 622 * Insert again. mz->usage_in_excess will be updated. 623 * If excess is 0, no tree ops. 624 */ 625 __mem_cgroup_insert_exceeded(mz, mctz, excess); 626 spin_unlock_irqrestore(&mctz->lock, flags); 627 } 628 } 629 } 630 631 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 632 { 633 struct mem_cgroup_tree_per_node *mctz; 634 struct mem_cgroup_per_node *mz; 635 int nid; 636 637 for_each_node(nid) { 638 mz = mem_cgroup_nodeinfo(memcg, nid); 639 mctz = soft_limit_tree_node(nid); 640 if (mctz) 641 mem_cgroup_remove_exceeded(mz, mctz); 642 } 643 } 644 645 static struct mem_cgroup_per_node * 646 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 647 { 648 struct mem_cgroup_per_node *mz; 649 650 retry: 651 mz = NULL; 652 if (!mctz->rb_rightmost) 653 goto done; /* Nothing to reclaim from */ 654 655 mz = rb_entry(mctz->rb_rightmost, 656 struct mem_cgroup_per_node, tree_node); 657 /* 658 * Remove the node now but someone else can add it back, 659 * we will to add it back at the end of reclaim to its correct 660 * position in the tree. 661 */ 662 __mem_cgroup_remove_exceeded(mz, mctz); 663 if (!soft_limit_excess(mz->memcg) || 664 !css_tryget_online(&mz->memcg->css)) 665 goto retry; 666 done: 667 return mz; 668 } 669 670 static struct mem_cgroup_per_node * 671 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 672 { 673 struct mem_cgroup_per_node *mz; 674 675 spin_lock_irq(&mctz->lock); 676 mz = __mem_cgroup_largest_soft_limit_node(mctz); 677 spin_unlock_irq(&mctz->lock); 678 return mz; 679 } 680 681 /** 682 * __mod_memcg_state - update cgroup memory statistics 683 * @memcg: the memory cgroup 684 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 685 * @val: delta to add to the counter, can be negative 686 */ 687 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 688 { 689 long x; 690 691 if (mem_cgroup_disabled()) 692 return; 693 694 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); 695 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 696 struct mem_cgroup *mi; 697 698 atomic_long_add(x, &memcg->vmstats_local[idx]); 699 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 700 atomic_long_add(x, &mi->vmstats[idx]); 701 x = 0; 702 } 703 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); 704 } 705 706 static struct mem_cgroup_per_node * 707 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 708 { 709 struct mem_cgroup *parent; 710 711 parent = parent_mem_cgroup(pn->memcg); 712 if (!parent) 713 return NULL; 714 return mem_cgroup_nodeinfo(parent, nid); 715 } 716 717 /** 718 * __mod_lruvec_state - update lruvec memory statistics 719 * @lruvec: the lruvec 720 * @idx: the stat item 721 * @val: delta to add to the counter, can be negative 722 * 723 * The lruvec is the intersection of the NUMA node and a cgroup. This 724 * function updates the all three counters that are affected by a 725 * change of state at this level: per-node, per-cgroup, per-lruvec. 
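 *
 * Illustrative sketch (an assumption, not lifted from an in-tree caller):
 * an rmap-style path accounting one newly mapped file page could do
 *
 *	struct lruvec *lruvec;
 *
 *	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
 *	__mod_lruvec_state(lruvec, NR_FILE_MAPPED, 1);
 *
 * with interrupts disabled by the caller, as the __-prefixed stat helpers
 * assume.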
726 */ 727 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 728 int val) 729 { 730 pg_data_t *pgdat = lruvec_pgdat(lruvec); 731 struct mem_cgroup_per_node *pn; 732 struct mem_cgroup *memcg; 733 long x; 734 735 /* Update node */ 736 __mod_node_page_state(pgdat, idx, val); 737 738 if (mem_cgroup_disabled()) 739 return; 740 741 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 742 memcg = pn->memcg; 743 744 /* Update memcg */ 745 __mod_memcg_state(memcg, idx, val); 746 747 /* Update lruvec */ 748 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 749 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 750 struct mem_cgroup_per_node *pi; 751 752 atomic_long_add(x, &pn->lruvec_stat_local[idx]); 753 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 754 atomic_long_add(x, &pi->lruvec_stat[idx]); 755 x = 0; 756 } 757 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 758 } 759 760 /** 761 * __count_memcg_events - account VM events in a cgroup 762 * @memcg: the memory cgroup 763 * @idx: the event item 764 * @count: the number of events that occured 765 */ 766 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 767 unsigned long count) 768 { 769 unsigned long x; 770 771 if (mem_cgroup_disabled()) 772 return; 773 774 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); 775 if (unlikely(x > MEMCG_CHARGE_BATCH)) { 776 struct mem_cgroup *mi; 777 778 atomic_long_add(x, &memcg->vmevents_local[idx]); 779 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 780 atomic_long_add(x, &mi->vmevents[idx]); 781 x = 0; 782 } 783 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); 784 } 785 786 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 787 { 788 return atomic_long_read(&memcg->vmevents[event]); 789 } 790 791 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 792 { 793 return atomic_long_read(&memcg->vmevents_local[event]); 794 } 795 796 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 797 struct page *page, 798 bool compound, int nr_pages) 799 { 800 /* 801 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is 802 * counted as CACHE even if it's on ANON LRU. 803 */ 804 if (PageAnon(page)) 805 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages); 806 else { 807 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages); 808 if (PageSwapBacked(page)) 809 __mod_memcg_state(memcg, NR_SHMEM, nr_pages); 810 } 811 812 if (compound) { 813 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 814 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages); 815 } 816 817 /* pagein of a big page is an event. 
So, ignore page size */ 818 if (nr_pages > 0) 819 __count_memcg_events(memcg, PGPGIN, 1); 820 else { 821 __count_memcg_events(memcg, PGPGOUT, 1); 822 nr_pages = -nr_pages; /* for event */ 823 } 824 825 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 826 } 827 828 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 829 enum mem_cgroup_events_target target) 830 { 831 unsigned long val, next; 832 833 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 834 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 835 /* from time_after() in jiffies.h */ 836 if ((long)(next - val) < 0) { 837 switch (target) { 838 case MEM_CGROUP_TARGET_THRESH: 839 next = val + THRESHOLDS_EVENTS_TARGET; 840 break; 841 case MEM_CGROUP_TARGET_SOFTLIMIT: 842 next = val + SOFTLIMIT_EVENTS_TARGET; 843 break; 844 case MEM_CGROUP_TARGET_NUMAINFO: 845 next = val + NUMAINFO_EVENTS_TARGET; 846 break; 847 default: 848 break; 849 } 850 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 851 return true; 852 } 853 return false; 854 } 855 856 /* 857 * Check events in order. 858 * 859 */ 860 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 861 { 862 /* threshold event is triggered in finer grain than soft limit */ 863 if (unlikely(mem_cgroup_event_ratelimit(memcg, 864 MEM_CGROUP_TARGET_THRESH))) { 865 bool do_softlimit; 866 bool do_numainfo __maybe_unused; 867 868 do_softlimit = mem_cgroup_event_ratelimit(memcg, 869 MEM_CGROUP_TARGET_SOFTLIMIT); 870 #if MAX_NUMNODES > 1 871 do_numainfo = mem_cgroup_event_ratelimit(memcg, 872 MEM_CGROUP_TARGET_NUMAINFO); 873 #endif 874 mem_cgroup_threshold(memcg); 875 if (unlikely(do_softlimit)) 876 mem_cgroup_update_tree(memcg, page); 877 #if MAX_NUMNODES > 1 878 if (unlikely(do_numainfo)) 879 atomic_inc(&memcg->numainfo_events); 880 #endif 881 } 882 } 883 884 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 885 { 886 /* 887 * mm_update_next_owner() may clear mm->owner to NULL 888 * if it races with swapoff, page migration, etc. 889 * So this can be called with p == NULL. 890 */ 891 if (unlikely(!p)) 892 return NULL; 893 894 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 895 } 896 EXPORT_SYMBOL(mem_cgroup_from_task); 897 898 /** 899 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 900 * @mm: mm from which memcg should be extracted. It can be NULL. 901 * 902 * Obtain a reference on mm->memcg and returns it if successful. Otherwise 903 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is 904 * returned. 905 */ 906 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 907 { 908 struct mem_cgroup *memcg; 909 910 if (mem_cgroup_disabled()) 911 return NULL; 912 913 rcu_read_lock(); 914 do { 915 /* 916 * Page cache insertions can happen withou an 917 * actual mm context, e.g. during disk probing 918 * on boot, loopback IO, acct() writes etc. 919 */ 920 if (unlikely(!mm)) 921 memcg = root_mem_cgroup; 922 else { 923 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 924 if (unlikely(!memcg)) 925 memcg = root_mem_cgroup; 926 } 927 } while (!css_tryget_online(&memcg->css)); 928 rcu_read_unlock(); 929 return memcg; 930 } 931 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 932 933 /** 934 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg. 935 * @page: page from which memcg should be extracted. 936 * 937 * Obtain a reference on page->memcg and returns it if successful. Otherwise 938 * root_mem_cgroup is returned. 
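 *
 * Illustrative sketch (hypothetical caller): the acquired reference must be
 * dropped with css_put() once the memcg is no longer needed:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_page(page);
 *
 *	if (memcg) {
 *		... use memcg ...
 *		css_put(&memcg->css);
 *	}
 *
 * The NULL check is needed because this returns NULL when the memory
 * controller is disabled.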
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	if (!memcg || !css_tryget_online(&memcg->css))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

/**
 * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (unlikely(current->active_memcg)) {
		struct mem_cgroup *memcg = root_mem_cgroup;

		rcu_read_lock();
		if (css_tryget_online(&current->active_memcg->css))
			memcg = current->active_memcg;
		rcu_read_unlock();
		return memcg;
	}
	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.
The root 1059 * is provided by the caller, so we know it's alive 1060 * and kicking, and don't take an extra reference. 1061 */ 1062 memcg = mem_cgroup_from_css(css); 1063 1064 if (css == &root->css) 1065 break; 1066 1067 if (css_tryget(css)) 1068 break; 1069 1070 memcg = NULL; 1071 } 1072 1073 if (reclaim) { 1074 /* 1075 * The position could have already been updated by a competing 1076 * thread, so check that the value hasn't changed since we read 1077 * it to avoid reclaiming from the same cgroup twice. 1078 */ 1079 (void)cmpxchg(&iter->position, pos, memcg); 1080 1081 if (pos) 1082 css_put(&pos->css); 1083 1084 if (!memcg) 1085 iter->generation++; 1086 else if (!prev) 1087 reclaim->generation = iter->generation; 1088 } 1089 1090 out_unlock: 1091 rcu_read_unlock(); 1092 out: 1093 if (prev && prev != root) 1094 css_put(&prev->css); 1095 1096 return memcg; 1097 } 1098 1099 /** 1100 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1101 * @root: hierarchy root 1102 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1103 */ 1104 void mem_cgroup_iter_break(struct mem_cgroup *root, 1105 struct mem_cgroup *prev) 1106 { 1107 if (!root) 1108 root = root_mem_cgroup; 1109 if (prev && prev != root) 1110 css_put(&prev->css); 1111 } 1112 1113 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1114 { 1115 struct mem_cgroup *memcg = dead_memcg; 1116 struct mem_cgroup_reclaim_iter *iter; 1117 struct mem_cgroup_per_node *mz; 1118 int nid; 1119 int i; 1120 1121 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1122 for_each_node(nid) { 1123 mz = mem_cgroup_nodeinfo(memcg, nid); 1124 for (i = 0; i <= DEF_PRIORITY; i++) { 1125 iter = &mz->iter[i]; 1126 cmpxchg(&iter->position, 1127 dead_memcg, NULL); 1128 } 1129 } 1130 } 1131 } 1132 1133 /** 1134 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1135 * @memcg: hierarchy root 1136 * @fn: function to call for each task 1137 * @arg: argument passed to @fn 1138 * 1139 * This function iterates over tasks attached to @memcg or to any of its 1140 * descendants and calls @fn for each task. If @fn returns a non-zero 1141 * value, the function breaks the iteration loop and returns the value. 1142 * Otherwise, it will iterate over all tasks and return 0. 1143 * 1144 * This function must not be called for the root memory cgroup. 1145 */ 1146 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1147 int (*fn)(struct task_struct *, void *), void *arg) 1148 { 1149 struct mem_cgroup *iter; 1150 int ret = 0; 1151 1152 BUG_ON(memcg == root_mem_cgroup); 1153 1154 for_each_mem_cgroup_tree(iter, memcg) { 1155 struct css_task_iter it; 1156 struct task_struct *task; 1157 1158 css_task_iter_start(&iter->css, 0, &it); 1159 while (!ret && (task = css_task_iter_next(&it))) 1160 ret = fn(task, arg); 1161 css_task_iter_end(&it); 1162 if (ret) { 1163 mem_cgroup_iter_break(memcg, iter); 1164 break; 1165 } 1166 } 1167 return ret; 1168 } 1169 1170 /** 1171 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 1172 * @page: the page 1173 * @pgdat: pgdat of the page 1174 * 1175 * This function is only safe when following the LRU page isolation 1176 * and putback protocol: the LRU lock must be held, and the page must 1177 * either be PageLRU() or the caller must have isolated/allocated it. 
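 *
 * Illustrative sketch of that protocol (assumptions only, not an in-tree
 * user):
 *
 *	struct pglist_data *pgdat = page_pgdat(page);
 *	struct lruvec *lruvec;
 *
 *	spin_lock_irq(&pgdat->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, pgdat);
 *	... add @page to or delete it from one of lruvec->lists ...
 *	spin_unlock_irq(&pgdat->lru_lock);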
1178 */ 1179 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat) 1180 { 1181 struct mem_cgroup_per_node *mz; 1182 struct mem_cgroup *memcg; 1183 struct lruvec *lruvec; 1184 1185 if (mem_cgroup_disabled()) { 1186 lruvec = &pgdat->lruvec; 1187 goto out; 1188 } 1189 1190 memcg = page->mem_cgroup; 1191 /* 1192 * Swapcache readahead pages are added to the LRU - and 1193 * possibly migrated - before they are charged. 1194 */ 1195 if (!memcg) 1196 memcg = root_mem_cgroup; 1197 1198 mz = mem_cgroup_page_nodeinfo(memcg, page); 1199 lruvec = &mz->lruvec; 1200 out: 1201 /* 1202 * Since a node can be onlined after the mem_cgroup was created, 1203 * we have to be prepared to initialize lruvec->zone here; 1204 * and if offlined then reonlined, we need to reinitialize it. 1205 */ 1206 if (unlikely(lruvec->pgdat != pgdat)) 1207 lruvec->pgdat = pgdat; 1208 return lruvec; 1209 } 1210 1211 /** 1212 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1213 * @lruvec: mem_cgroup per zone lru vector 1214 * @lru: index of lru list the page is sitting on 1215 * @zid: zone id of the accounted pages 1216 * @nr_pages: positive when adding or negative when removing 1217 * 1218 * This function must be called under lru_lock, just before a page is added 1219 * to or just after a page is removed from an lru list (that ordering being 1220 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1221 */ 1222 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1223 int zid, int nr_pages) 1224 { 1225 struct mem_cgroup_per_node *mz; 1226 unsigned long *lru_size; 1227 long size; 1228 1229 if (mem_cgroup_disabled()) 1230 return; 1231 1232 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1233 lru_size = &mz->lru_zone_size[zid][lru]; 1234 1235 if (nr_pages < 0) 1236 *lru_size += nr_pages; 1237 1238 size = *lru_size; 1239 if (WARN_ONCE(size < 0, 1240 "%s(%p, %d, %d): lru_size %ld\n", 1241 __func__, lruvec, lru, nr_pages, size)) { 1242 VM_BUG_ON(1); 1243 *lru_size = 0; 1244 } 1245 1246 if (nr_pages > 0) 1247 *lru_size += nr_pages; 1248 } 1249 1250 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1251 { 1252 struct mem_cgroup *task_memcg; 1253 struct task_struct *p; 1254 bool ret; 1255 1256 p = find_lock_task_mm(task); 1257 if (p) { 1258 task_memcg = get_mem_cgroup_from_mm(p->mm); 1259 task_unlock(p); 1260 } else { 1261 /* 1262 * All threads may have already detached their mm's, but the oom 1263 * killer still needs to detect if they have already been oom 1264 * killed to prevent needlessly killing additional tasks. 1265 */ 1266 rcu_read_lock(); 1267 task_memcg = mem_cgroup_from_task(task); 1268 css_get(&task_memcg->css); 1269 rcu_read_unlock(); 1270 } 1271 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1272 css_put(&task_memcg->css); 1273 return ret; 1274 } 1275 1276 /** 1277 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1278 * @memcg: the memory cgroup 1279 * 1280 * Returns the maximum amount of memory @mem can be charged with, in 1281 * pages. 
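 *
 * For example (illustrative numbers only): with memory.max at 1000 pages
 * and a usage of 940 pages the margin is 60 pages; if memsw accounting is
 * active with memsw.max at 1200 pages and a memsw usage of 1190 pages, the
 * margin is further clamped to min(60, 10) = 10 pages.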
1282 */ 1283 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1284 { 1285 unsigned long margin = 0; 1286 unsigned long count; 1287 unsigned long limit; 1288 1289 count = page_counter_read(&memcg->memory); 1290 limit = READ_ONCE(memcg->memory.max); 1291 if (count < limit) 1292 margin = limit - count; 1293 1294 if (do_memsw_account()) { 1295 count = page_counter_read(&memcg->memsw); 1296 limit = READ_ONCE(memcg->memsw.max); 1297 if (count <= limit) 1298 margin = min(margin, limit - count); 1299 else 1300 margin = 0; 1301 } 1302 1303 return margin; 1304 } 1305 1306 /* 1307 * A routine for checking "mem" is under move_account() or not. 1308 * 1309 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1310 * moving cgroups. This is for waiting at high-memory pressure 1311 * caused by "move". 1312 */ 1313 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1314 { 1315 struct mem_cgroup *from; 1316 struct mem_cgroup *to; 1317 bool ret = false; 1318 /* 1319 * Unlike task_move routines, we access mc.to, mc.from not under 1320 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1321 */ 1322 spin_lock(&mc.lock); 1323 from = mc.from; 1324 to = mc.to; 1325 if (!from) 1326 goto unlock; 1327 1328 ret = mem_cgroup_is_descendant(from, memcg) || 1329 mem_cgroup_is_descendant(to, memcg); 1330 unlock: 1331 spin_unlock(&mc.lock); 1332 return ret; 1333 } 1334 1335 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1336 { 1337 if (mc.moving_task && current != mc.moving_task) { 1338 if (mem_cgroup_under_move(memcg)) { 1339 DEFINE_WAIT(wait); 1340 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1341 /* moving charge context might have finished. */ 1342 if (mc.moving_task) 1343 schedule(); 1344 finish_wait(&mc.waitq, &wait); 1345 return true; 1346 } 1347 } 1348 return false; 1349 } 1350 1351 static const unsigned int memcg1_stats[] = { 1352 MEMCG_CACHE, 1353 MEMCG_RSS, 1354 MEMCG_RSS_HUGE, 1355 NR_SHMEM, 1356 NR_FILE_MAPPED, 1357 NR_FILE_DIRTY, 1358 NR_WRITEBACK, 1359 MEMCG_SWAP, 1360 }; 1361 1362 static const char *const memcg1_stat_names[] = { 1363 "cache", 1364 "rss", 1365 "rss_huge", 1366 "shmem", 1367 "mapped_file", 1368 "dirty", 1369 "writeback", 1370 "swap", 1371 }; 1372 1373 #define K(x) ((x) << (PAGE_SHIFT-10)) 1374 /** 1375 * mem_cgroup_print_oom_context: Print OOM information relevant to 1376 * memory controller. 1377 * @memcg: The memory cgroup that went over limit 1378 * @p: Task that is going to be killed 1379 * 1380 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1381 * enabled 1382 */ 1383 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1384 { 1385 rcu_read_lock(); 1386 1387 if (memcg) { 1388 pr_cont(",oom_memcg="); 1389 pr_cont_cgroup_path(memcg->css.cgroup); 1390 } else 1391 pr_cont(",global_oom"); 1392 if (p) { 1393 pr_cont(",task_memcg="); 1394 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1395 } 1396 rcu_read_unlock(); 1397 } 1398 1399 /** 1400 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1401 * memory controller. 
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;
	unsigned int i;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.max), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.max), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.max), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", memcg1_stat_names[i],
				K(memcg_page_state_local(iter,
							 memcg1_stats[i])));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(memcg_page_state_local(iter,
							 NR_LRU_BASE + i)));

		pr_cont("\n");
	}
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max;

	max = memcg->memory.max;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_max;
		unsigned long swap_max;

		memsw_max = memcg->memsw.max;
		swap_max = memcg->swap.max;
		swap_max = min(swap_max, (unsigned long)total_swap_pages);
		max = min(max + swap_max, memsw_max);
	}
	return max;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	if (mutex_lock_killable(&oom_lock))
		return true;
	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * Returns true if the specified memcg contains any reclaimable pages on
 * the given node, false otherwise.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);

	if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
	    lruvec_page_state(lruvec, NR_ACTIVE_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
	    lruvec_page_state(lruvec, NR_ACTIVE_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, they will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fall back to the current node in that case for simplicity.
1568 */ 1569 if (unlikely(node == MAX_NUMNODES)) 1570 node = numa_node_id(); 1571 1572 memcg->last_scanned_node = node; 1573 return node; 1574 } 1575 #else 1576 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1577 { 1578 return 0; 1579 } 1580 #endif 1581 1582 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1583 pg_data_t *pgdat, 1584 gfp_t gfp_mask, 1585 unsigned long *total_scanned) 1586 { 1587 struct mem_cgroup *victim = NULL; 1588 int total = 0; 1589 int loop = 0; 1590 unsigned long excess; 1591 unsigned long nr_scanned; 1592 struct mem_cgroup_reclaim_cookie reclaim = { 1593 .pgdat = pgdat, 1594 .priority = 0, 1595 }; 1596 1597 excess = soft_limit_excess(root_memcg); 1598 1599 while (1) { 1600 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1601 if (!victim) { 1602 loop++; 1603 if (loop >= 2) { 1604 /* 1605 * If we have not been able to reclaim 1606 * anything, it might because there are 1607 * no reclaimable pages under this hierarchy 1608 */ 1609 if (!total) 1610 break; 1611 /* 1612 * We want to do more targeted reclaim. 1613 * excess >> 2 is not to excessive so as to 1614 * reclaim too much, nor too less that we keep 1615 * coming back to reclaim from this cgroup 1616 */ 1617 if (total >= (excess >> 2) || 1618 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1619 break; 1620 } 1621 continue; 1622 } 1623 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1624 pgdat, &nr_scanned); 1625 *total_scanned += nr_scanned; 1626 if (!soft_limit_excess(root_memcg)) 1627 break; 1628 } 1629 mem_cgroup_iter_break(root_memcg, victim); 1630 return total; 1631 } 1632 1633 #ifdef CONFIG_LOCKDEP 1634 static struct lockdep_map memcg_oom_lock_dep_map = { 1635 .name = "memcg_oom_lock", 1636 }; 1637 #endif 1638 1639 static DEFINE_SPINLOCK(memcg_oom_lock); 1640 1641 /* 1642 * Check OOM-Killer is already running under our hierarchy. 1643 * If someone is running, return false. 1644 */ 1645 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1646 { 1647 struct mem_cgroup *iter, *failed = NULL; 1648 1649 spin_lock(&memcg_oom_lock); 1650 1651 for_each_mem_cgroup_tree(iter, memcg) { 1652 if (iter->oom_lock) { 1653 /* 1654 * this subtree of our hierarchy is already locked 1655 * so we cannot give a lock. 
1656 */ 1657 failed = iter; 1658 mem_cgroup_iter_break(memcg, iter); 1659 break; 1660 } else 1661 iter->oom_lock = true; 1662 } 1663 1664 if (failed) { 1665 /* 1666 * OK, we failed to lock the whole subtree so we have 1667 * to clean up what we set up to the failing subtree 1668 */ 1669 for_each_mem_cgroup_tree(iter, memcg) { 1670 if (iter == failed) { 1671 mem_cgroup_iter_break(memcg, iter); 1672 break; 1673 } 1674 iter->oom_lock = false; 1675 } 1676 } else 1677 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1678 1679 spin_unlock(&memcg_oom_lock); 1680 1681 return !failed; 1682 } 1683 1684 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1685 { 1686 struct mem_cgroup *iter; 1687 1688 spin_lock(&memcg_oom_lock); 1689 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1690 for_each_mem_cgroup_tree(iter, memcg) 1691 iter->oom_lock = false; 1692 spin_unlock(&memcg_oom_lock); 1693 } 1694 1695 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1696 { 1697 struct mem_cgroup *iter; 1698 1699 spin_lock(&memcg_oom_lock); 1700 for_each_mem_cgroup_tree(iter, memcg) 1701 iter->under_oom++; 1702 spin_unlock(&memcg_oom_lock); 1703 } 1704 1705 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1706 { 1707 struct mem_cgroup *iter; 1708 1709 /* 1710 * When a new child is created while the hierarchy is under oom, 1711 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1712 */ 1713 spin_lock(&memcg_oom_lock); 1714 for_each_mem_cgroup_tree(iter, memcg) 1715 if (iter->under_oom > 0) 1716 iter->under_oom--; 1717 spin_unlock(&memcg_oom_lock); 1718 } 1719 1720 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1721 1722 struct oom_wait_info { 1723 struct mem_cgroup *memcg; 1724 wait_queue_entry_t wait; 1725 }; 1726 1727 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1728 unsigned mode, int sync, void *arg) 1729 { 1730 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1731 struct mem_cgroup *oom_wait_memcg; 1732 struct oom_wait_info *oom_wait_info; 1733 1734 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1735 oom_wait_memcg = oom_wait_info->memcg; 1736 1737 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1738 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1739 return 0; 1740 return autoremove_wake_function(wait, mode, sync, arg); 1741 } 1742 1743 static void memcg_oom_recover(struct mem_cgroup *memcg) 1744 { 1745 /* 1746 * For the following lockless ->under_oom test, the only required 1747 * guarantee is that it must see the state asserted by an OOM when 1748 * this function is called as a result of userland actions 1749 * triggered by the notification of the OOM. This is trivially 1750 * achieved by invoking mem_cgroup_mark_under_oom() before 1751 * triggering notification. 1752 */ 1753 if (memcg && memcg->under_oom) 1754 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1755 } 1756 1757 enum oom_status { 1758 OOM_SUCCESS, 1759 OOM_FAILED, 1760 OOM_ASYNC, 1761 OOM_SKIPPED 1762 }; 1763 1764 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1765 { 1766 enum oom_status ret; 1767 bool locked; 1768 1769 if (order > PAGE_ALLOC_COSTLY_ORDER) 1770 return OOM_SKIPPED; 1771 1772 memcg_memory_event(memcg, MEMCG_OOM); 1773 1774 /* 1775 * We are in the middle of the charge context here, so we 1776 * don't want to block when potentially sitting on a callstack 1777 * that holds all kinds of filesystem and mm locks. 
1778 * 1779 * cgroup1 allows disabling the OOM killer and waiting for outside 1780 * handling until the charge can succeed; remember the context and put 1781 * the task to sleep at the end of the page fault when all locks are 1782 * released. 1783 * 1784 * On the other hand, in-kernel OOM killer allows for an async victim 1785 * memory reclaim (oom_reaper) and that means that we are not solely 1786 * relying on the oom victim to make a forward progress and we can 1787 * invoke the oom killer here. 1788 * 1789 * Please note that mem_cgroup_out_of_memory might fail to find a 1790 * victim and then we have to bail out from the charge path. 1791 */ 1792 if (memcg->oom_kill_disable) { 1793 if (!current->in_user_fault) 1794 return OOM_SKIPPED; 1795 css_get(&memcg->css); 1796 current->memcg_in_oom = memcg; 1797 current->memcg_oom_gfp_mask = mask; 1798 current->memcg_oom_order = order; 1799 1800 return OOM_ASYNC; 1801 } 1802 1803 mem_cgroup_mark_under_oom(memcg); 1804 1805 locked = mem_cgroup_oom_trylock(memcg); 1806 1807 if (locked) 1808 mem_cgroup_oom_notify(memcg); 1809 1810 mem_cgroup_unmark_under_oom(memcg); 1811 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1812 ret = OOM_SUCCESS; 1813 else 1814 ret = OOM_FAILED; 1815 1816 if (locked) 1817 mem_cgroup_oom_unlock(memcg); 1818 1819 return ret; 1820 } 1821 1822 /** 1823 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1824 * @handle: actually kill/wait or just clean up the OOM state 1825 * 1826 * This has to be called at the end of a page fault if the memcg OOM 1827 * handler was enabled. 1828 * 1829 * Memcg supports userspace OOM handling where failed allocations must 1830 * sleep on a waitqueue until the userspace task resolves the 1831 * situation. Sleeping directly in the charge context with all kinds 1832 * of locks held is not a good idea, instead we remember an OOM state 1833 * in the task and mem_cgroup_oom_synchronize() has to be called at 1834 * the end of the page fault to complete the OOM handling. 1835 * 1836 * Returns %true if an ongoing memcg OOM situation was detected and 1837 * completed, %false otherwise. 1838 */ 1839 bool mem_cgroup_oom_synchronize(bool handle) 1840 { 1841 struct mem_cgroup *memcg = current->memcg_in_oom; 1842 struct oom_wait_info owait; 1843 bool locked; 1844 1845 /* OOM is global, do not handle */ 1846 if (!memcg) 1847 return false; 1848 1849 if (!handle) 1850 goto cleanup; 1851 1852 owait.memcg = memcg; 1853 owait.wait.flags = 0; 1854 owait.wait.func = memcg_oom_wake_function; 1855 owait.wait.private = current; 1856 INIT_LIST_HEAD(&owait.wait.entry); 1857 1858 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1859 mem_cgroup_mark_under_oom(memcg); 1860 1861 locked = mem_cgroup_oom_trylock(memcg); 1862 1863 if (locked) 1864 mem_cgroup_oom_notify(memcg); 1865 1866 if (locked && !memcg->oom_kill_disable) { 1867 mem_cgroup_unmark_under_oom(memcg); 1868 finish_wait(&memcg_oom_waitq, &owait.wait); 1869 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1870 current->memcg_oom_order); 1871 } else { 1872 schedule(); 1873 mem_cgroup_unmark_under_oom(memcg); 1874 finish_wait(&memcg_oom_waitq, &owait.wait); 1875 } 1876 1877 if (locked) { 1878 mem_cgroup_oom_unlock(memcg); 1879 /* 1880 * There is no guarantee that an OOM-lock contender 1881 * sees the wakeups triggered by the OOM kill 1882 * uncharges. Wake any sleepers explicitely. 
1883 */ 1884 memcg_oom_recover(memcg); 1885 } 1886 cleanup: 1887 current->memcg_in_oom = NULL; 1888 css_put(&memcg->css); 1889 return true; 1890 } 1891 1892 /** 1893 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1894 * @victim: task to be killed by the OOM killer 1895 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1896 * 1897 * Returns a pointer to a memory cgroup, which has to be cleaned up 1898 * by killing all belonging OOM-killable tasks. 1899 * 1900 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 1901 */ 1902 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1903 struct mem_cgroup *oom_domain) 1904 { 1905 struct mem_cgroup *oom_group = NULL; 1906 struct mem_cgroup *memcg; 1907 1908 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1909 return NULL; 1910 1911 if (!oom_domain) 1912 oom_domain = root_mem_cgroup; 1913 1914 rcu_read_lock(); 1915 1916 memcg = mem_cgroup_from_task(victim); 1917 if (memcg == root_mem_cgroup) 1918 goto out; 1919 1920 /* 1921 * Traverse the memory cgroup hierarchy from the victim task's 1922 * cgroup up to the OOMing cgroup (or root) to find the 1923 * highest-level memory cgroup with oom.group set. 1924 */ 1925 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1926 if (memcg->oom_group) 1927 oom_group = memcg; 1928 1929 if (memcg == oom_domain) 1930 break; 1931 } 1932 1933 if (oom_group) 1934 css_get(&oom_group->css); 1935 out: 1936 rcu_read_unlock(); 1937 1938 return oom_group; 1939 } 1940 1941 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1942 { 1943 pr_info("Tasks in "); 1944 pr_cont_cgroup_path(memcg->css.cgroup); 1945 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1946 } 1947 1948 /** 1949 * lock_page_memcg - lock a page->mem_cgroup binding 1950 * @page: the page 1951 * 1952 * This function protects unlocked LRU pages from being moved to 1953 * another cgroup. 1954 * 1955 * It ensures lifetime of the returned memcg. Caller is responsible 1956 * for the lifetime of the page; __unlock_page_memcg() is available 1957 * when @page might get freed inside the locked section. 1958 */ 1959 struct mem_cgroup *lock_page_memcg(struct page *page) 1960 { 1961 struct mem_cgroup *memcg; 1962 unsigned long flags; 1963 1964 /* 1965 * The RCU lock is held throughout the transaction. The fast 1966 * path can get away without acquiring the memcg->move_lock 1967 * because page moving starts with an RCU grace period. 1968 * 1969 * The RCU lock also protects the memcg from being freed when 1970 * the page state that is going to change is the only thing 1971 * preventing the page itself from being freed. E.g. writeback 1972 * doesn't hold a page reference and relies on PG_writeback to 1973 * keep off truncation, migration and so forth. 1974 */ 1975 rcu_read_lock(); 1976 1977 if (mem_cgroup_disabled()) 1978 return NULL; 1979 again: 1980 memcg = page->mem_cgroup; 1981 if (unlikely(!memcg)) 1982 return NULL; 1983 1984 if (atomic_read(&memcg->moving_account) <= 0) 1985 return memcg; 1986 1987 spin_lock_irqsave(&memcg->move_lock, flags); 1988 if (memcg != page->mem_cgroup) { 1989 spin_unlock_irqrestore(&memcg->move_lock, flags); 1990 goto again; 1991 } 1992 1993 /* 1994 * When charge migration first begins, we can have locked and 1995 * unlocked page stat updates happening concurrently. Track 1996 * the task who has the lock for unlock_page_memcg(). 
1997 */ 1998 memcg->move_lock_task = current; 1999 memcg->move_lock_flags = flags; 2000 2001 return memcg; 2002 } 2003 EXPORT_SYMBOL(lock_page_memcg); 2004 2005 /** 2006 * __unlock_page_memcg - unlock and unpin a memcg 2007 * @memcg: the memcg 2008 * 2009 * Unlock and unpin a memcg returned by lock_page_memcg(). 2010 */ 2011 void __unlock_page_memcg(struct mem_cgroup *memcg) 2012 { 2013 if (memcg && memcg->move_lock_task == current) { 2014 unsigned long flags = memcg->move_lock_flags; 2015 2016 memcg->move_lock_task = NULL; 2017 memcg->move_lock_flags = 0; 2018 2019 spin_unlock_irqrestore(&memcg->move_lock, flags); 2020 } 2021 2022 rcu_read_unlock(); 2023 } 2024 2025 /** 2026 * unlock_page_memcg - unlock a page->mem_cgroup binding 2027 * @page: the page 2028 */ 2029 void unlock_page_memcg(struct page *page) 2030 { 2031 __unlock_page_memcg(page->mem_cgroup); 2032 } 2033 EXPORT_SYMBOL(unlock_page_memcg); 2034 2035 struct memcg_stock_pcp { 2036 struct mem_cgroup *cached; /* this never be root cgroup */ 2037 unsigned int nr_pages; 2038 struct work_struct work; 2039 unsigned long flags; 2040 #define FLUSHING_CACHED_CHARGE 0 2041 }; 2042 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2043 static DEFINE_MUTEX(percpu_charge_mutex); 2044 2045 /** 2046 * consume_stock: Try to consume stocked charge on this cpu. 2047 * @memcg: memcg to consume from. 2048 * @nr_pages: how many pages to charge. 2049 * 2050 * The charges will only happen if @memcg matches the current cpu's memcg 2051 * stock, and at least @nr_pages are available in that stock. Failure to 2052 * service an allocation will refill the stock. 2053 * 2054 * returns true if successful, false otherwise. 2055 */ 2056 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2057 { 2058 struct memcg_stock_pcp *stock; 2059 unsigned long flags; 2060 bool ret = false; 2061 2062 if (nr_pages > MEMCG_CHARGE_BATCH) 2063 return ret; 2064 2065 local_irq_save(flags); 2066 2067 stock = this_cpu_ptr(&memcg_stock); 2068 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2069 stock->nr_pages -= nr_pages; 2070 ret = true; 2071 } 2072 2073 local_irq_restore(flags); 2074 2075 return ret; 2076 } 2077 2078 /* 2079 * Returns stocks cached in percpu and reset cached information. 2080 */ 2081 static void drain_stock(struct memcg_stock_pcp *stock) 2082 { 2083 struct mem_cgroup *old = stock->cached; 2084 2085 if (stock->nr_pages) { 2086 page_counter_uncharge(&old->memory, stock->nr_pages); 2087 if (do_memsw_account()) 2088 page_counter_uncharge(&old->memsw, stock->nr_pages); 2089 css_put_many(&old->css, stock->nr_pages); 2090 stock->nr_pages = 0; 2091 } 2092 stock->cached = NULL; 2093 } 2094 2095 static void drain_local_stock(struct work_struct *dummy) 2096 { 2097 struct memcg_stock_pcp *stock; 2098 unsigned long flags; 2099 2100 /* 2101 * The only protection from memory hotplug vs. drain_stock races is 2102 * that we always operate on local CPU stock here with IRQ disabled 2103 */ 2104 local_irq_save(flags); 2105 2106 stock = this_cpu_ptr(&memcg_stock); 2107 drain_stock(stock); 2108 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2109 2110 local_irq_restore(flags); 2111 } 2112 2113 /* 2114 * Cache charges(val) to local per_cpu area. 2115 * This will be consumed by consume_stock() function, later. 
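 *
 * Illustrative flow (editorial sketch of how the stock is used by
 * try_charge() further down; not a literal code path):
 *
 *	if (consume_stock(memcg, nr_pages))	- fast path, no page_counter traffic
 *		return 0;
 *	... charge a full batch via page_counter_try_charge() ...
 *	refill_stock(memcg, batch - nr_pages);	- keep the surplus on this CPU
 *
 * A later charge for the same memcg on this CPU can then be satisfied
 * straight from the stock, and drain_all_stock() hands any unused
 * surplus back when some cgroup hits its limit.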
2116 */ 2117 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2118 { 2119 struct memcg_stock_pcp *stock; 2120 unsigned long flags; 2121 2122 local_irq_save(flags); 2123 2124 stock = this_cpu_ptr(&memcg_stock); 2125 if (stock->cached != memcg) { /* reset if necessary */ 2126 drain_stock(stock); 2127 stock->cached = memcg; 2128 } 2129 stock->nr_pages += nr_pages; 2130 2131 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2132 drain_stock(stock); 2133 2134 local_irq_restore(flags); 2135 } 2136 2137 /* 2138 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2139 * of the hierarchy under it. 2140 */ 2141 static void drain_all_stock(struct mem_cgroup *root_memcg) 2142 { 2143 int cpu, curcpu; 2144 2145 /* If someone's already draining, avoid adding running more workers. */ 2146 if (!mutex_trylock(&percpu_charge_mutex)) 2147 return; 2148 /* 2149 * Notify other cpus that system-wide "drain" is running 2150 * We do not care about races with the cpu hotplug because cpu down 2151 * as well as workers from this path always operate on the local 2152 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2153 */ 2154 curcpu = get_cpu(); 2155 for_each_online_cpu(cpu) { 2156 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2157 struct mem_cgroup *memcg; 2158 2159 memcg = stock->cached; 2160 if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css)) 2161 continue; 2162 if (!mem_cgroup_is_descendant(memcg, root_memcg)) { 2163 css_put(&memcg->css); 2164 continue; 2165 } 2166 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2167 if (cpu == curcpu) 2168 drain_local_stock(&stock->work); 2169 else 2170 schedule_work_on(cpu, &stock->work); 2171 } 2172 css_put(&memcg->css); 2173 } 2174 put_cpu(); 2175 mutex_unlock(&percpu_charge_mutex); 2176 } 2177 2178 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2179 { 2180 struct memcg_stock_pcp *stock; 2181 struct mem_cgroup *memcg, *mi; 2182 2183 stock = &per_cpu(memcg_stock, cpu); 2184 drain_stock(stock); 2185 2186 for_each_mem_cgroup(memcg) { 2187 int i; 2188 2189 for (i = 0; i < MEMCG_NR_STAT; i++) { 2190 int nid; 2191 long x; 2192 2193 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2194 if (x) { 2195 atomic_long_add(x, &memcg->vmstats_local[i]); 2196 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2197 atomic_long_add(x, &memcg->vmstats[i]); 2198 } 2199 2200 if (i >= NR_VM_NODE_STAT_ITEMS) 2201 continue; 2202 2203 for_each_node(nid) { 2204 struct mem_cgroup_per_node *pn; 2205 2206 pn = mem_cgroup_nodeinfo(memcg, nid); 2207 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2208 if (x) { 2209 atomic_long_add(x, &pn->lruvec_stat_local[i]); 2210 do { 2211 atomic_long_add(x, &pn->lruvec_stat[i]); 2212 } while ((pn = parent_nodeinfo(pn, nid))); 2213 } 2214 } 2215 } 2216 2217 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2218 long x; 2219 2220 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2221 if (x) { 2222 atomic_long_add(x, &memcg->vmevents_local[i]); 2223 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2224 atomic_long_add(x, &memcg->vmevents[i]); 2225 } 2226 } 2227 } 2228 2229 return 0; 2230 } 2231 2232 static void reclaim_high(struct mem_cgroup *memcg, 2233 unsigned int nr_pages, 2234 gfp_t gfp_mask) 2235 { 2236 do { 2237 if (page_counter_read(&memcg->memory) <= memcg->high) 2238 continue; 2239 memcg_memory_event(memcg, MEMCG_HIGH); 2240 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); 2241 } while ((memcg = parent_mem_cgroup(memcg))); 2242 } 2243 2244 static void 
high_work_func(struct work_struct *work) 2245 { 2246 struct mem_cgroup *memcg; 2247 2248 memcg = container_of(work, struct mem_cgroup, high_work); 2249 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2250 } 2251 2252 /* 2253 * Scheduled by try_charge() to be executed from the userland return path 2254 * and reclaims memory over the high limit. 2255 */ 2256 void mem_cgroup_handle_over_high(void) 2257 { 2258 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2259 struct mem_cgroup *memcg; 2260 2261 if (likely(!nr_pages)) 2262 return; 2263 2264 memcg = get_mem_cgroup_from_mm(current->mm); 2265 reclaim_high(memcg, nr_pages, GFP_KERNEL); 2266 css_put(&memcg->css); 2267 current->memcg_nr_pages_over_high = 0; 2268 } 2269 2270 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2271 unsigned int nr_pages) 2272 { 2273 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2274 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2275 struct mem_cgroup *mem_over_limit; 2276 struct page_counter *counter; 2277 unsigned long nr_reclaimed; 2278 bool may_swap = true; 2279 bool drained = false; 2280 bool oomed = false; 2281 enum oom_status oom_status; 2282 2283 if (mem_cgroup_is_root(memcg)) 2284 return 0; 2285 retry: 2286 if (consume_stock(memcg, nr_pages)) 2287 return 0; 2288 2289 if (!do_memsw_account() || 2290 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2291 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2292 goto done_restock; 2293 if (do_memsw_account()) 2294 page_counter_uncharge(&memcg->memsw, batch); 2295 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2296 } else { 2297 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2298 may_swap = false; 2299 } 2300 2301 if (batch > nr_pages) { 2302 batch = nr_pages; 2303 goto retry; 2304 } 2305 2306 /* 2307 * Unlike in global OOM situations, memcg is not in a physical 2308 * memory shortage. Allow dying and OOM-killed tasks to 2309 * bypass the last charges so that they can exit quickly and 2310 * free their memory. 2311 */ 2312 if (unlikely(should_force_charge())) 2313 goto force; 2314 2315 /* 2316 * Prevent unbounded recursion when reclaim operations need to 2317 * allocate memory. This might exceed the limits temporarily, 2318 * but we prefer facilitating memory reclaim and getting back 2319 * under the limit over triggering OOM kills in these cases. 2320 */ 2321 if (unlikely(current->flags & PF_MEMALLOC)) 2322 goto force; 2323 2324 if (unlikely(task_in_memcg_oom(current))) 2325 goto nomem; 2326 2327 if (!gfpflags_allow_blocking(gfp_mask)) 2328 goto nomem; 2329 2330 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2331 2332 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2333 gfp_mask, may_swap); 2334 2335 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2336 goto retry; 2337 2338 if (!drained) { 2339 drain_all_stock(mem_over_limit); 2340 drained = true; 2341 goto retry; 2342 } 2343 2344 if (gfp_mask & __GFP_NORETRY) 2345 goto nomem; 2346 /* 2347 * Even though the limit is exceeded at this point, reclaim 2348 * may have been able to free some pages. Retry the charge 2349 * before killing the task. 2350 * 2351 * Only for regular pages, though: huge pages are rather 2352 * unlikely to succeed so close to the limit, and we fall back 2353 * to regular pages anyway in case of failure. 2354 */ 2355 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2356 goto retry; 2357 /* 2358 * At task move, charge accounts can be doubly counted. 
So, it's 2359 * better to wait until the end of task_move if something is going on. 2360 */ 2361 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2362 goto retry; 2363 2364 if (nr_retries--) 2365 goto retry; 2366 2367 if (gfp_mask & __GFP_RETRY_MAYFAIL && oomed) 2368 goto nomem; 2369 2370 if (gfp_mask & __GFP_NOFAIL) 2371 goto force; 2372 2373 if (fatal_signal_pending(current)) 2374 goto force; 2375 2376 /* 2377 * Keep retrying as long as the memcg oom killer is able to make 2378 * forward progress, or bypass the charge if the oom killer 2379 * couldn't make any progress. 2380 */ 2381 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2382 get_order(nr_pages * PAGE_SIZE)); 2383 switch (oom_status) { 2384 case OOM_SUCCESS: 2385 nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2386 oomed = true; 2387 goto retry; 2388 case OOM_FAILED: 2389 goto force; 2390 default: 2391 goto nomem; 2392 } 2393 nomem: 2394 if (!(gfp_mask & __GFP_NOFAIL)) 2395 return -ENOMEM; 2396 force: 2397 /* 2398 * The allocation either can't fail or will lead to more memory 2399 * being freed very soon. Allow memory usage to go over the limit 2400 * temporarily by force charging it. 2401 */ 2402 page_counter_charge(&memcg->memory, nr_pages); 2403 if (do_memsw_account()) 2404 page_counter_charge(&memcg->memsw, nr_pages); 2405 css_get_many(&memcg->css, nr_pages); 2406 2407 return 0; 2408 2409 done_restock: 2410 css_get_many(&memcg->css, batch); 2411 if (batch > nr_pages) 2412 refill_stock(memcg, batch - nr_pages); 2413 2414 /* 2415 * If the hierarchy is above the normal consumption range, schedule 2416 * reclaim on returning to userland. We can perform reclaim here 2417 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2418 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2419 * not recorded as it most likely matches current's and won't 2420 * change in the meantime. As high limit is checked again before 2421 * reclaim, the cost of mismatch is negligible.
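 *
 * (Editorial note on the punted path: set_notify_resume() below raises
 * TIF_NOTIFY_RESUME, and on the way back to userspace
 * tracehook_notify_resume() calls mem_cgroup_handle_over_high() above,
 * which does the actual reclaim_high() with GFP_KERNEL.)
 *
 *	try_charge() -> set_notify_resume(current)
 *	  return to userspace -> tracehook_notify_resume()
 *	    -> mem_cgroup_handle_over_high() -> reclaim_high()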
2422 */ 2423 do { 2424 if (page_counter_read(&memcg->memory) > memcg->high) { 2425 /* Don't bother a random interrupted task */ 2426 if (in_interrupt()) { 2427 schedule_work(&memcg->high_work); 2428 break; 2429 } 2430 current->memcg_nr_pages_over_high += batch; 2431 set_notify_resume(current); 2432 break; 2433 } 2434 } while ((memcg = parent_mem_cgroup(memcg))); 2435 2436 return 0; 2437 } 2438 2439 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2440 { 2441 if (mem_cgroup_is_root(memcg)) 2442 return; 2443 2444 page_counter_uncharge(&memcg->memory, nr_pages); 2445 if (do_memsw_account()) 2446 page_counter_uncharge(&memcg->memsw, nr_pages); 2447 2448 css_put_many(&memcg->css, nr_pages); 2449 } 2450 2451 static void lock_page_lru(struct page *page, int *isolated) 2452 { 2453 pg_data_t *pgdat = page_pgdat(page); 2454 2455 spin_lock_irq(&pgdat->lru_lock); 2456 if (PageLRU(page)) { 2457 struct lruvec *lruvec; 2458 2459 lruvec = mem_cgroup_page_lruvec(page, pgdat); 2460 ClearPageLRU(page); 2461 del_page_from_lru_list(page, lruvec, page_lru(page)); 2462 *isolated = 1; 2463 } else 2464 *isolated = 0; 2465 } 2466 2467 static void unlock_page_lru(struct page *page, int isolated) 2468 { 2469 pg_data_t *pgdat = page_pgdat(page); 2470 2471 if (isolated) { 2472 struct lruvec *lruvec; 2473 2474 lruvec = mem_cgroup_page_lruvec(page, pgdat); 2475 VM_BUG_ON_PAGE(PageLRU(page), page); 2476 SetPageLRU(page); 2477 add_page_to_lru_list(page, lruvec, page_lru(page)); 2478 } 2479 spin_unlock_irq(&pgdat->lru_lock); 2480 } 2481 2482 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2483 bool lrucare) 2484 { 2485 int isolated; 2486 2487 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2488 2489 /* 2490 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2491 * may already be on some other mem_cgroup's LRU. Take care of it. 2492 */ 2493 if (lrucare) 2494 lock_page_lru(page, &isolated); 2495 2496 /* 2497 * Nobody should be changing or seriously looking at 2498 * page->mem_cgroup at this point: 2499 * 2500 * - the page is uncharged 2501 * 2502 * - the page is off-LRU 2503 * 2504 * - an anonymous fault has exclusive page access, except for 2505 * a locked page table 2506 * 2507 * - a page cache insertion, a swapin fault, or a migration 2508 * have the page locked 2509 */ 2510 page->mem_cgroup = memcg; 2511 2512 if (lrucare) 2513 unlock_page_lru(page, isolated); 2514 } 2515 2516 #ifdef CONFIG_MEMCG_KMEM 2517 static int memcg_alloc_cache_id(void) 2518 { 2519 int id, size; 2520 int err; 2521 2522 id = ida_simple_get(&memcg_cache_ida, 2523 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2524 if (id < 0) 2525 return id; 2526 2527 if (id < memcg_nr_cache_ids) 2528 return id; 2529 2530 /* 2531 * There's no space for the new id in memcg_caches arrays, 2532 * so we have to grow them. 
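 *
 * Worked example (editorial): if id 64 is handed out while
 * memcg_nr_cache_ids is still 64, the code below picks
 * size = 2 * (64 + 1) = 130, clamps it to the
 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE] range, and resizes
 * every root cache and list_lru before publishing the new
 * memcg_nr_cache_ids.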
2533 */ 2534 down_write(&memcg_cache_ids_sem); 2535 2536 size = 2 * (id + 1); 2537 if (size < MEMCG_CACHES_MIN_SIZE) 2538 size = MEMCG_CACHES_MIN_SIZE; 2539 else if (size > MEMCG_CACHES_MAX_SIZE) 2540 size = MEMCG_CACHES_MAX_SIZE; 2541 2542 err = memcg_update_all_caches(size); 2543 if (!err) 2544 err = memcg_update_all_list_lrus(size); 2545 if (!err) 2546 memcg_nr_cache_ids = size; 2547 2548 up_write(&memcg_cache_ids_sem); 2549 2550 if (err) { 2551 ida_simple_remove(&memcg_cache_ida, id); 2552 return err; 2553 } 2554 return id; 2555 } 2556 2557 static void memcg_free_cache_id(int id) 2558 { 2559 ida_simple_remove(&memcg_cache_ida, id); 2560 } 2561 2562 struct memcg_kmem_cache_create_work { 2563 struct mem_cgroup *memcg; 2564 struct kmem_cache *cachep; 2565 struct work_struct work; 2566 }; 2567 2568 static void memcg_kmem_cache_create_func(struct work_struct *w) 2569 { 2570 struct memcg_kmem_cache_create_work *cw = 2571 container_of(w, struct memcg_kmem_cache_create_work, work); 2572 struct mem_cgroup *memcg = cw->memcg; 2573 struct kmem_cache *cachep = cw->cachep; 2574 2575 memcg_create_kmem_cache(memcg, cachep); 2576 2577 css_put(&memcg->css); 2578 kfree(cw); 2579 } 2580 2581 /* 2582 * Enqueue the creation of a per-memcg kmem_cache. 2583 */ 2584 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2585 struct kmem_cache *cachep) 2586 { 2587 struct memcg_kmem_cache_create_work *cw; 2588 2589 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2590 if (!cw) 2591 return; 2592 2593 css_get(&memcg->css); 2594 2595 cw->memcg = memcg; 2596 cw->cachep = cachep; 2597 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2598 2599 queue_work(memcg_kmem_cache_wq, &cw->work); 2600 } 2601 2602 static inline bool memcg_kmem_bypass(void) 2603 { 2604 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2605 return true; 2606 return false; 2607 } 2608 2609 /** 2610 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2611 * @cachep: the original global kmem cache 2612 * 2613 * Return the kmem_cache we're supposed to use for a slab allocation. 2614 * We try to use the current memcg's version of the cache. 2615 * 2616 * If the cache does not exist yet and we are the first user of it, we 2617 * create it asynchronously in a workqueue and let the current allocation 2618 * go through with the original cache. 2619 * 2620 * This function takes a reference to the cache it returns to assure it 2621 * won't get destroyed while we are working with it. Once the caller is 2622 * done with it, memcg_kmem_put_cache() must be called to release the 2623 * reference. 2624 */ 2625 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2626 { 2627 struct mem_cgroup *memcg; 2628 struct kmem_cache *memcg_cachep; 2629 int kmemcg_id; 2630 2631 VM_BUG_ON(!is_root_cache(cachep)); 2632 2633 if (memcg_kmem_bypass()) 2634 return cachep; 2635 2636 memcg = get_mem_cgroup_from_current(); 2637 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2638 if (kmemcg_id < 0) 2639 goto out; 2640 2641 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2642 if (likely(memcg_cachep)) 2643 return memcg_cachep; 2644 2645 /* 2646 * If we are in a safe context (can wait, and not in interrupt 2647 * context), we could be predictable and return right away. 2648 * This would guarantee that the allocation being performed 2649 * already belongs in the new cache. 2650 * 2651 * However, there are some clashes that can arise from locking.
2652 * For instance, because we acquire the slab_mutex while doing 2653 * memcg_create_kmem_cache, this means no further allocation 2654 * could happen with the slab_mutex held. So it's better to 2655 * defer everything. 2656 */ 2657 memcg_schedule_kmem_cache_create(memcg, cachep); 2658 out: 2659 css_put(&memcg->css); 2660 return cachep; 2661 } 2662 2663 /** 2664 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2665 * @cachep: the cache returned by memcg_kmem_get_cache 2666 */ 2667 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2668 { 2669 if (!is_root_cache(cachep)) 2670 css_put(&cachep->memcg_params.memcg->css); 2671 } 2672 2673 /** 2674 * __memcg_kmem_charge_memcg: charge a kmem page 2675 * @page: page to charge 2676 * @gfp: reclaim mode 2677 * @order: allocation order 2678 * @memcg: memory cgroup to charge 2679 * 2680 * Returns 0 on success, an error code on failure. 2681 */ 2682 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2683 struct mem_cgroup *memcg) 2684 { 2685 unsigned int nr_pages = 1 << order; 2686 struct page_counter *counter; 2687 int ret; 2688 2689 ret = try_charge(memcg, gfp, nr_pages); 2690 if (ret) 2691 return ret; 2692 2693 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2694 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2695 cancel_charge(memcg, nr_pages); 2696 return -ENOMEM; 2697 } 2698 2699 page->mem_cgroup = memcg; 2700 2701 return 0; 2702 } 2703 2704 /** 2705 * __memcg_kmem_charge: charge a kmem page to the current memory cgroup 2706 * @page: page to charge 2707 * @gfp: reclaim mode 2708 * @order: allocation order 2709 * 2710 * Returns 0 on success, an error code on failure. 2711 */ 2712 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2713 { 2714 struct mem_cgroup *memcg; 2715 int ret = 0; 2716 2717 if (memcg_kmem_bypass()) 2718 return 0; 2719 2720 memcg = get_mem_cgroup_from_current(); 2721 if (!mem_cgroup_is_root(memcg)) { 2722 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); 2723 if (!ret) 2724 __SetPageKmemcg(page); 2725 } 2726 css_put(&memcg->css); 2727 return ret; 2728 } 2729 /** 2730 * __memcg_kmem_uncharge: uncharge a kmem page 2731 * @page: page to uncharge 2732 * @order: allocation order 2733 */ 2734 void __memcg_kmem_uncharge(struct page *page, int order) 2735 { 2736 struct mem_cgroup *memcg = page->mem_cgroup; 2737 unsigned int nr_pages = 1 << order; 2738 2739 if (!memcg) 2740 return; 2741 2742 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2743 2744 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2745 page_counter_uncharge(&memcg->kmem, nr_pages); 2746 2747 page_counter_uncharge(&memcg->memory, nr_pages); 2748 if (do_memsw_account()) 2749 page_counter_uncharge(&memcg->memsw, nr_pages); 2750 2751 page->mem_cgroup = NULL; 2752 2753 /* slab pages do not have PageKmemcg flag set */ 2754 if (PageKmemcg(page)) 2755 __ClearPageKmemcg(page); 2756 2757 css_put_many(&memcg->css, nr_pages); 2758 } 2759 #endif /* CONFIG_MEMCG_KMEM */ 2760 2761 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2762 2763 /* 2764 * Because tail pages are not marked as "used", set it. We're under 2765 * pgdat->lru_lock and migration entries setup in all page mappings. 
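 *
 * (Editorial example: with a 2MB THP and HPAGE_PMD_NR == 512, the 511
 * tail pages inherit head->mem_cgroup below and MEMCG_RSS_HUGE drops
 * by 512, while the pages themselves remain charged to the memcg.)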
2766 */ 2767 void mem_cgroup_split_huge_fixup(struct page *head) 2768 { 2769 int i; 2770 2771 if (mem_cgroup_disabled()) 2772 return; 2773 2774 for (i = 1; i < HPAGE_PMD_NR; i++) 2775 head[i].mem_cgroup = head->mem_cgroup; 2776 2777 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); 2778 } 2779 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2780 2781 #ifdef CONFIG_MEMCG_SWAP 2782 /** 2783 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2784 * @entry: swap entry to be moved 2785 * @from: mem_cgroup which the entry is moved from 2786 * @to: mem_cgroup which the entry is moved to 2787 * 2788 * It succeeds only when the swap_cgroup's record for this entry is the same 2789 * as the mem_cgroup's id of @from. 2790 * 2791 * Returns 0 on success, -EINVAL on failure. 2792 * 2793 * The caller must have charged to @to, IOW, called page_counter_charge() about 2794 * both res and memsw, and called css_get(). 2795 */ 2796 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2797 struct mem_cgroup *from, struct mem_cgroup *to) 2798 { 2799 unsigned short old_id, new_id; 2800 2801 old_id = mem_cgroup_id(from); 2802 new_id = mem_cgroup_id(to); 2803 2804 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2805 mod_memcg_state(from, MEMCG_SWAP, -1); 2806 mod_memcg_state(to, MEMCG_SWAP, 1); 2807 return 0; 2808 } 2809 return -EINVAL; 2810 } 2811 #else 2812 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2813 struct mem_cgroup *from, struct mem_cgroup *to) 2814 { 2815 return -EINVAL; 2816 } 2817 #endif 2818 2819 static DEFINE_MUTEX(memcg_max_mutex); 2820 2821 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 2822 unsigned long max, bool memsw) 2823 { 2824 bool enlarge = false; 2825 bool drained = false; 2826 int ret; 2827 bool limits_invariant; 2828 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 2829 2830 do { 2831 if (signal_pending(current)) { 2832 ret = -EINTR; 2833 break; 2834 } 2835 2836 mutex_lock(&memcg_max_mutex); 2837 /* 2838 * Make sure that the new limit (memsw or memory limit) doesn't 2839 * break our basic invariant rule memory.max <= memsw.max. 2840 */ 2841 limits_invariant = memsw ? max >= memcg->memory.max : 2842 max <= memcg->memsw.max; 2843 if (!limits_invariant) { 2844 mutex_unlock(&memcg_max_mutex); 2845 ret = -EINVAL; 2846 break; 2847 } 2848 if (max > counter->max) 2849 enlarge = true; 2850 ret = page_counter_set_max(counter, max); 2851 mutex_unlock(&memcg_max_mutex); 2852 2853 if (!ret) 2854 break; 2855 2856 if (!drained) { 2857 drain_all_stock(memcg); 2858 drained = true; 2859 continue; 2860 } 2861 2862 if (!try_to_free_mem_cgroup_pages(memcg, 1, 2863 GFP_KERNEL, !memsw)) { 2864 ret = -EBUSY; 2865 break; 2866 } 2867 } while (true); 2868 2869 if (!ret && enlarge) 2870 memcg_oom_recover(memcg); 2871 2872 return ret; 2873 } 2874 2875 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2876 gfp_t gfp_mask, 2877 unsigned long *total_scanned) 2878 { 2879 unsigned long nr_reclaimed = 0; 2880 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2881 unsigned long reclaimed; 2882 int loop = 0; 2883 struct mem_cgroup_tree_per_node *mctz; 2884 unsigned long excess; 2885 unsigned long nr_scanned; 2886 2887 if (order > 0) 2888 return 0; 2889 2890 mctz = soft_limit_tree_node(pgdat->node_id); 2891 2892 /* 2893 * Do not even bother to check the largest node if the root 2894 * is empty. Do it lockless to prevent lock bouncing. 
Races 2895 * are acceptable as soft limit is best effort anyway. 2896 */ 2897 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 2898 return 0; 2899 2900 /* 2901 * This loop can run for a while, especially if memory cgroups continuously 2902 * keep exceeding their soft limit and putting the system under 2903 * pressure. 2904 */ 2905 do { 2906 if (next_mz) 2907 mz = next_mz; 2908 else 2909 mz = mem_cgroup_largest_soft_limit_node(mctz); 2910 if (!mz) 2911 break; 2912 2913 nr_scanned = 0; 2914 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 2915 gfp_mask, &nr_scanned); 2916 nr_reclaimed += reclaimed; 2917 *total_scanned += nr_scanned; 2918 spin_lock_irq(&mctz->lock); 2919 __mem_cgroup_remove_exceeded(mz, mctz); 2920 2921 /* 2922 * If we failed to reclaim anything from this memory cgroup 2923 * it is time to move on to the next cgroup 2924 */ 2925 next_mz = NULL; 2926 if (!reclaimed) 2927 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2928 2929 excess = soft_limit_excess(mz->memcg); 2930 /* 2931 * One school of thought says that we should not add 2932 * back the node to the tree if reclaim returns 0. 2933 * But our reclaim could return 0 simply because, due 2934 * to priority, we are exposing a smaller subset of 2935 * memory to reclaim from. Consider this as a longer 2936 * term TODO. 2937 */ 2938 /* If excess == 0, no tree ops */ 2939 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2940 spin_unlock_irq(&mctz->lock); 2941 css_put(&mz->memcg->css); 2942 loop++; 2943 /* 2944 * Could not reclaim anything and there are no more 2945 * mem cgroups to try or we seem to be looping without 2946 * reclaiming anything. 2947 */ 2948 if (!nr_reclaimed && 2949 (next_mz == NULL || 2950 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2951 break; 2952 } while (!nr_reclaimed); 2953 if (next_mz) 2954 css_put(&next_mz->memcg->css); 2955 return nr_reclaimed; 2956 } 2957 2958 /* 2959 * Test whether @memcg has children, dead or alive. Note that this 2960 * function doesn't care whether @memcg has use_hierarchy enabled and 2961 * returns %true if there are child csses according to the cgroup 2962 * hierarchy. Testing use_hierarchy is the caller's responsibility. 2963 */ 2964 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2965 { 2966 bool ret; 2967 2968 rcu_read_lock(); 2969 ret = css_next_child(NULL, &memcg->css); 2970 rcu_read_unlock(); 2971 return ret; 2972 } 2973 2974 /* 2975 * Reclaims as many pages from the given memcg as possible. 2976 * 2977 * Caller is responsible for holding css reference for memcg. 2978 */ 2979 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2980 { 2981 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2982 2983 /* we call try-to-free pages to make this cgroup empty */ 2984 lru_add_drain_all(); 2985 2986 drain_all_stock(memcg); 2987 2988 /* try to free all pages in this cgroup */ 2989 while (nr_retries && page_counter_read(&memcg->memory)) { 2990 int progress; 2991 2992 if (signal_pending(current)) 2993 return -EINTR; 2994 2995 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2996 GFP_KERNEL, true); 2997 if (!progress) { 2998 nr_retries--; 2999 /* maybe some writeback is necessary */ 3000 congestion_wait(BLK_RW_ASYNC, HZ/10); 3001 } 3002 3003 } 3004 3005 return 0; 3006 } 3007 3008 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3009 char *buf, size_t nbytes, 3010 loff_t off) 3011 { 3012 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3013 3014 if (mem_cgroup_is_root(memcg)) 3015 return -EINVAL; 3016 return mem_cgroup_force_empty(memcg) ?: nbytes; 3017 } 3018 3019 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3020 struct cftype *cft) 3021 { 3022 return mem_cgroup_from_css(css)->use_hierarchy; 3023 } 3024 3025 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3026 struct cftype *cft, u64 val) 3027 { 3028 int retval = 0; 3029 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3030 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 3031 3032 if (memcg->use_hierarchy == val) 3033 return 0; 3034 3035 /* 3036 * If parent's use_hierarchy is set, we can't make any modifications 3037 * in the child subtrees. If it is unset, then the change can 3038 * occur, provided the current cgroup has no children. 3039 * 3040 * For the root cgroup, parent_memcg is NULL; we allow the value to be 3041 * set if there are no children.
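 *
 * Illustrative cgroup1 behaviour (editorial example): with no children
 * present, "echo 1 > memory.use_hierarchy" succeeds; once a child
 * cgroup exists the same write fails with -EBUSY, and it fails with
 * -EINVAL if our parent already runs with use_hierarchy set, because
 * that would punch a hole in the parent's hierarchical accounting.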
3042 */ 3043 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3044 (val == 1 || val == 0)) { 3045 if (!memcg_has_children(memcg)) 3046 memcg->use_hierarchy = val; 3047 else 3048 retval = -EBUSY; 3049 } else 3050 retval = -EINVAL; 3051 3052 return retval; 3053 } 3054 3055 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3056 { 3057 unsigned long val; 3058 3059 if (mem_cgroup_is_root(memcg)) { 3060 val = memcg_page_state(memcg, MEMCG_CACHE) + 3061 memcg_page_state(memcg, MEMCG_RSS); 3062 if (swap) 3063 val += memcg_page_state(memcg, MEMCG_SWAP); 3064 } else { 3065 if (!swap) 3066 val = page_counter_read(&memcg->memory); 3067 else 3068 val = page_counter_read(&memcg->memsw); 3069 } 3070 return val; 3071 } 3072 3073 enum { 3074 RES_USAGE, 3075 RES_LIMIT, 3076 RES_MAX_USAGE, 3077 RES_FAILCNT, 3078 RES_SOFT_LIMIT, 3079 }; 3080 3081 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3082 struct cftype *cft) 3083 { 3084 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3085 struct page_counter *counter; 3086 3087 switch (MEMFILE_TYPE(cft->private)) { 3088 case _MEM: 3089 counter = &memcg->memory; 3090 break; 3091 case _MEMSWAP: 3092 counter = &memcg->memsw; 3093 break; 3094 case _KMEM: 3095 counter = &memcg->kmem; 3096 break; 3097 case _TCP: 3098 counter = &memcg->tcpmem; 3099 break; 3100 default: 3101 BUG(); 3102 } 3103 3104 switch (MEMFILE_ATTR(cft->private)) { 3105 case RES_USAGE: 3106 if (counter == &memcg->memory) 3107 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3108 if (counter == &memcg->memsw) 3109 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3110 return (u64)page_counter_read(counter) * PAGE_SIZE; 3111 case RES_LIMIT: 3112 return (u64)counter->max * PAGE_SIZE; 3113 case RES_MAX_USAGE: 3114 return (u64)counter->watermark * PAGE_SIZE; 3115 case RES_FAILCNT: 3116 return counter->failcnt; 3117 case RES_SOFT_LIMIT: 3118 return (u64)memcg->soft_limit * PAGE_SIZE; 3119 default: 3120 BUG(); 3121 } 3122 } 3123 3124 #ifdef CONFIG_MEMCG_KMEM 3125 static int memcg_online_kmem(struct mem_cgroup *memcg) 3126 { 3127 int memcg_id; 3128 3129 if (cgroup_memory_nokmem) 3130 return 0; 3131 3132 BUG_ON(memcg->kmemcg_id >= 0); 3133 BUG_ON(memcg->kmem_state); 3134 3135 memcg_id = memcg_alloc_cache_id(); 3136 if (memcg_id < 0) 3137 return memcg_id; 3138 3139 static_branch_inc(&memcg_kmem_enabled_key); 3140 /* 3141 * A memory cgroup is considered kmem-online as soon as it gets 3142 * kmemcg_id. Setting the id after enabling static branching will 3143 * guarantee no one starts accounting before all call sites are 3144 * patched. 3145 */ 3146 memcg->kmemcg_id = memcg_id; 3147 memcg->kmem_state = KMEM_ONLINE; 3148 INIT_LIST_HEAD(&memcg->kmem_caches); 3149 3150 return 0; 3151 } 3152 3153 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3154 { 3155 struct cgroup_subsys_state *css; 3156 struct mem_cgroup *parent, *child; 3157 int kmemcg_id; 3158 3159 if (memcg->kmem_state != KMEM_ONLINE) 3160 return; 3161 /* 3162 * Clear the online state before clearing memcg_caches array 3163 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 3164 * guarantees that no cache will be created for this cgroup 3165 * after we are done (see memcg_create_kmem_cache()). 
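 *
 * Lifecycle sketch (editorial summary of this file's logic; state
 * names as defined in memcontrol.h):
 *
 *	KMEM_NONE  --memcg_online_kmem()-->  KMEM_ONLINE
 *	KMEM_ONLINE  --here-->  KMEM_ALLOCATED
 *	KMEM_ALLOCATED  --memcg_free_kmem()-->  caches destroyed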
3166 */ 3167 memcg->kmem_state = KMEM_ALLOCATED; 3168 3169 memcg_deactivate_kmem_caches(memcg); 3170 3171 kmemcg_id = memcg->kmemcg_id; 3172 BUG_ON(kmemcg_id < 0); 3173 3174 parent = parent_mem_cgroup(memcg); 3175 if (!parent) 3176 parent = root_mem_cgroup; 3177 3178 /* 3179 * Change kmemcg_id of this cgroup and all its descendants to the 3180 * parent's id, and then move all entries from this cgroup's list_lrus 3181 * to ones of the parent. After we have finished, all list_lrus 3182 * corresponding to this cgroup are guaranteed to remain empty. The 3183 * ordering is imposed by list_lru_node->lock taken by 3184 * memcg_drain_all_list_lrus(). 3185 */ 3186 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3187 css_for_each_descendant_pre(css, &memcg->css) { 3188 child = mem_cgroup_from_css(css); 3189 BUG_ON(child->kmemcg_id != kmemcg_id); 3190 child->kmemcg_id = parent->kmemcg_id; 3191 if (!memcg->use_hierarchy) 3192 break; 3193 } 3194 rcu_read_unlock(); 3195 3196 memcg_drain_all_list_lrus(kmemcg_id, parent); 3197 3198 memcg_free_cache_id(kmemcg_id); 3199 } 3200 3201 static void memcg_free_kmem(struct mem_cgroup *memcg) 3202 { 3203 /* css_alloc() failed, offlining didn't happen */ 3204 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3205 memcg_offline_kmem(memcg); 3206 3207 if (memcg->kmem_state == KMEM_ALLOCATED) { 3208 memcg_destroy_kmem_caches(memcg); 3209 static_branch_dec(&memcg_kmem_enabled_key); 3210 WARN_ON(page_counter_read(&memcg->kmem)); 3211 } 3212 } 3213 #else 3214 static int memcg_online_kmem(struct mem_cgroup *memcg) 3215 { 3216 return 0; 3217 } 3218 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3219 { 3220 } 3221 static void memcg_free_kmem(struct mem_cgroup *memcg) 3222 { 3223 } 3224 #endif /* CONFIG_MEMCG_KMEM */ 3225 3226 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3227 unsigned long max) 3228 { 3229 int ret; 3230 3231 mutex_lock(&memcg_max_mutex); 3232 ret = page_counter_set_max(&memcg->kmem, max); 3233 mutex_unlock(&memcg_max_mutex); 3234 return ret; 3235 } 3236 3237 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3238 { 3239 int ret; 3240 3241 mutex_lock(&memcg_max_mutex); 3242 3243 ret = page_counter_set_max(&memcg->tcpmem, max); 3244 if (ret) 3245 goto out; 3246 3247 if (!memcg->tcpmem_active) { 3248 /* 3249 * The active flag needs to be written after the static_key 3250 * update. This is what guarantees that the socket activation 3251 * function is the last one to run. See mem_cgroup_sk_alloc() 3252 * for details, and note that we don't mark any socket as 3253 * belonging to this memcg until that flag is up. 3254 * 3255 * We need to do this, because static_keys will span multiple 3256 * sites, but we can't control their order. If we mark a socket 3257 * as accounted, but the accounting functions are not patched in 3258 * yet, we'll lose accounting. 3259 * 3260 * We never race with the readers in mem_cgroup_sk_alloc(), 3261 * because when this value change, the code to process it is not 3262 * patched in yet. 3263 */ 3264 static_branch_inc(&memcg_sockets_enabled_key); 3265 memcg->tcpmem_active = true; 3266 } 3267 out: 3268 mutex_unlock(&memcg_max_mutex); 3269 return ret; 3270 } 3271 3272 /* 3273 * The user of this function is... 3274 * RES_LIMIT. 
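 *
 * Illustrative round trip (editorial example, cgroup1 interface):
 * "echo 512M > memory.limit_in_bytes" arrives in mem_cgroup_write()
 * below, page_counter_memparse() turns the string into a page count,
 * and mem_cgroup_resize_max() then reclaims as needed or returns
 * -EBUSY if usage cannot be pushed under the new limit.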
3275 */ 3276 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3277 char *buf, size_t nbytes, loff_t off) 3278 { 3279 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3280 unsigned long nr_pages; 3281 int ret; 3282 3283 buf = strstrip(buf); 3284 ret = page_counter_memparse(buf, "-1", &nr_pages); 3285 if (ret) 3286 return ret; 3287 3288 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3289 case RES_LIMIT: 3290 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3291 ret = -EINVAL; 3292 break; 3293 } 3294 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3295 case _MEM: 3296 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3297 break; 3298 case _MEMSWAP: 3299 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3300 break; 3301 case _KMEM: 3302 ret = memcg_update_kmem_max(memcg, nr_pages); 3303 break; 3304 case _TCP: 3305 ret = memcg_update_tcp_max(memcg, nr_pages); 3306 break; 3307 } 3308 break; 3309 case RES_SOFT_LIMIT: 3310 memcg->soft_limit = nr_pages; 3311 ret = 0; 3312 break; 3313 } 3314 return ret ?: nbytes; 3315 } 3316 3317 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3318 size_t nbytes, loff_t off) 3319 { 3320 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3321 struct page_counter *counter; 3322 3323 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3324 case _MEM: 3325 counter = &memcg->memory; 3326 break; 3327 case _MEMSWAP: 3328 counter = &memcg->memsw; 3329 break; 3330 case _KMEM: 3331 counter = &memcg->kmem; 3332 break; 3333 case _TCP: 3334 counter = &memcg->tcpmem; 3335 break; 3336 default: 3337 BUG(); 3338 } 3339 3340 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3341 case RES_MAX_USAGE: 3342 page_counter_reset_watermark(counter); 3343 break; 3344 case RES_FAILCNT: 3345 counter->failcnt = 0; 3346 break; 3347 default: 3348 BUG(); 3349 } 3350 3351 return nbytes; 3352 } 3353 3354 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3355 struct cftype *cft) 3356 { 3357 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3358 } 3359 3360 #ifdef CONFIG_MMU 3361 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3362 struct cftype *cft, u64 val) 3363 { 3364 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3365 3366 if (val & ~MOVE_MASK) 3367 return -EINVAL; 3368 3369 /* 3370 * No kind of locking is needed in here, because ->can_attach() will 3371 * check this value once in the beginning of the process, and then carry 3372 * on with stale data. This means that changes to this value will only 3373 * affect task migrations starting after the change. 
3374 */ 3375 memcg->move_charge_at_immigrate = val; 3376 return 0; 3377 } 3378 #else 3379 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3380 struct cftype *cft, u64 val) 3381 { 3382 return -ENOSYS; 3383 } 3384 #endif 3385 3386 #ifdef CONFIG_NUMA 3387 3388 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3389 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3390 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3391 3392 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3393 int nid, unsigned int lru_mask) 3394 { 3395 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg); 3396 unsigned long nr = 0; 3397 enum lru_list lru; 3398 3399 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3400 3401 for_each_lru(lru) { 3402 if (!(BIT(lru) & lru_mask)) 3403 continue; 3404 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3405 } 3406 return nr; 3407 } 3408 3409 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3410 unsigned int lru_mask) 3411 { 3412 unsigned long nr = 0; 3413 enum lru_list lru; 3414 3415 for_each_lru(lru) { 3416 if (!(BIT(lru) & lru_mask)) 3417 continue; 3418 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3419 } 3420 return nr; 3421 } 3422 3423 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3424 { 3425 struct numa_stat { 3426 const char *name; 3427 unsigned int lru_mask; 3428 }; 3429 3430 static const struct numa_stat stats[] = { 3431 { "total", LRU_ALL }, 3432 { "file", LRU_ALL_FILE }, 3433 { "anon", LRU_ALL_ANON }, 3434 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3435 }; 3436 const struct numa_stat *stat; 3437 int nid; 3438 unsigned long nr; 3439 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3440 3441 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3442 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3443 seq_printf(m, "%s=%lu", stat->name, nr); 3444 for_each_node_state(nid, N_MEMORY) { 3445 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3446 stat->lru_mask); 3447 seq_printf(m, " N%d=%lu", nid, nr); 3448 } 3449 seq_putc(m, '\n'); 3450 } 3451 3452 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3453 struct mem_cgroup *iter; 3454 3455 nr = 0; 3456 for_each_mem_cgroup_tree(iter, memcg) 3457 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3458 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3459 for_each_node_state(nid, N_MEMORY) { 3460 nr = 0; 3461 for_each_mem_cgroup_tree(iter, memcg) 3462 nr += mem_cgroup_node_nr_lru_pages( 3463 iter, nid, stat->lru_mask); 3464 seq_printf(m, " N%d=%lu", nid, nr); 3465 } 3466 seq_putc(m, '\n'); 3467 } 3468 3469 return 0; 3470 } 3471 #endif /* CONFIG_NUMA */ 3472 3473 /* Universal VM events cgroup1 shows, original sort order */ 3474 static const unsigned int memcg1_events[] = { 3475 PGPGIN, 3476 PGPGOUT, 3477 PGFAULT, 3478 PGMAJFAULT, 3479 }; 3480 3481 static const char *const memcg1_event_names[] = { 3482 "pgpgin", 3483 "pgpgout", 3484 "pgfault", 3485 "pgmajfault", 3486 }; 3487 3488 static int memcg_stat_show(struct seq_file *m, void *v) 3489 { 3490 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3491 unsigned long memory, memsw; 3492 struct mem_cgroup *mi; 3493 unsigned int i; 3494 3495 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3496 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3497 3498 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3499 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3500 continue; 3501 
seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 3502 memcg_page_state_local(memcg, memcg1_stats[i]) * 3503 PAGE_SIZE); 3504 } 3505 3506 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3507 seq_printf(m, "%s %lu\n", memcg1_event_names[i], 3508 memcg_events_local(memcg, memcg1_events[i])); 3509 3510 for (i = 0; i < NR_LRU_LISTS; i++) 3511 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3512 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 3513 PAGE_SIZE); 3514 3515 /* Hierarchical information */ 3516 memory = memsw = PAGE_COUNTER_MAX; 3517 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3518 memory = min(memory, mi->memory.max); 3519 memsw = min(memsw, mi->memsw.max); 3520 } 3521 seq_printf(m, "hierarchical_memory_limit %llu\n", 3522 (u64)memory * PAGE_SIZE); 3523 if (do_memsw_account()) 3524 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3525 (u64)memsw * PAGE_SIZE); 3526 3527 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3528 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3529 continue; 3530 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 3531 (u64)memcg_page_state(memcg, i) * PAGE_SIZE); 3532 } 3533 3534 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3535 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], 3536 (u64)memcg_events(memcg, i)); 3537 3538 for (i = 0; i < NR_LRU_LISTS; i++) 3539 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], 3540 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 3541 PAGE_SIZE); 3542 3543 #ifdef CONFIG_DEBUG_VM 3544 { 3545 pg_data_t *pgdat; 3546 struct mem_cgroup_per_node *mz; 3547 struct zone_reclaim_stat *rstat; 3548 unsigned long recent_rotated[2] = {0, 0}; 3549 unsigned long recent_scanned[2] = {0, 0}; 3550 3551 for_each_online_pgdat(pgdat) { 3552 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3553 rstat = &mz->lruvec.reclaim_stat; 3554 3555 recent_rotated[0] += rstat->recent_rotated[0]; 3556 recent_rotated[1] += rstat->recent_rotated[1]; 3557 recent_scanned[0] += rstat->recent_scanned[0]; 3558 recent_scanned[1] += rstat->recent_scanned[1]; 3559 } 3560 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3561 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3562 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3563 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3564 } 3565 #endif 3566 3567 return 0; 3568 } 3569 3570 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3571 struct cftype *cft) 3572 { 3573 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3574 3575 return mem_cgroup_swappiness(memcg); 3576 } 3577 3578 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3579 struct cftype *cft, u64 val) 3580 { 3581 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3582 3583 if (val > 100) 3584 return -EINVAL; 3585 3586 if (css->parent) 3587 memcg->swappiness = val; 3588 else 3589 vm_swappiness = val; 3590 3591 return 0; 3592 } 3593 3594 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3595 { 3596 struct mem_cgroup_threshold_ary *t; 3597 unsigned long usage; 3598 int i; 3599 3600 rcu_read_lock(); 3601 if (!swap) 3602 t = rcu_dereference(memcg->thresholds.primary); 3603 else 3604 t = rcu_dereference(memcg->memsw_thresholds.primary); 3605 3606 if (!t) 3607 goto unlock; 3608 3609 usage = mem_cgroup_usage(memcg, swap); 3610 3611 /* 3612 * current_threshold points to threshold just below or equal to usage. 
3613 * If it's not true, a threshold was crossed after last 3614 * call of __mem_cgroup_threshold(). 3615 */ 3616 i = t->current_threshold; 3617 3618 /* 3619 * Iterate backward over array of thresholds starting from 3620 * current_threshold and check if a threshold is crossed. 3621 * If none of thresholds below usage is crossed, we read 3622 * only one element of the array here. 3623 */ 3624 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3625 eventfd_signal(t->entries[i].eventfd, 1); 3626 3627 /* i = current_threshold + 1 */ 3628 i++; 3629 3630 /* 3631 * Iterate forward over array of thresholds starting from 3632 * current_threshold+1 and check if a threshold is crossed. 3633 * If none of thresholds above usage is crossed, we read 3634 * only one element of the array here. 3635 */ 3636 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3637 eventfd_signal(t->entries[i].eventfd, 1); 3638 3639 /* Update current_threshold */ 3640 t->current_threshold = i - 1; 3641 unlock: 3642 rcu_read_unlock(); 3643 } 3644 3645 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3646 { 3647 while (memcg) { 3648 __mem_cgroup_threshold(memcg, false); 3649 if (do_memsw_account()) 3650 __mem_cgroup_threshold(memcg, true); 3651 3652 memcg = parent_mem_cgroup(memcg); 3653 } 3654 } 3655 3656 static int compare_thresholds(const void *a, const void *b) 3657 { 3658 const struct mem_cgroup_threshold *_a = a; 3659 const struct mem_cgroup_threshold *_b = b; 3660 3661 if (_a->threshold > _b->threshold) 3662 return 1; 3663 3664 if (_a->threshold < _b->threshold) 3665 return -1; 3666 3667 return 0; 3668 } 3669 3670 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3671 { 3672 struct mem_cgroup_eventfd_list *ev; 3673 3674 spin_lock(&memcg_oom_lock); 3675 3676 list_for_each_entry(ev, &memcg->oom_notify, list) 3677 eventfd_signal(ev->eventfd, 1); 3678 3679 spin_unlock(&memcg_oom_lock); 3680 return 0; 3681 } 3682 3683 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3684 { 3685 struct mem_cgroup *iter; 3686 3687 for_each_mem_cgroup_tree(iter, memcg) 3688 mem_cgroup_oom_notify_cb(iter); 3689 } 3690 3691 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3692 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3693 { 3694 struct mem_cgroup_thresholds *thresholds; 3695 struct mem_cgroup_threshold_ary *new; 3696 unsigned long threshold; 3697 unsigned long usage; 3698 int i, size, ret; 3699 3700 ret = page_counter_memparse(args, "-1", &threshold); 3701 if (ret) 3702 return ret; 3703 3704 mutex_lock(&memcg->thresholds_lock); 3705 3706 if (type == _MEM) { 3707 thresholds = &memcg->thresholds; 3708 usage = mem_cgroup_usage(memcg, false); 3709 } else if (type == _MEMSWAP) { 3710 thresholds = &memcg->memsw_thresholds; 3711 usage = mem_cgroup_usage(memcg, true); 3712 } else 3713 BUG(); 3714 3715 /* Check if a threshold crossed before adding a new one */ 3716 if (thresholds->primary) 3717 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3718 3719 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 3720 3721 /* Allocate memory for new array of thresholds */ 3722 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 3723 if (!new) { 3724 ret = -ENOMEM; 3725 goto unlock; 3726 } 3727 new->size = size; 3728 3729 /* Copy thresholds (if any) to new array */ 3730 if (thresholds->primary) { 3731 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3732 sizeof(struct mem_cgroup_threshold)); 3733 } 3734 3735 /* Add new threshold */ 3736 new->entries[size - 1].eventfd = eventfd; 3737 new->entries[size - 1].threshold = threshold; 3738 3739 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3740 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3741 compare_thresholds, NULL); 3742 3743 /* Find current threshold */ 3744 new->current_threshold = -1; 3745 for (i = 0; i < size; i++) { 3746 if (new->entries[i].threshold <= usage) { 3747 /* 3748 * new->current_threshold will not be used until 3749 * rcu_assign_pointer(), so it's safe to increment 3750 * it here. 3751 */ 3752 ++new->current_threshold; 3753 } else 3754 break; 3755 } 3756 3757 /* Free old spare buffer and save old primary buffer as spare */ 3758 kfree(thresholds->spare); 3759 thresholds->spare = thresholds->primary; 3760 3761 rcu_assign_pointer(thresholds->primary, new); 3762 3763 /* To be sure that nobody uses thresholds */ 3764 synchronize_rcu(); 3765 3766 unlock: 3767 mutex_unlock(&memcg->thresholds_lock); 3768 3769 return ret; 3770 } 3771 3772 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3773 struct eventfd_ctx *eventfd, const char *args) 3774 { 3775 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3776 } 3777 3778 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3779 struct eventfd_ctx *eventfd, const char *args) 3780 { 3781 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3782 } 3783 3784 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3785 struct eventfd_ctx *eventfd, enum res_type type) 3786 { 3787 struct mem_cgroup_thresholds *thresholds; 3788 struct mem_cgroup_threshold_ary *new; 3789 unsigned long usage; 3790 int i, j, size; 3791 3792 mutex_lock(&memcg->thresholds_lock); 3793 3794 if (type == _MEM) { 3795 thresholds = &memcg->thresholds; 3796 usage = mem_cgroup_usage(memcg, false); 3797 } else if (type == _MEMSWAP) { 3798 thresholds = &memcg->memsw_thresholds; 3799 usage = mem_cgroup_usage(memcg, true); 3800 } else 3801 BUG(); 3802 3803 if (!thresholds->primary) 3804 goto unlock; 3805 3806 /* Check if a threshold crossed before removing */ 3807 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3808 3809 /* Calculate new number of threshold */ 3810 size = 0; 3811 for (i = 0; i < thresholds->primary->size; i++) { 3812 if (thresholds->primary->entries[i].eventfd != eventfd) 3813 size++; 3814 } 3815 3816 new = thresholds->spare; 3817 3818 /* Set thresholds array to NULL if we don't have thresholds */ 3819 if (!size) { 3820 kfree(new); 3821 new = NULL; 3822 goto swap_buffers; 3823 } 3824 3825 new->size = size; 3826 3827 /* Copy thresholds and find current threshold */ 3828 new->current_threshold = -1; 3829 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3830 if (thresholds->primary->entries[i].eventfd == eventfd) 3831 continue; 3832 3833 new->entries[j] = thresholds->primary->entries[i]; 3834 if (new->entries[j].threshold <= usage) { 3835 /* 3836 * new->current_threshold will not be used 3837 * until rcu_assign_pointer(), so 
it's safe to increment 3838 * it here. 3839 */ 3840 ++new->current_threshold; 3841 } 3842 j++; 3843 } 3844 3845 swap_buffers: 3846 /* Swap primary and spare array */ 3847 thresholds->spare = thresholds->primary; 3848 3849 rcu_assign_pointer(thresholds->primary, new); 3850 3851 /* To be sure that nobody uses thresholds */ 3852 synchronize_rcu(); 3853 3854 /* If all events are unregistered, free the spare array */ 3855 if (!new) { 3856 kfree(thresholds->spare); 3857 thresholds->spare = NULL; 3858 } 3859 unlock: 3860 mutex_unlock(&memcg->thresholds_lock); 3861 } 3862 3863 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3864 struct eventfd_ctx *eventfd) 3865 { 3866 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3867 } 3868 3869 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3870 struct eventfd_ctx *eventfd) 3871 { 3872 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3873 } 3874 3875 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3876 struct eventfd_ctx *eventfd, const char *args) 3877 { 3878 struct mem_cgroup_eventfd_list *event; 3879 3880 event = kmalloc(sizeof(*event), GFP_KERNEL); 3881 if (!event) 3882 return -ENOMEM; 3883 3884 spin_lock(&memcg_oom_lock); 3885 3886 event->eventfd = eventfd; 3887 list_add(&event->list, &memcg->oom_notify); 3888 3889 /* already in OOM ? */ 3890 if (memcg->under_oom) 3891 eventfd_signal(eventfd, 1); 3892 spin_unlock(&memcg_oom_lock); 3893 3894 return 0; 3895 } 3896 3897 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3898 struct eventfd_ctx *eventfd) 3899 { 3900 struct mem_cgroup_eventfd_list *ev, *tmp; 3901 3902 spin_lock(&memcg_oom_lock); 3903 3904 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3905 if (ev->eventfd == eventfd) { 3906 list_del(&ev->list); 3907 kfree(ev); 3908 } 3909 } 3910 3911 spin_unlock(&memcg_oom_lock); 3912 } 3913 3914 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3915 { 3916 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 3917 3918 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3919 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3920 seq_printf(sf, "oom_kill %lu\n", 3921 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 3922 return 0; 3923 } 3924 3925 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3926 struct cftype *cft, u64 val) 3927 { 3928 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3929 3930 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3931 if (!css->parent || !((val == 0) || (val == 1))) 3932 return -EINVAL; 3933 3934 memcg->oom_kill_disable = val; 3935 if (!val) 3936 memcg_oom_recover(memcg); 3937 3938 return 0; 3939 } 3940 3941 #ifdef CONFIG_CGROUP_WRITEBACK 3942 3943 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3944 { 3945 return wb_domain_init(&memcg->cgwb_domain, gfp); 3946 } 3947 3948 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3949 { 3950 wb_domain_exit(&memcg->cgwb_domain); 3951 } 3952 3953 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3954 { 3955 wb_domain_size_changed(&memcg->cgwb_domain); 3956 } 3957 3958 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3959 { 3960 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3961 3962 if (!memcg->css.parent) 3963 return NULL; 3964 3965 return &memcg->cgwb_domain; 3966 } 3967 3968 /* 3969 * idx can be of type enum memcg_stat_item or 
node_stat_item. 3970 * Keep in sync with memcg_exact_page(). 3971 */ 3972 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 3973 { 3974 long x = atomic_long_read(&memcg->vmstats[idx]); 3975 int cpu; 3976 3977 for_each_online_cpu(cpu) 3978 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 3979 if (x < 0) 3980 x = 0; 3981 return x; 3982 } 3983 3984 /** 3985 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3986 * @wb: bdi_writeback in question 3987 * @pfilepages: out parameter for number of file pages 3988 * @pheadroom: out parameter for number of allocatable pages according to memcg 3989 * @pdirty: out parameter for number of dirty pages 3990 * @pwriteback: out parameter for number of pages under writeback 3991 * 3992 * Determine the numbers of file, headroom, dirty, and writeback pages in 3993 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3994 * is a bit more involved. 3995 * 3996 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3997 * headroom is calculated as the lowest headroom of itself and the 3998 * ancestors. Note that this doesn't consider the actual amount of 3999 * available memory in the system. The caller should further cap 4000 * *@pheadroom accordingly. 4001 */ 4002 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4003 unsigned long *pheadroom, unsigned long *pdirty, 4004 unsigned long *pwriteback) 4005 { 4006 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4007 struct mem_cgroup *parent; 4008 4009 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4010 4011 /* this should eventually include NR_UNSTABLE_NFS */ 4012 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4013 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4014 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4015 *pheadroom = PAGE_COUNTER_MAX; 4016 4017 while ((parent = parent_mem_cgroup(memcg))) { 4018 unsigned long ceiling = min(memcg->memory.max, memcg->high); 4019 unsigned long used = page_counter_read(&memcg->memory); 4020 4021 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4022 memcg = parent; 4023 } 4024 } 4025 4026 #else /* CONFIG_CGROUP_WRITEBACK */ 4027 4028 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4029 { 4030 return 0; 4031 } 4032 4033 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4034 { 4035 } 4036 4037 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4038 { 4039 } 4040 4041 #endif /* CONFIG_CGROUP_WRITEBACK */ 4042 4043 /* 4044 * DO NOT USE IN NEW FILES. 4045 * 4046 * "cgroup.event_control" implementation. 4047 * 4048 * This is way over-engineered. It tries to support fully configurable 4049 * events for each user. Such level of flexibility is completely 4050 * unnecessary especially in the light of the planned unified hierarchy. 4051 * 4052 * Please deprecate this and replace with something simpler if at all 4053 * possible. 4054 */ 4055 4056 /* 4057 * Unregister event and free resources. 4058 * 4059 * Gets called from workqueue. 4060 */ 4061 static void memcg_event_remove(struct work_struct *work) 4062 { 4063 struct mem_cgroup_event *event = 4064 container_of(work, struct mem_cgroup_event, remove); 4065 struct mem_cgroup *memcg = event->memcg; 4066 4067 remove_wait_queue(event->wqh, &event->wait); 4068 4069 event->unregister_event(memcg, event->eventfd); 4070 4071 /* Notify userspace the event is going away. 
*/ 4072 eventfd_signal(event->eventfd, 1); 4073 4074 eventfd_ctx_put(event->eventfd); 4075 kfree(event); 4076 css_put(&memcg->css); 4077 } 4078 4079 /* 4080 * Gets called on EPOLLHUP on eventfd when the user closes it. 4081 * 4082 * Called with wqh->lock held and interrupts disabled. 4083 */ 4084 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4085 int sync, void *key) 4086 { 4087 struct mem_cgroup_event *event = 4088 container_of(wait, struct mem_cgroup_event, wait); 4089 struct mem_cgroup *memcg = event->memcg; 4090 __poll_t flags = key_to_poll(key); 4091 4092 if (flags & EPOLLHUP) { 4093 /* 4094 * If the event has been detached at cgroup removal, we 4095 * can simply return knowing the other side will clean up 4096 * for us. 4097 * 4098 * We can't race against event freeing since the other 4099 * side will require wqh->lock via remove_wait_queue(), 4100 * which we hold. 4101 */ 4102 spin_lock(&memcg->event_list_lock); 4103 if (!list_empty(&event->list)) { 4104 list_del_init(&event->list); 4105 /* 4106 * We are in atomic context, but memcg_event_remove() 4107 * may sleep, so we have to call it from a workqueue. 4108 */ 4109 schedule_work(&event->remove); 4110 } 4111 spin_unlock(&memcg->event_list_lock); 4112 } 4113 4114 return 0; 4115 } 4116 4117 static void memcg_event_ptable_queue_proc(struct file *file, 4118 wait_queue_head_t *wqh, poll_table *pt) 4119 { 4120 struct mem_cgroup_event *event = 4121 container_of(pt, struct mem_cgroup_event, pt); 4122 4123 event->wqh = wqh; 4124 add_wait_queue(wqh, &event->wait); 4125 } 4126 4127 /* 4128 * DO NOT USE IN NEW FILES. 4129 * 4130 * Parse input and register a new cgroup event handler. 4131 * 4132 * Input must be in the format '<event_fd> <control_fd> <args>'. 4133 * Interpretation of args is defined by the control file implementation. 4134 */ 4135 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4136 char *buf, size_t nbytes, loff_t off) 4137 { 4138 struct cgroup_subsys_state *css = of_css(of); 4139 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4140 struct mem_cgroup_event *event; 4141 struct cgroup_subsys_state *cfile_css; 4142 unsigned int efd, cfd; 4143 struct fd efile; 4144 struct fd cfile; 4145 const char *name; 4146 char *endp; 4147 int ret; 4148 4149 buf = strstrip(buf); 4150 4151 efd = simple_strtoul(buf, &endp, 10); 4152 if (*endp != ' ') 4153 return -EINVAL; 4154 buf = endp + 1; 4155 4156 cfd = simple_strtoul(buf, &endp, 10); 4157 if ((*endp != ' ') && (*endp != '\0')) 4158 return -EINVAL; 4159 buf = endp + 1; 4160 4161 event = kzalloc(sizeof(*event), GFP_KERNEL); 4162 if (!event) 4163 return -ENOMEM; 4164 4165 event->memcg = memcg; 4166 INIT_LIST_HEAD(&event->list); 4167 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4168 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4169 INIT_WORK(&event->remove, memcg_event_remove); 4170 4171 efile = fdget(efd); 4172 if (!efile.file) { 4173 ret = -EBADF; 4174 goto out_kfree; 4175 } 4176 4177 event->eventfd = eventfd_ctx_fileget(efile.file); 4178 if (IS_ERR(event->eventfd)) { 4179 ret = PTR_ERR(event->eventfd); 4180 goto out_put_efile; 4181 } 4182 4183 cfile = fdget(cfd); 4184 if (!cfile.file) { 4185 ret = -EBADF; 4186 goto out_put_eventfd; 4187 } 4188 4189 /* the process needs read permission on the control file */ 4190 /* AV: shouldn't we check that it's been opened for read instead?
*/ 4191 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4192 if (ret < 0) 4193 goto out_put_cfile; 4194 4195 /* 4196 * Determine the event callbacks and set them in @event. This used 4197 * to be done via struct cftype but cgroup core no longer knows 4198 * about these events. The following is crude but the whole thing 4199 * is for compatibility anyway. 4200 * 4201 * DO NOT ADD NEW FILES. 4202 */ 4203 name = cfile.file->f_path.dentry->d_name.name; 4204 4205 if (!strcmp(name, "memory.usage_in_bytes")) { 4206 event->register_event = mem_cgroup_usage_register_event; 4207 event->unregister_event = mem_cgroup_usage_unregister_event; 4208 } else if (!strcmp(name, "memory.oom_control")) { 4209 event->register_event = mem_cgroup_oom_register_event; 4210 event->unregister_event = mem_cgroup_oom_unregister_event; 4211 } else if (!strcmp(name, "memory.pressure_level")) { 4212 event->register_event = vmpressure_register_event; 4213 event->unregister_event = vmpressure_unregister_event; 4214 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4215 event->register_event = memsw_cgroup_usage_register_event; 4216 event->unregister_event = memsw_cgroup_usage_unregister_event; 4217 } else { 4218 ret = -EINVAL; 4219 goto out_put_cfile; 4220 } 4221 4222 /* 4223 * Verify @cfile should belong to @css. Also, remaining events are 4224 * automatically removed on cgroup destruction but the removal is 4225 * asynchronous, so take an extra ref on @css. 4226 */ 4227 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4228 &memory_cgrp_subsys); 4229 ret = -EINVAL; 4230 if (IS_ERR(cfile_css)) 4231 goto out_put_cfile; 4232 if (cfile_css != css) { 4233 css_put(cfile_css); 4234 goto out_put_cfile; 4235 } 4236 4237 ret = event->register_event(memcg, event->eventfd, buf); 4238 if (ret) 4239 goto out_put_css; 4240 4241 vfs_poll(efile.file, &event->pt); 4242 4243 spin_lock(&memcg->event_list_lock); 4244 list_add(&event->list, &memcg->event_list); 4245 spin_unlock(&memcg->event_list_lock); 4246 4247 fdput(cfile); 4248 fdput(efile); 4249 4250 return nbytes; 4251 4252 out_put_css: 4253 css_put(css); 4254 out_put_cfile: 4255 fdput(cfile); 4256 out_put_eventfd: 4257 eventfd_ctx_put(event->eventfd); 4258 out_put_efile: 4259 fdput(efile); 4260 out_kfree: 4261 kfree(event); 4262 4263 return ret; 4264 } 4265 4266 static struct cftype mem_cgroup_legacy_files[] = { 4267 { 4268 .name = "usage_in_bytes", 4269 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4270 .read_u64 = mem_cgroup_read_u64, 4271 }, 4272 { 4273 .name = "max_usage_in_bytes", 4274 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4275 .write = mem_cgroup_reset, 4276 .read_u64 = mem_cgroup_read_u64, 4277 }, 4278 { 4279 .name = "limit_in_bytes", 4280 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4281 .write = mem_cgroup_write, 4282 .read_u64 = mem_cgroup_read_u64, 4283 }, 4284 { 4285 .name = "soft_limit_in_bytes", 4286 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4287 .write = mem_cgroup_write, 4288 .read_u64 = mem_cgroup_read_u64, 4289 }, 4290 { 4291 .name = "failcnt", 4292 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4293 .write = mem_cgroup_reset, 4294 .read_u64 = mem_cgroup_read_u64, 4295 }, 4296 { 4297 .name = "stat", 4298 .seq_show = memcg_stat_show, 4299 }, 4300 { 4301 .name = "force_empty", 4302 .write = mem_cgroup_force_empty_write, 4303 }, 4304 { 4305 .name = "use_hierarchy", 4306 .write_u64 = mem_cgroup_hierarchy_write, 4307 .read_u64 = mem_cgroup_hierarchy_read, 4308 }, 4309 { 4310 .name = "cgroup.event_control", /* 
XXX: for compat */ 4311 .write = memcg_write_event_control, 4312 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4313 }, 4314 { 4315 .name = "swappiness", 4316 .read_u64 = mem_cgroup_swappiness_read, 4317 .write_u64 = mem_cgroup_swappiness_write, 4318 }, 4319 { 4320 .name = "move_charge_at_immigrate", 4321 .read_u64 = mem_cgroup_move_charge_read, 4322 .write_u64 = mem_cgroup_move_charge_write, 4323 }, 4324 { 4325 .name = "oom_control", 4326 .seq_show = mem_cgroup_oom_control_read, 4327 .write_u64 = mem_cgroup_oom_control_write, 4328 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4329 }, 4330 { 4331 .name = "pressure_level", 4332 }, 4333 #ifdef CONFIG_NUMA 4334 { 4335 .name = "numa_stat", 4336 .seq_show = memcg_numa_stat_show, 4337 }, 4338 #endif 4339 { 4340 .name = "kmem.limit_in_bytes", 4341 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4342 .write = mem_cgroup_write, 4343 .read_u64 = mem_cgroup_read_u64, 4344 }, 4345 { 4346 .name = "kmem.usage_in_bytes", 4347 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4348 .read_u64 = mem_cgroup_read_u64, 4349 }, 4350 { 4351 .name = "kmem.failcnt", 4352 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4353 .write = mem_cgroup_reset, 4354 .read_u64 = mem_cgroup_read_u64, 4355 }, 4356 { 4357 .name = "kmem.max_usage_in_bytes", 4358 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4359 .write = mem_cgroup_reset, 4360 .read_u64 = mem_cgroup_read_u64, 4361 }, 4362 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) 4363 { 4364 .name = "kmem.slabinfo", 4365 .seq_start = memcg_slab_start, 4366 .seq_next = memcg_slab_next, 4367 .seq_stop = memcg_slab_stop, 4368 .seq_show = memcg_slab_show, 4369 }, 4370 #endif 4371 { 4372 .name = "kmem.tcp.limit_in_bytes", 4373 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4374 .write = mem_cgroup_write, 4375 .read_u64 = mem_cgroup_read_u64, 4376 }, 4377 { 4378 .name = "kmem.tcp.usage_in_bytes", 4379 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4380 .read_u64 = mem_cgroup_read_u64, 4381 }, 4382 { 4383 .name = "kmem.tcp.failcnt", 4384 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4385 .write = mem_cgroup_reset, 4386 .read_u64 = mem_cgroup_read_u64, 4387 }, 4388 { 4389 .name = "kmem.tcp.max_usage_in_bytes", 4390 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4391 .write = mem_cgroup_reset, 4392 .read_u64 = mem_cgroup_read_u64, 4393 }, 4394 { }, /* terminate */ 4395 }; 4396 4397 /* 4398 * Private memory cgroup IDR 4399 * 4400 * Swap-out records and page cache shadow entries need to store memcg 4401 * references in constrained space, so we maintain an ID space that is 4402 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of 4403 * memory-controlled cgroups to 64k. 4404 * 4405 * However, there usually are many references to the offline CSS after 4406 * the cgroup has been destroyed, such as page cache or reclaimable 4407 * slab objects, that don't need to hang on to the ID. We want to keep 4408 * those dead CSSes from occupying IDs, or we might quickly exhaust the 4409 * relatively small ID space and prevent the creation of new cgroups 4410 * even when there are far fewer than 64k cgroups - possibly none. 4411 * 4412 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4413 * be freed and recycled when it's no longer needed, which is usually 4414 * when the CSS is offlined. 4415 * 4416 * The only exception to that is records of swapped-out tmpfs/shmem 4417 * pages that need to be attributed to live ancestors on swapin. But 4418 * those references are manageable from userspace.
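 *
 * As a reader's sketch (summarizing the helpers below, not adding new
 * semantics), the ID lifecycle is roughly:
 *
 *   mem_cgroup_alloc():        id.id = idr_alloc(&mem_cgroup_idr, ...)
 *   mem_cgroup_css_online():   refcount_set(&memcg->id.ref, 1); the ID pins the CSS
 *   swap-out records etc.:     mem_cgroup_id_get_many() pins the ID
 *   mem_cgroup_css_offline():  mem_cgroup_id_put() drops the online pin
 *   last mem_cgroup_id_put():  idr_remove() + css_put(), the ID becomes recyclable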
4419 */ 4420 4421 static DEFINE_IDR(mem_cgroup_idr); 4422 4423 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 4424 { 4425 if (memcg->id.id > 0) { 4426 idr_remove(&mem_cgroup_idr, memcg->id.id); 4427 memcg->id.id = 0; 4428 } 4429 } 4430 4431 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4432 { 4433 refcount_add(n, &memcg->id.ref); 4434 } 4435 4436 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4437 { 4438 if (refcount_sub_and_test(n, &memcg->id.ref)) { 4439 mem_cgroup_id_remove(memcg); 4440 4441 /* Memcg ID pins CSS */ 4442 css_put(&memcg->css); 4443 } 4444 } 4445 4446 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4447 { 4448 mem_cgroup_id_get_many(memcg, 1); 4449 } 4450 4451 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4452 { 4453 mem_cgroup_id_put_many(memcg, 1); 4454 } 4455 4456 /** 4457 * mem_cgroup_from_id - look up a memcg from a memcg id 4458 * @id: the memcg id to look up 4459 * 4460 * Caller must hold rcu_read_lock(). 4461 */ 4462 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4463 { 4464 WARN_ON_ONCE(!rcu_read_lock_held()); 4465 return idr_find(&mem_cgroup_idr, id); 4466 } 4467 4468 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4469 { 4470 struct mem_cgroup_per_node *pn; 4471 int tmp = node; 4472 /* 4473 * This routine is called against possible nodes. 4474 * But it's BUG to call kmalloc() against offline node. 4475 * 4476 * TODO: this routine can waste much memory for nodes which will 4477 * never be onlined. It's better to use memory hotplug callback 4478 * function. 4479 */ 4480 if (!node_state(node, N_NORMAL_MEMORY)) 4481 tmp = -1; 4482 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4483 if (!pn) 4484 return 1; 4485 4486 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); 4487 if (!pn->lruvec_stat_cpu) { 4488 kfree(pn); 4489 return 1; 4490 } 4491 4492 lruvec_init(&pn->lruvec); 4493 pn->usage_in_excess = 0; 4494 pn->on_tree = false; 4495 pn->memcg = memcg; 4496 4497 memcg->nodeinfo[node] = pn; 4498 return 0; 4499 } 4500 4501 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4502 { 4503 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 4504 4505 if (!pn) 4506 return; 4507 4508 free_percpu(pn->lruvec_stat_cpu); 4509 kfree(pn); 4510 } 4511 4512 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4513 { 4514 int node; 4515 4516 for_each_node(node) 4517 free_mem_cgroup_per_node_info(memcg, node); 4518 free_percpu(memcg->vmstats_percpu); 4519 kfree(memcg); 4520 } 4521 4522 static void mem_cgroup_free(struct mem_cgroup *memcg) 4523 { 4524 memcg_wb_domain_exit(memcg); 4525 __mem_cgroup_free(memcg); 4526 } 4527 4528 static struct mem_cgroup *mem_cgroup_alloc(void) 4529 { 4530 struct mem_cgroup *memcg; 4531 unsigned int size; 4532 int node; 4533 4534 size = sizeof(struct mem_cgroup); 4535 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4536 4537 memcg = kzalloc(size, GFP_KERNEL); 4538 if (!memcg) 4539 return NULL; 4540 4541 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4542 1, MEM_CGROUP_ID_MAX, 4543 GFP_KERNEL); 4544 if (memcg->id.id < 0) 4545 goto fail; 4546 4547 memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); 4548 if (!memcg->vmstats_percpu) 4549 goto fail; 4550 4551 for_each_node(node) 4552 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4553 goto fail; 4554 4555 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4556 goto fail; 4557 4558 
INIT_WORK(&memcg->high_work, high_work_func); 4559 memcg->last_scanned_node = MAX_NUMNODES; 4560 INIT_LIST_HEAD(&memcg->oom_notify); 4561 mutex_init(&memcg->thresholds_lock); 4562 spin_lock_init(&memcg->move_lock); 4563 vmpressure_init(&memcg->vmpressure); 4564 INIT_LIST_HEAD(&memcg->event_list); 4565 spin_lock_init(&memcg->event_list_lock); 4566 memcg->socket_pressure = jiffies; 4567 #ifdef CONFIG_MEMCG_KMEM 4568 memcg->kmemcg_id = -1; 4569 #endif 4570 #ifdef CONFIG_CGROUP_WRITEBACK 4571 INIT_LIST_HEAD(&memcg->cgwb_list); 4572 #endif 4573 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4574 return memcg; 4575 fail: 4576 mem_cgroup_id_remove(memcg); 4577 __mem_cgroup_free(memcg); 4578 return NULL; 4579 } 4580 4581 static struct cgroup_subsys_state * __ref 4582 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4583 { 4584 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4585 struct mem_cgroup *memcg; 4586 long error = -ENOMEM; 4587 4588 memcg = mem_cgroup_alloc(); 4589 if (!memcg) 4590 return ERR_PTR(error); 4591 4592 memcg->high = PAGE_COUNTER_MAX; 4593 memcg->soft_limit = PAGE_COUNTER_MAX; 4594 if (parent) { 4595 memcg->swappiness = mem_cgroup_swappiness(parent); 4596 memcg->oom_kill_disable = parent->oom_kill_disable; 4597 } 4598 if (parent && parent->use_hierarchy) { 4599 memcg->use_hierarchy = true; 4600 page_counter_init(&memcg->memory, &parent->memory); 4601 page_counter_init(&memcg->swap, &parent->swap); 4602 page_counter_init(&memcg->memsw, &parent->memsw); 4603 page_counter_init(&memcg->kmem, &parent->kmem); 4604 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4605 } else { 4606 page_counter_init(&memcg->memory, NULL); 4607 page_counter_init(&memcg->swap, NULL); 4608 page_counter_init(&memcg->memsw, NULL); 4609 page_counter_init(&memcg->kmem, NULL); 4610 page_counter_init(&memcg->tcpmem, NULL); 4611 /* 4612 * A deeper hierarchy with use_hierarchy == false doesn't make 4613 * much sense, so let the cgroup subsystem know about this 4614 * unfortunate state in our controller. 4615 */ 4616 if (parent != root_mem_cgroup) 4617 memory_cgrp_subsys.broken_hierarchy = true; 4618 } 4619 4620 /* The following stuff does not apply to the root */ 4621 if (!parent) { 4622 root_mem_cgroup = memcg; 4623 return &memcg->css; 4624 } 4625 4626 error = memcg_online_kmem(memcg); 4627 if (error) 4628 goto fail; 4629 4630 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4631 static_branch_inc(&memcg_sockets_enabled_key); 4632 4633 return &memcg->css; 4634 fail: 4635 mem_cgroup_id_remove(memcg); 4636 mem_cgroup_free(memcg); 4637 return ERR_PTR(-ENOMEM); 4638 } 4639 4640 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4641 { 4642 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4643 4644 /* 4645 * A memcg must be visible for memcg_expand_shrinker_maps() 4646 * by the time the maps are allocated. So, we allocate the maps 4647 * here, where for_each_mem_cgroup() can't skip it. 4648 */ 4649 if (memcg_alloc_shrinker_maps(memcg)) { 4650 mem_cgroup_id_remove(memcg); 4651 return -ENOMEM; 4652 } 4653 4654 /* Online state pins memcg ID, memcg ID pins CSS */ 4655 refcount_set(&memcg->id.ref, 1); 4656 css_get(css); 4657 return 0; 4658 } 4659 4660 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4661 { 4662 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4663 struct mem_cgroup_event *event, *tmp; 4664 4665 /* 4666 * Unregister events and notify userspace.
4667 * Notify userspace about cgroup removing only after rmdir of cgroup 4668 * directory to avoid race between userspace and kernelspace. 4669 */ 4670 spin_lock(&memcg->event_list_lock); 4671 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4672 list_del_init(&event->list); 4673 schedule_work(&event->remove); 4674 } 4675 spin_unlock(&memcg->event_list_lock); 4676 4677 page_counter_set_min(&memcg->memory, 0); 4678 page_counter_set_low(&memcg->memory, 0); 4679 4680 memcg_offline_kmem(memcg); 4681 wb_memcg_offline(memcg); 4682 4683 drain_all_stock(memcg); 4684 4685 mem_cgroup_id_put(memcg); 4686 } 4687 4688 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4689 { 4690 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4691 4692 invalidate_reclaim_iterators(memcg); 4693 } 4694 4695 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4696 { 4697 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4698 4699 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4700 static_branch_dec(&memcg_sockets_enabled_key); 4701 4702 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4703 static_branch_dec(&memcg_sockets_enabled_key); 4704 4705 vmpressure_cleanup(&memcg->vmpressure); 4706 cancel_work_sync(&memcg->high_work); 4707 mem_cgroup_remove_from_trees(memcg); 4708 memcg_free_shrinker_maps(memcg); 4709 memcg_free_kmem(memcg); 4710 mem_cgroup_free(memcg); 4711 } 4712 4713 /** 4714 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4715 * @css: the target css 4716 * 4717 * Reset the states of the mem_cgroup associated with @css. This is 4718 * invoked when the userland requests disabling on the default hierarchy 4719 * but the memcg is pinned through dependency. The memcg should stop 4720 * applying policies and should revert to the vanilla state as it may be 4721 * made visible again. 4722 * 4723 * The current implementation only resets the essential configurations. 4724 * This needs to be expanded to cover all the visible parts. 4725 */ 4726 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4727 { 4728 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4729 4730 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 4731 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 4732 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); 4733 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 4734 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 4735 page_counter_set_min(&memcg->memory, 0); 4736 page_counter_set_low(&memcg->memory, 0); 4737 memcg->high = PAGE_COUNTER_MAX; 4738 memcg->soft_limit = PAGE_COUNTER_MAX; 4739 memcg_wb_domain_size_changed(memcg); 4740 } 4741 4742 #ifdef CONFIG_MMU 4743 /* Handlers for move charge at task migration. 
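 *
 * A rough sketch of the flow implemented by the handlers below (a
 * reader's summary, not additional semantics):
 *
 *   mem_cgroup_can_attach()     set up "mc" and precharge mc.to
 *   mem_cgroup_move_task()      walk the page tables and move charges
 *                               (wired up as ->post_attach)
 *   mem_cgroup_clear_mc()       cancel leftover precharges, wake waiters
 *   mem_cgroup_cancel_attach()  clear "mc" again if the attach is cancelled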
*/ 4744 static int mem_cgroup_do_precharge(unsigned long count) 4745 { 4746 int ret; 4747 4748 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4749 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4750 if (!ret) { 4751 mc.precharge += count; 4752 return ret; 4753 } 4754 4755 /* Try charges one by one with reclaim, but do not retry */ 4756 while (count--) { 4757 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 4758 if (ret) 4759 return ret; 4760 mc.precharge++; 4761 cond_resched(); 4762 } 4763 return 0; 4764 } 4765 4766 union mc_target { 4767 struct page *page; 4768 swp_entry_t ent; 4769 }; 4770 4771 enum mc_target_type { 4772 MC_TARGET_NONE = 0, 4773 MC_TARGET_PAGE, 4774 MC_TARGET_SWAP, 4775 MC_TARGET_DEVICE, 4776 }; 4777 4778 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4779 unsigned long addr, pte_t ptent) 4780 { 4781 struct page *page = _vm_normal_page(vma, addr, ptent, true); 4782 4783 if (!page || !page_mapped(page)) 4784 return NULL; 4785 if (PageAnon(page)) { 4786 if (!(mc.flags & MOVE_ANON)) 4787 return NULL; 4788 } else { 4789 if (!(mc.flags & MOVE_FILE)) 4790 return NULL; 4791 } 4792 if (!get_page_unless_zero(page)) 4793 return NULL; 4794 4795 return page; 4796 } 4797 4798 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 4799 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4800 pte_t ptent, swp_entry_t *entry) 4801 { 4802 struct page *page = NULL; 4803 swp_entry_t ent = pte_to_swp_entry(ptent); 4804 4805 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4806 return NULL; 4807 4808 /* 4809 * Handle MEMORY_DEVICE_PRIVATE, i.e. ZONE_DEVICE pages belonging to 4810 * a device; because they are not accessible by the CPU, they are stored 4811 * as special swap entries in the CPU page table. 4812 */ 4813 if (is_device_private_entry(ent)) { 4814 page = device_private_entry_to_page(ent); 4815 /* 4816 * A MEMORY_DEVICE_PRIVATE page is a ZONE_DEVICE page, which has 4817 * a refcount of 1 when free (unlike a normal page) 4818 */ 4819 if (!page_ref_add_unless(page, 1, 1)) 4820 return NULL; 4821 return page; 4822 } 4823 4824 /* 4825 * Because lookup_swap_cache() updates some statistics counters, 4826 * we call find_get_page() on swapper_space directly. 4827 */ 4828 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 4829 if (do_memsw_account()) 4830 entry->val = ent.val; 4831 4832 return page; 4833 } 4834 #else 4835 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4836 pte_t ptent, swp_entry_t *entry) 4837 { 4838 return NULL; 4839 } 4840 #endif 4841 4842 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4843 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4844 { 4845 struct page *page = NULL; 4846 struct address_space *mapping; 4847 pgoff_t pgoff; 4848 4849 if (!vma->vm_file) /* anonymous vma */ 4850 return NULL; 4851 if (!(mc.flags & MOVE_FILE)) 4852 return NULL; 4853 4854 mapping = vma->vm_file->f_mapping; 4855 pgoff = linear_page_index(vma, addr); 4856 4857 /* The page is moved even if it's not in this task's RSS (i.e. not page-faulted in by it). */ 4858 #ifdef CONFIG_SWAP 4859 /* shmem/tmpfs may report page out on swap: account for that too.
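 * A shmem mapping can hold a swap entry (an xarray value entry) instead
 * of a page; the code below detects that with xa_is_value(), converts it
 * with radix_to_swp_entry() and looks the page up in the swap cache.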
*/ 4860 if (shmem_mapping(mapping)) { 4861 page = find_get_entry(mapping, pgoff); 4862 if (xa_is_value(page)) { 4863 swp_entry_t swp = radix_to_swp_entry(page); 4864 if (do_memsw_account()) 4865 *entry = swp; 4866 page = find_get_page(swap_address_space(swp), 4867 swp_offset(swp)); 4868 } 4869 } else 4870 page = find_get_page(mapping, pgoff); 4871 #else 4872 page = find_get_page(mapping, pgoff); 4873 #endif 4874 return page; 4875 } 4876 4877 /** 4878 * mem_cgroup_move_account - move the accounting of a page 4879 * @page: the page 4880 * @compound: charge the page as compound or small page 4881 * @from: mem_cgroup which the page is moved from. 4882 * @to: mem_cgroup which the page is moved to. @from != @to. 4883 * 4884 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.) 4885 * 4886 * This function doesn't "charge" the new cgroup and doesn't "uncharge" 4887 * the old one; the caller is responsible for both. 4888 */ 4889 static int mem_cgroup_move_account(struct page *page, 4890 bool compound, 4891 struct mem_cgroup *from, 4892 struct mem_cgroup *to) 4893 { 4894 unsigned long flags; 4895 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4896 int ret; 4897 bool anon; 4898 4899 VM_BUG_ON(from == to); 4900 VM_BUG_ON_PAGE(PageLRU(page), page); 4901 VM_BUG_ON(compound && !PageTransHuge(page)); 4902 4903 /* 4904 * Prevent mem_cgroup_migrate() from looking at 4905 * page->mem_cgroup of its source page while we change it. 4906 */ 4907 ret = -EBUSY; 4908 if (!trylock_page(page)) 4909 goto out; 4910 4911 ret = -EINVAL; 4912 if (page->mem_cgroup != from) 4913 goto out_unlock; 4914 4915 anon = PageAnon(page); 4916 4917 spin_lock_irqsave(&from->move_lock, flags); 4918 4919 if (!anon && page_mapped(page)) { 4920 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages); 4921 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages); 4922 } 4923 4924 /* 4925 * move_lock was grabbed above and the caller set from->moving_account, so 4926 * mod_memcg_page_state() will serialize updates to PageDirty. 4927 * So the mapping should be stable for dirty pages. 4928 */ 4929 if (!anon && PageDirty(page)) { 4930 struct address_space *mapping = page_mapping(page); 4931 4932 if (mapping_cap_account_dirty(mapping)) { 4933 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages); 4934 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages); 4935 } 4936 } 4937 4938 if (PageWriteback(page)) { 4939 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages); 4940 __mod_memcg_state(to, NR_WRITEBACK, nr_pages); 4941 } 4942 4943 /* 4944 * It is safe to change page->mem_cgroup here because the page 4945 * is referenced, charged, and isolated - we can't race with 4946 * uncharging, charging, migration, or LRU putback.
4947 */ 4948 4949 /* caller should have done css_get */ 4950 page->mem_cgroup = to; 4951 spin_unlock_irqrestore(&from->move_lock, flags); 4952 4953 ret = 0; 4954 4955 local_irq_disable(); 4956 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4957 memcg_check_events(to, page); 4958 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4959 memcg_check_events(from, page); 4960 local_irq_enable(); 4961 out_unlock: 4962 unlock_page(page); 4963 out: 4964 return ret; 4965 } 4966 4967 /** 4968 * get_mctgt_type - get target type of moving charge 4969 * @vma: the vma that the pte to be checked belongs to 4970 * @addr: the address corresponding to the pte to be checked 4971 * @ptent: the pte to be checked 4972 * @target: the pointer in which the target page or swap entry will be stored (can be NULL) 4973 * 4974 * Returns 4975 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4976 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4977 * move charge. If @target is not NULL, the page is stored in target->page 4978 * with an extra refcount taken (callers should handle it). 4979 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4980 * target for charge migration. If @target is not NULL, the entry is stored 4981 * in target->ent. 4982 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PUBLIC 4983 * or MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the lru). 4984 * For now such a page is charged like a regular page would be, as for all 4985 * intents and purposes it is just special memory taking the place of a 4986 * regular page. 4987 * 4988 * See Documentation/vm/hmm.txt and include/linux/hmm.h 4989 * 4990 * Called with pte lock held. 4991 */ 4992 4993 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4994 unsigned long addr, pte_t ptent, union mc_target *target) 4995 { 4996 struct page *page = NULL; 4997 enum mc_target_type ret = MC_TARGET_NONE; 4998 swp_entry_t ent = { .val = 0 }; 4999 5000 if (pte_present(ptent)) 5001 page = mc_handle_present_pte(vma, addr, ptent); 5002 else if (is_swap_pte(ptent)) 5003 page = mc_handle_swap_pte(vma, ptent, &ent); 5004 else if (pte_none(ptent)) 5005 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5006 5007 if (!page && !ent.val) 5008 return ret; 5009 if (page) { 5010 /* 5011 * Only do a loose check w/o serialization. 5012 * mem_cgroup_move_account() checks whether the page is valid 5013 * or not under LRU exclusion. 5014 */ 5015 if (page->mem_cgroup == mc.from) { 5016 ret = MC_TARGET_PAGE; 5017 if (is_device_private_page(page) || 5018 is_device_public_page(page)) 5019 ret = MC_TARGET_DEVICE; 5020 if (target) 5021 target->page = page; 5022 } 5023 if (!ret || !target) 5024 put_page(page); 5025 } 5026 /* 5027 * There is a swap entry and the page doesn't exist or isn't charged. 5028 * But we cannot move a tail page of a THP. 5029 */ 5030 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5031 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5032 ret = MC_TARGET_SWAP; 5033 if (target) 5034 target->ent = ent; 5035 } 5036 return ret; 5037 } 5038 5039 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5040 /* 5041 * We don't consider PMD-mapped swapping or file-mapped pages because THP does 5042 * not support them for now. 5043 * The caller should make sure that pmd_trans_huge(pmd) is true.
5044 */ 5045 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5046 unsigned long addr, pmd_t pmd, union mc_target *target) 5047 { 5048 struct page *page = NULL; 5049 enum mc_target_type ret = MC_TARGET_NONE; 5050 5051 if (unlikely(is_swap_pmd(pmd))) { 5052 VM_BUG_ON(thp_migration_supported() && 5053 !is_pmd_migration_entry(pmd)); 5054 return ret; 5055 } 5056 page = pmd_page(pmd); 5057 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5058 if (!(mc.flags & MOVE_ANON)) 5059 return ret; 5060 if (page->mem_cgroup == mc.from) { 5061 ret = MC_TARGET_PAGE; 5062 if (target) { 5063 get_page(page); 5064 target->page = page; 5065 } 5066 } 5067 return ret; 5068 } 5069 #else 5070 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5071 unsigned long addr, pmd_t pmd, union mc_target *target) 5072 { 5073 return MC_TARGET_NONE; 5074 } 5075 #endif 5076 5077 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5078 unsigned long addr, unsigned long end, 5079 struct mm_walk *walk) 5080 { 5081 struct vm_area_struct *vma = walk->vma; 5082 pte_t *pte; 5083 spinlock_t *ptl; 5084 5085 ptl = pmd_trans_huge_lock(pmd, vma); 5086 if (ptl) { 5087 /* 5088 * Note their can not be MC_TARGET_DEVICE for now as we do not 5089 * support transparent huge page with MEMORY_DEVICE_PUBLIC or 5090 * MEMORY_DEVICE_PRIVATE but this might change. 5091 */ 5092 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5093 mc.precharge += HPAGE_PMD_NR; 5094 spin_unlock(ptl); 5095 return 0; 5096 } 5097 5098 if (pmd_trans_unstable(pmd)) 5099 return 0; 5100 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5101 for (; addr != end; pte++, addr += PAGE_SIZE) 5102 if (get_mctgt_type(vma, addr, *pte, NULL)) 5103 mc.precharge++; /* increment precharge temporarily */ 5104 pte_unmap_unlock(pte - 1, ptl); 5105 cond_resched(); 5106 5107 return 0; 5108 } 5109 5110 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5111 { 5112 unsigned long precharge; 5113 5114 struct mm_walk mem_cgroup_count_precharge_walk = { 5115 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5116 .mm = mm, 5117 }; 5118 down_read(&mm->mmap_sem); 5119 walk_page_range(0, mm->highest_vm_end, 5120 &mem_cgroup_count_precharge_walk); 5121 up_read(&mm->mmap_sem); 5122 5123 precharge = mc.precharge; 5124 mc.precharge = 0; 5125 5126 return precharge; 5127 } 5128 5129 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5130 { 5131 unsigned long precharge = mem_cgroup_count_precharge(mm); 5132 5133 VM_BUG_ON(mc.moving_task); 5134 mc.moving_task = current; 5135 return mem_cgroup_do_precharge(precharge); 5136 } 5137 5138 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5139 static void __mem_cgroup_clear_mc(void) 5140 { 5141 struct mem_cgroup *from = mc.from; 5142 struct mem_cgroup *to = mc.to; 5143 5144 /* we must uncharge all the leftover precharges from mc.to */ 5145 if (mc.precharge) { 5146 cancel_charge(mc.to, mc.precharge); 5147 mc.precharge = 0; 5148 } 5149 /* 5150 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5151 * we must uncharge here. 
5152 */ 5153 if (mc.moved_charge) { 5154 cancel_charge(mc.from, mc.moved_charge); 5155 mc.moved_charge = 0; 5156 } 5157 /* we must fixup refcnts and charges */ 5158 if (mc.moved_swap) { 5159 /* uncharge swap account from the old cgroup */ 5160 if (!mem_cgroup_is_root(mc.from)) 5161 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5162 5163 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5164 5165 /* 5166 * we charged both to->memory and to->memsw, so we 5167 * should uncharge to->memory. 5168 */ 5169 if (!mem_cgroup_is_root(mc.to)) 5170 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5171 5172 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 5173 css_put_many(&mc.to->css, mc.moved_swap); 5174 5175 mc.moved_swap = 0; 5176 } 5177 memcg_oom_recover(from); 5178 memcg_oom_recover(to); 5179 wake_up_all(&mc.waitq); 5180 } 5181 5182 static void mem_cgroup_clear_mc(void) 5183 { 5184 struct mm_struct *mm = mc.mm; 5185 5186 /* 5187 * we must clear moving_task before waking up waiters at the end of 5188 * task migration. 5189 */ 5190 mc.moving_task = NULL; 5191 __mem_cgroup_clear_mc(); 5192 spin_lock(&mc.lock); 5193 mc.from = NULL; 5194 mc.to = NULL; 5195 mc.mm = NULL; 5196 spin_unlock(&mc.lock); 5197 5198 mmput(mm); 5199 } 5200 5201 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5202 { 5203 struct cgroup_subsys_state *css; 5204 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5205 struct mem_cgroup *from; 5206 struct task_struct *leader, *p; 5207 struct mm_struct *mm; 5208 unsigned long move_flags; 5209 int ret = 0; 5210 5211 /* charge immigration isn't supported on the default hierarchy */ 5212 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5213 return 0; 5214 5215 /* 5216 * Multi-process migrations only happen on the default hierarchy 5217 * where charge immigration is not used. Perform charge 5218 * immigration if @tset contains a leader and whine if there are 5219 * multiple. 5220 */ 5221 p = NULL; 5222 cgroup_taskset_for_each_leader(leader, css, tset) { 5223 WARN_ON_ONCE(p); 5224 p = leader; 5225 memcg = mem_cgroup_from_css(css); 5226 } 5227 if (!p) 5228 return 0; 5229 5230 /* 5231 * We are now commited to this value whatever it is. Changes in this 5232 * tunable will only affect upcoming migrations, not the current one. 5233 * So we need to save it, and keep it going. 
5234 */ 5235 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5236 if (!move_flags) 5237 return 0; 5238 5239 from = mem_cgroup_from_task(p); 5240 5241 VM_BUG_ON(from == memcg); 5242 5243 mm = get_task_mm(p); 5244 if (!mm) 5245 return 0; 5246 /* We move charges only when we move a owner of the mm */ 5247 if (mm->owner == p) { 5248 VM_BUG_ON(mc.from); 5249 VM_BUG_ON(mc.to); 5250 VM_BUG_ON(mc.precharge); 5251 VM_BUG_ON(mc.moved_charge); 5252 VM_BUG_ON(mc.moved_swap); 5253 5254 spin_lock(&mc.lock); 5255 mc.mm = mm; 5256 mc.from = from; 5257 mc.to = memcg; 5258 mc.flags = move_flags; 5259 spin_unlock(&mc.lock); 5260 /* We set mc.moving_task later */ 5261 5262 ret = mem_cgroup_precharge_mc(mm); 5263 if (ret) 5264 mem_cgroup_clear_mc(); 5265 } else { 5266 mmput(mm); 5267 } 5268 return ret; 5269 } 5270 5271 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5272 { 5273 if (mc.to) 5274 mem_cgroup_clear_mc(); 5275 } 5276 5277 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5278 unsigned long addr, unsigned long end, 5279 struct mm_walk *walk) 5280 { 5281 int ret = 0; 5282 struct vm_area_struct *vma = walk->vma; 5283 pte_t *pte; 5284 spinlock_t *ptl; 5285 enum mc_target_type target_type; 5286 union mc_target target; 5287 struct page *page; 5288 5289 ptl = pmd_trans_huge_lock(pmd, vma); 5290 if (ptl) { 5291 if (mc.precharge < HPAGE_PMD_NR) { 5292 spin_unlock(ptl); 5293 return 0; 5294 } 5295 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5296 if (target_type == MC_TARGET_PAGE) { 5297 page = target.page; 5298 if (!isolate_lru_page(page)) { 5299 if (!mem_cgroup_move_account(page, true, 5300 mc.from, mc.to)) { 5301 mc.precharge -= HPAGE_PMD_NR; 5302 mc.moved_charge += HPAGE_PMD_NR; 5303 } 5304 putback_lru_page(page); 5305 } 5306 put_page(page); 5307 } else if (target_type == MC_TARGET_DEVICE) { 5308 page = target.page; 5309 if (!mem_cgroup_move_account(page, true, 5310 mc.from, mc.to)) { 5311 mc.precharge -= HPAGE_PMD_NR; 5312 mc.moved_charge += HPAGE_PMD_NR; 5313 } 5314 put_page(page); 5315 } 5316 spin_unlock(ptl); 5317 return 0; 5318 } 5319 5320 if (pmd_trans_unstable(pmd)) 5321 return 0; 5322 retry: 5323 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5324 for (; addr != end; addr += PAGE_SIZE) { 5325 pte_t ptent = *(pte++); 5326 bool device = false; 5327 swp_entry_t ent; 5328 5329 if (!mc.precharge) 5330 break; 5331 5332 switch (get_mctgt_type(vma, addr, ptent, &target)) { 5333 case MC_TARGET_DEVICE: 5334 device = true; 5335 /* fall through */ 5336 case MC_TARGET_PAGE: 5337 page = target.page; 5338 /* 5339 * We can have a part of the split pmd here. Moving it 5340 * can be done but it would be too convoluted so simply 5341 * ignore such a partial THP and keep it in original 5342 * memcg. There should be somebody mapping the head. 5343 */ 5344 if (PageTransCompound(page)) 5345 goto put; 5346 if (!device && isolate_lru_page(page)) 5347 goto put; 5348 if (!mem_cgroup_move_account(page, false, 5349 mc.from, mc.to)) { 5350 mc.precharge--; 5351 /* we uncharge from mc.from later. */ 5352 mc.moved_charge++; 5353 } 5354 if (!device) 5355 putback_lru_page(page); 5356 put: /* get_mctgt_type() gets the page */ 5357 put_page(page); 5358 break; 5359 case MC_TARGET_SWAP: 5360 ent = target.ent; 5361 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5362 mc.precharge--; 5363 /* we fixup refcnts and charges later. 
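 * (see __mem_cgroup_clear_mc(), which uncharges mc.from's memsw and
 * settles the memcg id/css references for the moved swap entries)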
*/ 5364 mc.moved_swap++; 5365 } 5366 break; 5367 default: 5368 break; 5369 } 5370 } 5371 pte_unmap_unlock(pte - 1, ptl); 5372 cond_resched(); 5373 5374 if (addr != end) { 5375 /* 5376 * We have consumed all the precharges we got in can_attach(). 5377 * We try charging one by one, but don't do any additional 5378 * charges to mc.to if we have failed to charge once already in the 5379 * attach() phase. 5380 */ 5381 ret = mem_cgroup_do_precharge(1); 5382 if (!ret) 5383 goto retry; 5384 } 5385 5386 return ret; 5387 } 5388 5389 static void mem_cgroup_move_charge(void) 5390 { 5391 struct mm_walk mem_cgroup_move_charge_walk = { 5392 .pmd_entry = mem_cgroup_move_charge_pte_range, 5393 .mm = mc.mm, 5394 }; 5395 5396 lru_add_drain_all(); 5397 /* 5398 * Signal lock_page_memcg() to take the memcg's move_lock 5399 * while we're moving its pages to another memcg. Then wait 5400 * for already started RCU-only updates to finish. 5401 */ 5402 atomic_inc(&mc.from->moving_account); 5403 synchronize_rcu(); 5404 retry: 5405 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 5406 /* 5407 * Someone who is holding the mmap_sem might be waiting in 5408 * the waitq. So we cancel all extra charges, wake up all waiters, 5409 * and retry. Because we cancel precharges, we might not be able 5410 * to move enough charges, but moving charge is a best-effort 5411 * feature anyway, so it wouldn't be a big problem. 5412 */ 5413 __mem_cgroup_clear_mc(); 5414 cond_resched(); 5415 goto retry; 5416 } 5417 /* 5418 * When we have consumed all precharges and failed to do an 5419 * additional charge, the page walk just aborts. 5420 */ 5421 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 5422 5423 up_read(&mc.mm->mmap_sem); 5424 atomic_dec(&mc.from->moving_account); 5425 } 5426 5427 static void mem_cgroup_move_task(void) 5428 { 5429 if (mc.to) { 5430 mem_cgroup_move_charge(); 5431 mem_cgroup_clear_mc(); 5432 } 5433 } 5434 #else /* !CONFIG_MMU */ 5435 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5436 { 5437 return 0; 5438 } 5439 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5440 { 5441 } 5442 static void mem_cgroup_move_task(void) 5443 { 5444 } 5445 #endif 5446 5447 /* 5448 * The cgroup core retains root cgroups across [un]mount cycles, making it necessary 5449 * to verify whether we're attached to the default hierarchy on each mount 5450 * attempt. 5451 */ 5452 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5453 { 5454 /* 5455 * use_hierarchy is forced on the default hierarchy. cgroup core 5456 * guarantees that @root doesn't have any children, so turning it 5457 * on for the root memcg is enough.
5458 */ 5459 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5460 root_mem_cgroup->use_hierarchy = true; 5461 else 5462 root_mem_cgroup->use_hierarchy = false; 5463 } 5464 5465 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 5466 { 5467 if (value == PAGE_COUNTER_MAX) 5468 seq_puts(m, "max\n"); 5469 else 5470 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 5471 5472 return 0; 5473 } 5474 5475 static u64 memory_current_read(struct cgroup_subsys_state *css, 5476 struct cftype *cft) 5477 { 5478 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5479 5480 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5481 } 5482 5483 static int memory_min_show(struct seq_file *m, void *v) 5484 { 5485 return seq_puts_memcg_tunable(m, 5486 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 5487 } 5488 5489 static ssize_t memory_min_write(struct kernfs_open_file *of, 5490 char *buf, size_t nbytes, loff_t off) 5491 { 5492 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5493 unsigned long min; 5494 int err; 5495 5496 buf = strstrip(buf); 5497 err = page_counter_memparse(buf, "max", &min); 5498 if (err) 5499 return err; 5500 5501 page_counter_set_min(&memcg->memory, min); 5502 5503 return nbytes; 5504 } 5505 5506 static int memory_low_show(struct seq_file *m, void *v) 5507 { 5508 return seq_puts_memcg_tunable(m, 5509 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 5510 } 5511 5512 static ssize_t memory_low_write(struct kernfs_open_file *of, 5513 char *buf, size_t nbytes, loff_t off) 5514 { 5515 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5516 unsigned long low; 5517 int err; 5518 5519 buf = strstrip(buf); 5520 err = page_counter_memparse(buf, "max", &low); 5521 if (err) 5522 return err; 5523 5524 page_counter_set_low(&memcg->memory, low); 5525 5526 return nbytes; 5527 } 5528 5529 static int memory_high_show(struct seq_file *m, void *v) 5530 { 5531 return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high)); 5532 } 5533 5534 static ssize_t memory_high_write(struct kernfs_open_file *of, 5535 char *buf, size_t nbytes, loff_t off) 5536 { 5537 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5538 unsigned long nr_pages; 5539 unsigned long high; 5540 int err; 5541 5542 buf = strstrip(buf); 5543 err = page_counter_memparse(buf, "max", &high); 5544 if (err) 5545 return err; 5546 5547 memcg->high = high; 5548 5549 nr_pages = page_counter_read(&memcg->memory); 5550 if (nr_pages > high) 5551 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5552 GFP_KERNEL, true); 5553 5554 memcg_wb_domain_size_changed(memcg); 5555 return nbytes; 5556 } 5557 5558 static int memory_max_show(struct seq_file *m, void *v) 5559 { 5560 return seq_puts_memcg_tunable(m, 5561 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 5562 } 5563 5564 static ssize_t memory_max_write(struct kernfs_open_file *of, 5565 char *buf, size_t nbytes, loff_t off) 5566 { 5567 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5568 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5569 bool drained = false; 5570 unsigned long max; 5571 int err; 5572 5573 buf = strstrip(buf); 5574 err = page_counter_memparse(buf, "max", &max); 5575 if (err) 5576 return err; 5577 5578 xchg(&memcg->memory.max, max); 5579 5580 for (;;) { 5581 unsigned long nr_pages = page_counter_read(&memcg->memory); 5582 5583 if (nr_pages <= max) 5584 break; 5585 5586 if (signal_pending(current)) { 5587 err = -EINTR; 5588 break; 5589 } 5590 5591 if (!drained) { 5592 drain_all_stock(memcg); 5593 drained = 
true; 5594 continue; 5595 } 5596 5597 if (nr_reclaims) { 5598 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5599 GFP_KERNEL, true)) 5600 nr_reclaims--; 5601 continue; 5602 } 5603 5604 memcg_memory_event(memcg, MEMCG_OOM); 5605 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5606 break; 5607 } 5608 5609 memcg_wb_domain_size_changed(memcg); 5610 return nbytes; 5611 } 5612 5613 static int memory_events_show(struct seq_file *m, void *v) 5614 { 5615 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5616 5617 seq_printf(m, "low %lu\n", 5618 atomic_long_read(&memcg->memory_events[MEMCG_LOW])); 5619 seq_printf(m, "high %lu\n", 5620 atomic_long_read(&memcg->memory_events[MEMCG_HIGH])); 5621 seq_printf(m, "max %lu\n", 5622 atomic_long_read(&memcg->memory_events[MEMCG_MAX])); 5623 seq_printf(m, "oom %lu\n", 5624 atomic_long_read(&memcg->memory_events[MEMCG_OOM])); 5625 seq_printf(m, "oom_kill %lu\n", 5626 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 5627 5628 return 0; 5629 } 5630 5631 static int memory_stat_show(struct seq_file *m, void *v) 5632 { 5633 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5634 int i; 5635 5636 /* 5637 * Provide statistics on the state of the memory subsystem as 5638 * well as cumulative event counters that show past behavior. 5639 * 5640 * This list is ordered following a combination of these gradients: 5641 * 1) generic big picture -> specifics and details 5642 * 2) reflecting userspace activity -> reflecting kernel heuristics 5643 * 5644 * Current memory state: 5645 */ 5646 5647 seq_printf(m, "anon %llu\n", 5648 (u64)memcg_page_state(memcg, MEMCG_RSS) * PAGE_SIZE); 5649 seq_printf(m, "file %llu\n", 5650 (u64)memcg_page_state(memcg, MEMCG_CACHE) * PAGE_SIZE); 5651 seq_printf(m, "kernel_stack %llu\n", 5652 (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1024); 5653 seq_printf(m, "slab %llu\n", 5654 (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) + 5655 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) * 5656 PAGE_SIZE); 5657 seq_printf(m, "sock %llu\n", 5658 (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE); 5659 5660 seq_printf(m, "shmem %llu\n", 5661 (u64)memcg_page_state(memcg, NR_SHMEM) * PAGE_SIZE); 5662 seq_printf(m, "file_mapped %llu\n", 5663 (u64)memcg_page_state(memcg, NR_FILE_MAPPED) * PAGE_SIZE); 5664 seq_printf(m, "file_dirty %llu\n", 5665 (u64)memcg_page_state(memcg, NR_FILE_DIRTY) * PAGE_SIZE); 5666 seq_printf(m, "file_writeback %llu\n", 5667 (u64)memcg_page_state(memcg, NR_WRITEBACK) * PAGE_SIZE); 5668 5669 /* 5670 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter 5671 * with the NR_ANON_THP vm counter, but right now it's a pain in the 5672 * arse because it requires migrating the work out of rmap to a place 5673 * where the page->mem_cgroup is set up and stable. 
5674 */ 5675 seq_printf(m, "anon_thp %llu\n", 5676 (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) * PAGE_SIZE); 5677 5678 for (i = 0; i < NR_LRU_LISTS; i++) 5679 seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i], 5680 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 5681 PAGE_SIZE); 5682 5683 seq_printf(m, "slab_reclaimable %llu\n", 5684 (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) * 5685 PAGE_SIZE); 5686 seq_printf(m, "slab_unreclaimable %llu\n", 5687 (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) * 5688 PAGE_SIZE); 5689 5690 /* Accumulated memory events */ 5691 5692 seq_printf(m, "pgfault %lu\n", memcg_events(memcg, PGFAULT)); 5693 seq_printf(m, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT)); 5694 5695 seq_printf(m, "workingset_refault %lu\n", 5696 memcg_page_state(memcg, WORKINGSET_REFAULT)); 5697 seq_printf(m, "workingset_activate %lu\n", 5698 memcg_page_state(memcg, WORKINGSET_ACTIVATE)); 5699 seq_printf(m, "workingset_nodereclaim %lu\n", 5700 memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); 5701 5702 seq_printf(m, "pgrefill %lu\n", memcg_events(memcg, PGREFILL)); 5703 seq_printf(m, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) + 5704 memcg_events(memcg, PGSCAN_DIRECT)); 5705 seq_printf(m, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) + 5706 memcg_events(memcg, PGSTEAL_DIRECT)); 5707 seq_printf(m, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE)); 5708 seq_printf(m, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE)); 5709 seq_printf(m, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE)); 5710 seq_printf(m, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED)); 5711 5712 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5713 seq_printf(m, "thp_fault_alloc %lu\n", 5714 memcg_events(memcg, THP_FAULT_ALLOC)); 5715 seq_printf(m, "thp_collapse_alloc %lu\n", 5716 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 5717 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 5718 5719 return 0; 5720 } 5721 5722 static int memory_oom_group_show(struct seq_file *m, void *v) 5723 { 5724 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5725 5726 seq_printf(m, "%d\n", memcg->oom_group); 5727 5728 return 0; 5729 } 5730 5731 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 5732 char *buf, size_t nbytes, loff_t off) 5733 { 5734 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5735 int ret, oom_group; 5736 5737 buf = strstrip(buf); 5738 if (!buf) 5739 return -EINVAL; 5740 5741 ret = kstrtoint(buf, 0, &oom_group); 5742 if (ret) 5743 return ret; 5744 5745 if (oom_group != 0 && oom_group != 1) 5746 return -EINVAL; 5747 5748 memcg->oom_group = oom_group; 5749 5750 return nbytes; 5751 } 5752 5753 static struct cftype memory_files[] = { 5754 { 5755 .name = "current", 5756 .flags = CFTYPE_NOT_ON_ROOT, 5757 .read_u64 = memory_current_read, 5758 }, 5759 { 5760 .name = "min", 5761 .flags = CFTYPE_NOT_ON_ROOT, 5762 .seq_show = memory_min_show, 5763 .write = memory_min_write, 5764 }, 5765 { 5766 .name = "low", 5767 .flags = CFTYPE_NOT_ON_ROOT, 5768 .seq_show = memory_low_show, 5769 .write = memory_low_write, 5770 }, 5771 { 5772 .name = "high", 5773 .flags = CFTYPE_NOT_ON_ROOT, 5774 .seq_show = memory_high_show, 5775 .write = memory_high_write, 5776 }, 5777 { 5778 .name = "max", 5779 .flags = CFTYPE_NOT_ON_ROOT, 5780 .seq_show = memory_max_show, 5781 .write = memory_max_write, 5782 }, 5783 { 5784 .name = "events", 5785 .flags = CFTYPE_NOT_ON_ROOT, 5786 .file_offset = offsetof(struct mem_cgroup, events_file), 5787 .seq_show = memory_events_show, 5788 }, 5789 { 5790 .name = "stat", 5791 
.flags = CFTYPE_NOT_ON_ROOT, 5792 .seq_show = memory_stat_show, 5793 }, 5794 { 5795 .name = "oom.group", 5796 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 5797 .seq_show = memory_oom_group_show, 5798 .write = memory_oom_group_write, 5799 }, 5800 { } /* terminate */ 5801 }; 5802 5803 struct cgroup_subsys memory_cgrp_subsys = { 5804 .css_alloc = mem_cgroup_css_alloc, 5805 .css_online = mem_cgroup_css_online, 5806 .css_offline = mem_cgroup_css_offline, 5807 .css_released = mem_cgroup_css_released, 5808 .css_free = mem_cgroup_css_free, 5809 .css_reset = mem_cgroup_css_reset, 5810 .can_attach = mem_cgroup_can_attach, 5811 .cancel_attach = mem_cgroup_cancel_attach, 5812 .post_attach = mem_cgroup_move_task, 5813 .bind = mem_cgroup_bind, 5814 .dfl_cftypes = memory_files, 5815 .legacy_cftypes = mem_cgroup_legacy_files, 5816 .early_init = 0, 5817 }; 5818 5819 /** 5820 * mem_cgroup_protected - check if memory consumption is in the normal range 5821 * @root: the top ancestor of the sub-tree being checked 5822 * @memcg: the memory cgroup to check 5823 * 5824 * WARNING: This function is not stateless! It can only be used as part 5825 * of a top-down tree iteration, not for isolated queries. 5826 * 5827 * Returns one of the following: 5828 * MEMCG_PROT_NONE: cgroup memory is not protected 5829 * MEMCG_PROT_LOW: cgroup memory is protected as long as there is 5830 * an unprotected supply of reclaimable memory from other cgroups. 5831 * MEMCG_PROT_MIN: cgroup memory is protected 5832 * 5833 * @root is exclusive; it is never protected when looked at directly 5834 * 5835 * To provide a proper hierarchical behavior, effective memory.min/low values 5836 * are used. Below is the description of how effective memory.low is calculated. 5837 * The effective memory.min value is calculated in the same way. 5838 * 5839 * Effective memory.low is always equal to or less than the original memory.low. 5840 * If there is no memory.low overcommitment (which is always true for 5841 * top-level memory cgroups), these two values are equal. 5842 * Otherwise, it's a part of the parent's effective memory.low, 5843 * calculated as the cgroup's memory.low usage divided by the sum of the siblings' 5844 * memory.low usages, where memory.low usage is the size of actually 5845 * protected memory. 5846 * 5847 * low_usage 5848 * elow = min( memory.low, parent->elow * ------------------ ), 5849 * siblings_low_usage 5850 * 5851 * | memory.current, if memory.current < memory.low 5852 * low_usage = | 5853 * | 0, otherwise. 5854 * 5855 * 5856 * Such a definition of the effective memory.low provides the expected 5857 * hierarchical behavior: the parent's memory.low value limits its 5858 * children, unprotected memory is reclaimed first, and cgroups 5859 * which are not using their guarantee do not affect the actual memory 5860 * distribution.
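 * (The worked example below shows the effect of these formulas under
 * reclaim pressure; the resulting split is only approximate.)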
5861 * 5862 * For example, if there are memcgs A, A/B, A/C, A/D and A/E: 5863 * 5864 * A A/memory.low = 2G, A/memory.current = 6G 5865 * //\\ 5866 * BC DE B/memory.low = 3G B/memory.current = 2G 5867 * C/memory.low = 1G C/memory.current = 2G 5868 * D/memory.low = 0 D/memory.current = 2G 5869 * E/memory.low = 10G E/memory.current = 0 5870 * 5871 * and the memory pressure is applied, the following memory distribution 5872 * is expected (approximately): 5873 * 5874 * A/memory.current = 2G 5875 * 5876 * B/memory.current = 1.3G 5877 * C/memory.current = 0.6G 5878 * D/memory.current = 0 5879 * E/memory.current = 0 5880 * 5881 * These calculations require constant tracking of the actual low usages 5882 * (see propagate_protected_usage()), as well as recursive calculation of 5883 * effective memory.low values. But as we do call mem_cgroup_protected() 5884 * path for each memory cgroup top-down from the reclaim, 5885 * it's possible to optimize this part, and save calculated elow 5886 * for next usage. This part is intentionally racy, but it's ok, 5887 * as memory.low is a best-effort mechanism. 5888 */ 5889 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, 5890 struct mem_cgroup *memcg) 5891 { 5892 struct mem_cgroup *parent; 5893 unsigned long emin, parent_emin; 5894 unsigned long elow, parent_elow; 5895 unsigned long usage; 5896 5897 if (mem_cgroup_disabled()) 5898 return MEMCG_PROT_NONE; 5899 5900 if (!root) 5901 root = root_mem_cgroup; 5902 if (memcg == root) 5903 return MEMCG_PROT_NONE; 5904 5905 usage = page_counter_read(&memcg->memory); 5906 if (!usage) 5907 return MEMCG_PROT_NONE; 5908 5909 emin = memcg->memory.min; 5910 elow = memcg->memory.low; 5911 5912 parent = parent_mem_cgroup(memcg); 5913 /* No parent means a non-hierarchical mode on v1 memcg */ 5914 if (!parent) 5915 return MEMCG_PROT_NONE; 5916 5917 if (parent == root) 5918 goto exit; 5919 5920 parent_emin = READ_ONCE(parent->memory.emin); 5921 emin = min(emin, parent_emin); 5922 if (emin && parent_emin) { 5923 unsigned long min_usage, siblings_min_usage; 5924 5925 min_usage = min(usage, memcg->memory.min); 5926 siblings_min_usage = atomic_long_read( 5927 &parent->memory.children_min_usage); 5928 5929 if (min_usage && siblings_min_usage) 5930 emin = min(emin, parent_emin * min_usage / 5931 siblings_min_usage); 5932 } 5933 5934 parent_elow = READ_ONCE(parent->memory.elow); 5935 elow = min(elow, parent_elow); 5936 if (elow && parent_elow) { 5937 unsigned long low_usage, siblings_low_usage; 5938 5939 low_usage = min(usage, memcg->memory.low); 5940 siblings_low_usage = atomic_long_read( 5941 &parent->memory.children_low_usage); 5942 5943 if (low_usage && siblings_low_usage) 5944 elow = min(elow, parent_elow * low_usage / 5945 siblings_low_usage); 5946 } 5947 5948 exit: 5949 memcg->memory.emin = emin; 5950 memcg->memory.elow = elow; 5951 5952 if (usage <= emin) 5953 return MEMCG_PROT_MIN; 5954 else if (usage <= elow) 5955 return MEMCG_PROT_LOW; 5956 else 5957 return MEMCG_PROT_NONE; 5958 } 5959 5960 /** 5961 * mem_cgroup_try_charge - try charging a page 5962 * @page: page to charge 5963 * @mm: mm context of the victim 5964 * @gfp_mask: reclaim mode 5965 * @memcgp: charged memcg return 5966 * @compound: charge the page as compound or small page 5967 * 5968 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5969 * pages according to @gfp_mask if necessary. 5970 * 5971 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5972 * Otherwise, an error code is returned. 
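 *
 * A minimal sketch of a typical caller (mirroring the commit/cancel
 * contract described below, not a new API):
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		goto out_error;
 *	...set up page->mapping, page tables...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	(on failure: mem_cgroup_cancel_charge(page, memcg, false))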
5973 * 5974 * After page->mapping has been set up, the caller must finalize the 5975 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5976 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5977 */ 5978 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5979 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5980 bool compound) 5981 { 5982 struct mem_cgroup *memcg = NULL; 5983 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5984 int ret = 0; 5985 5986 if (mem_cgroup_disabled()) 5987 goto out; 5988 5989 if (PageSwapCache(page)) { 5990 /* 5991 * Every swap fault against a single page tries to charge the 5992 * page, bail as early as possible. shmem_unuse() encounters 5993 * already charged pages, too. The USED bit is protected by 5994 * the page lock, which serializes swap cache removal, which 5995 * in turn serializes uncharging. 5996 */ 5997 VM_BUG_ON_PAGE(!PageLocked(page), page); 5998 if (compound_head(page)->mem_cgroup) 5999 goto out; 6000 6001 if (do_swap_account) { 6002 swp_entry_t ent = { .val = page_private(page), }; 6003 unsigned short id = lookup_swap_cgroup_id(ent); 6004 6005 rcu_read_lock(); 6006 memcg = mem_cgroup_from_id(id); 6007 if (memcg && !css_tryget_online(&memcg->css)) 6008 memcg = NULL; 6009 rcu_read_unlock(); 6010 } 6011 } 6012 6013 if (!memcg) 6014 memcg = get_mem_cgroup_from_mm(mm); 6015 6016 ret = try_charge(memcg, gfp_mask, nr_pages); 6017 6018 css_put(&memcg->css); 6019 out: 6020 *memcgp = memcg; 6021 return ret; 6022 } 6023 6024 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, 6025 gfp_t gfp_mask, struct mem_cgroup **memcgp, 6026 bool compound) 6027 { 6028 struct mem_cgroup *memcg; 6029 int ret; 6030 6031 ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound); 6032 memcg = *memcgp; 6033 mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask); 6034 return ret; 6035 } 6036 6037 /** 6038 * mem_cgroup_commit_charge - commit a page charge 6039 * @page: page to charge 6040 * @memcg: memcg to charge the page to 6041 * @lrucare: page might be on LRU already 6042 * @compound: charge the page as compound or small page 6043 * 6044 * Finalize a charge transaction started by mem_cgroup_try_charge(), 6045 * after page->mapping has been set up. This must happen atomically 6046 * as part of the page instantiation, i.e. under the page table lock 6047 * for anonymous pages, under the page lock for page and swap cache. 6048 * 6049 * In addition, the page must not be on the LRU during the commit, to 6050 * prevent racing with task migration. If it might be, use @lrucare. 6051 * 6052 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 6053 */ 6054 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 6055 bool lrucare, bool compound) 6056 { 6057 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 6058 6059 VM_BUG_ON_PAGE(!page->mapping, page); 6060 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 6061 6062 if (mem_cgroup_disabled()) 6063 return; 6064 /* 6065 * Swap faults will attempt to charge the same page multiple 6066 * times. But reuse_swap_page() might have removed the page 6067 * from swapcache already, so we can't check PageSwapCache(). 
6068 */ 6069 if (!memcg) 6070 return; 6071 6072 commit_charge(page, memcg, lrucare); 6073 6074 local_irq_disable(); 6075 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 6076 memcg_check_events(memcg, page); 6077 local_irq_enable(); 6078 6079 if (do_memsw_account() && PageSwapCache(page)) { 6080 swp_entry_t entry = { .val = page_private(page) }; 6081 /* 6082 * The swap entry might not get freed for a long time, 6083 * let's not wait for it. The page already received a 6084 * memory+swap charge, drop the swap entry duplicate. 6085 */ 6086 mem_cgroup_uncharge_swap(entry, nr_pages); 6087 } 6088 } 6089 6090 /** 6091 * mem_cgroup_cancel_charge - cancel a page charge 6092 * @page: page to charge 6093 * @memcg: memcg to charge the page to 6094 * @compound: charge the page as compound or small page 6095 * 6096 * Cancel a charge transaction started by mem_cgroup_try_charge(). 6097 */ 6098 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 6099 bool compound) 6100 { 6101 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 6102 6103 if (mem_cgroup_disabled()) 6104 return; 6105 /* 6106 * Swap faults will attempt to charge the same page multiple 6107 * times. But reuse_swap_page() might have removed the page 6108 * from swapcache already, so we can't check PageSwapCache(). 6109 */ 6110 if (!memcg) 6111 return; 6112 6113 cancel_charge(memcg, nr_pages); 6114 } 6115 6116 struct uncharge_gather { 6117 struct mem_cgroup *memcg; 6118 unsigned long pgpgout; 6119 unsigned long nr_anon; 6120 unsigned long nr_file; 6121 unsigned long nr_kmem; 6122 unsigned long nr_huge; 6123 unsigned long nr_shmem; 6124 struct page *dummy_page; 6125 }; 6126 6127 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6128 { 6129 memset(ug, 0, sizeof(*ug)); 6130 } 6131 6132 static void uncharge_batch(const struct uncharge_gather *ug) 6133 { 6134 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem; 6135 unsigned long flags; 6136 6137 if (!mem_cgroup_is_root(ug->memcg)) { 6138 page_counter_uncharge(&ug->memcg->memory, nr_pages); 6139 if (do_memsw_account()) 6140 page_counter_uncharge(&ug->memcg->memsw, nr_pages); 6141 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6142 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6143 memcg_oom_recover(ug->memcg); 6144 } 6145 6146 local_irq_save(flags); 6147 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); 6148 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); 6149 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); 6150 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); 6151 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6152 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages); 6153 memcg_check_events(ug->memcg, ug->dummy_page); 6154 local_irq_restore(flags); 6155 6156 if (!mem_cgroup_is_root(ug->memcg)) 6157 css_put_many(&ug->memcg->css, nr_pages); 6158 } 6159 6160 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6161 { 6162 VM_BUG_ON_PAGE(PageLRU(page), page); 6163 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) && 6164 !PageHWPoison(page) , page); 6165 6166 if (!page->mem_cgroup) 6167 return; 6168 6169 /* 6170 * Nobody should be changing or seriously looking at 6171 * page->mem_cgroup at this point, we have fully 6172 * exclusive access to the page. 
6173 */ 6174 6175 if (ug->memcg != page->mem_cgroup) { 6176 if (ug->memcg) { 6177 uncharge_batch(ug); 6178 uncharge_gather_clear(ug); 6179 } 6180 ug->memcg = page->mem_cgroup; 6181 } 6182 6183 if (!PageKmemcg(page)) { 6184 unsigned int nr_pages = 1; 6185 6186 if (PageTransHuge(page)) { 6187 nr_pages <<= compound_order(page); 6188 ug->nr_huge += nr_pages; 6189 } 6190 if (PageAnon(page)) 6191 ug->nr_anon += nr_pages; 6192 else { 6193 ug->nr_file += nr_pages; 6194 if (PageSwapBacked(page)) 6195 ug->nr_shmem += nr_pages; 6196 } 6197 ug->pgpgout++; 6198 } else { 6199 ug->nr_kmem += 1 << compound_order(page); 6200 __ClearPageKmemcg(page); 6201 } 6202 6203 ug->dummy_page = page; 6204 page->mem_cgroup = NULL; 6205 } 6206 6207 static void uncharge_list(struct list_head *page_list) 6208 { 6209 struct uncharge_gather ug; 6210 struct list_head *next; 6211 6212 uncharge_gather_clear(&ug); 6213 6214 /* 6215 * Note that the list can be a single page->lru; hence the 6216 * do-while loop instead of a simple list_for_each_entry(). 6217 */ 6218 next = page_list->next; 6219 do { 6220 struct page *page; 6221 6222 page = list_entry(next, struct page, lru); 6223 next = page->lru.next; 6224 6225 uncharge_page(page, &ug); 6226 } while (next != page_list); 6227 6228 if (ug.memcg) 6229 uncharge_batch(&ug); 6230 } 6231 6232 /** 6233 * mem_cgroup_uncharge - uncharge a page 6234 * @page: page to uncharge 6235 * 6236 * Uncharge a page previously charged with mem_cgroup_try_charge() and 6237 * mem_cgroup_commit_charge(). 6238 */ 6239 void mem_cgroup_uncharge(struct page *page) 6240 { 6241 struct uncharge_gather ug; 6242 6243 if (mem_cgroup_disabled()) 6244 return; 6245 6246 /* Don't touch page->lru of any random page, pre-check: */ 6247 if (!page->mem_cgroup) 6248 return; 6249 6250 uncharge_gather_clear(&ug); 6251 uncharge_page(page, &ug); 6252 uncharge_batch(&ug); 6253 } 6254 6255 /** 6256 * mem_cgroup_uncharge_list - uncharge a list of page 6257 * @page_list: list of pages to uncharge 6258 * 6259 * Uncharge a list of pages previously charged with 6260 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 6261 */ 6262 void mem_cgroup_uncharge_list(struct list_head *page_list) 6263 { 6264 if (mem_cgroup_disabled()) 6265 return; 6266 6267 if (!list_empty(page_list)) 6268 uncharge_list(page_list); 6269 } 6270 6271 /** 6272 * mem_cgroup_migrate - charge a page's replacement 6273 * @oldpage: currently circulating page 6274 * @newpage: replacement page 6275 * 6276 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6277 * be uncharged upon free. 6278 * 6279 * Both pages must be locked, @newpage->mapping must be set up. 6280 */ 6281 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6282 { 6283 struct mem_cgroup *memcg; 6284 unsigned int nr_pages; 6285 bool compound; 6286 unsigned long flags; 6287 6288 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6289 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6290 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6291 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6292 newpage); 6293 6294 if (mem_cgroup_disabled()) 6295 return; 6296 6297 /* Page cache replacement: new page already charged? */ 6298 if (newpage->mem_cgroup) 6299 return; 6300 6301 /* Swapcache readahead pages can get replaced before being charged */ 6302 memcg = oldpage->mem_cgroup; 6303 if (!memcg) 6304 return; 6305 6306 /* Force-charge the new page. The old one will be freed soon */ 6307 compound = PageTransHuge(newpage); 6308 nr_pages = compound ? 
hpage_nr_pages(newpage) : 1; 6309 6310 page_counter_charge(&memcg->memory, nr_pages); 6311 if (do_memsw_account()) 6312 page_counter_charge(&memcg->memsw, nr_pages); 6313 css_get_many(&memcg->css, nr_pages); 6314 6315 commit_charge(newpage, memcg, false); 6316 6317 local_irq_save(flags); 6318 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 6319 memcg_check_events(memcg, newpage); 6320 local_irq_restore(flags); 6321 } 6322 6323 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 6324 EXPORT_SYMBOL(memcg_sockets_enabled_key); 6325 6326 void mem_cgroup_sk_alloc(struct sock *sk) 6327 { 6328 struct mem_cgroup *memcg; 6329 6330 if (!mem_cgroup_sockets_enabled) 6331 return; 6332 6333 /* 6334 * Socket cloning can throw us here with sk_memcg already 6335 * filled. It won't, however, necessarily happen from 6336 * process context, so checking the current task's memcg 6337 * against the root memcg won't help us in this case. 6338 * 6339 * Respecting the original socket's memcg is a better 6340 * decision in this case. 6341 */ 6342 if (sk->sk_memcg) { 6343 css_get(&sk->sk_memcg->css); 6344 return; 6345 } 6346 6347 rcu_read_lock(); 6348 memcg = mem_cgroup_from_task(current); 6349 if (memcg == root_mem_cgroup) 6350 goto out; 6351 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 6352 goto out; 6353 if (css_tryget_online(&memcg->css)) 6354 sk->sk_memcg = memcg; 6355 out: 6356 rcu_read_unlock(); 6357 } 6358 6359 void mem_cgroup_sk_free(struct sock *sk) 6360 { 6361 if (sk->sk_memcg) 6362 css_put(&sk->sk_memcg->css); 6363 } 6364 6365 /** 6366 * mem_cgroup_charge_skmem - charge socket memory 6367 * @memcg: memcg to charge 6368 * @nr_pages: number of pages to charge 6369 * 6370 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 6371 * @memcg's configured limit, %false if the charge had to be forced.
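 *
 * A hedged caller sketch (the pressure handling shown is illustrative and
 * not lifted from any specific protocol):
 *
 *	if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		... the charge was forced; treat this memcg as under
 *		    memory pressure and curb further allocations ...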
6372 */ 6373 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6374 { 6375 gfp_t gfp_mask = GFP_KERNEL; 6376 6377 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6378 struct page_counter *fail; 6379 6380 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 6381 memcg->tcpmem_pressure = 0; 6382 return true; 6383 } 6384 page_counter_charge(&memcg->tcpmem, nr_pages); 6385 memcg->tcpmem_pressure = 1; 6386 return false; 6387 } 6388 6389 /* Don't block in the packet receive path */ 6390 if (in_softirq()) 6391 gfp_mask = GFP_NOWAIT; 6392 6393 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 6394 6395 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 6396 return true; 6397 6398 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 6399 return false; 6400 } 6401 6402 /** 6403 * mem_cgroup_uncharge_skmem - uncharge socket memory 6404 * @memcg: memcg to uncharge 6405 * @nr_pages: number of pages to uncharge 6406 */ 6407 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6408 { 6409 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6410 page_counter_uncharge(&memcg->tcpmem, nr_pages); 6411 return; 6412 } 6413 6414 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 6415 6416 refill_stock(memcg, nr_pages); 6417 } 6418 6419 static int __init cgroup_memory(char *s) 6420 { 6421 char *token; 6422 6423 while ((token = strsep(&s, ",")) != NULL) { 6424 if (!*token) 6425 continue; 6426 if (!strcmp(token, "nosocket")) 6427 cgroup_memory_nosocket = true; 6428 if (!strcmp(token, "nokmem")) 6429 cgroup_memory_nokmem = true; 6430 } 6431 return 0; 6432 } 6433 __setup("cgroup.memory=", cgroup_memory); 6434 6435 /* 6436 * subsys_initcall() for memory controller. 6437 * 6438 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 6439 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 6440 * basically everything that doesn't depend on a specific mem_cgroup structure 6441 * should be initialized from here. 6442 */ 6443 static int __init mem_cgroup_init(void) 6444 { 6445 int cpu, node; 6446 6447 #ifdef CONFIG_MEMCG_KMEM 6448 /* 6449 * Kmem cache creation is mostly done with the slab_mutex held, 6450 * so use a workqueue with limited concurrency to avoid stalling 6451 * all worker threads in case lots of cgroups are created and 6452 * destroyed simultaneously. 6453 */ 6454 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); 6455 BUG_ON(!memcg_kmem_cache_wq); 6456 #endif 6457 6458 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 6459 memcg_hotplug_cpu_dead); 6460 6461 for_each_possible_cpu(cpu) 6462 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 6463 drain_local_stock); 6464 6465 for_each_node(node) { 6466 struct mem_cgroup_tree_per_node *rtpn; 6467 6468 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 6469 node_online(node) ? node : NUMA_NO_NODE); 6470 6471 rtpn->rb_root = RB_ROOT; 6472 rtpn->rb_rightmost = NULL; 6473 spin_lock_init(&rtpn->lock); 6474 soft_limit_tree.rb_tree_per_node[node] = rtpn; 6475 } 6476 6477 return 0; 6478 } 6479 subsys_initcall(mem_cgroup_init); 6480 6481 #ifdef CONFIG_MEMCG_SWAP 6482 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 6483 { 6484 while (!refcount_inc_not_zero(&memcg->id.ref)) { 6485 /* 6486 * The root cgroup cannot be destroyed, so it's refcount must 6487 * always be >= 1. 
6488 */ 6489 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 6490 VM_BUG_ON(1); 6491 break; 6492 } 6493 memcg = parent_mem_cgroup(memcg); 6494 if (!memcg) 6495 memcg = root_mem_cgroup; 6496 } 6497 return memcg; 6498 } 6499 6500 /** 6501 * mem_cgroup_swapout - transfer a memsw charge to swap 6502 * @page: page whose memsw charge to transfer 6503 * @entry: swap entry to move the charge to 6504 * 6505 * Transfer the memsw charge of @page to @entry. 6506 */ 6507 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 6508 { 6509 struct mem_cgroup *memcg, *swap_memcg; 6510 unsigned int nr_entries; 6511 unsigned short oldid; 6512 6513 VM_BUG_ON_PAGE(PageLRU(page), page); 6514 VM_BUG_ON_PAGE(page_count(page), page); 6515 6516 if (!do_memsw_account()) 6517 return; 6518 6519 memcg = page->mem_cgroup; 6520 6521 /* Readahead page, never charged */ 6522 if (!memcg) 6523 return; 6524 6525 /* 6526 * In case the memcg owning these pages has been offlined and doesn't 6527 * have an ID allocated to it anymore, charge the closest online 6528 * ancestor for the swap instead and transfer the memory+swap charge. 6529 */ 6530 swap_memcg = mem_cgroup_id_get_online(memcg); 6531 nr_entries = hpage_nr_pages(page); 6532 /* Get references for the tail pages, too */ 6533 if (nr_entries > 1) 6534 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 6535 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 6536 nr_entries); 6537 VM_BUG_ON_PAGE(oldid, page); 6538 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 6539 6540 page->mem_cgroup = NULL; 6541 6542 if (!mem_cgroup_is_root(memcg)) 6543 page_counter_uncharge(&memcg->memory, nr_entries); 6544 6545 if (memcg != swap_memcg) { 6546 if (!mem_cgroup_is_root(swap_memcg)) 6547 page_counter_charge(&swap_memcg->memsw, nr_entries); 6548 page_counter_uncharge(&memcg->memsw, nr_entries); 6549 } 6550 6551 /* 6552 * Interrupts should be disabled here because the caller holds the 6553 * i_pages lock which is taken with interrupts-off. It is 6554 * important here to have the interrupts disabled because it is the 6555 * only synchronisation we have for updating the per-CPU variables. 6556 */ 6557 VM_BUG_ON(!irqs_disabled()); 6558 mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), 6559 -nr_entries); 6560 memcg_check_events(memcg, page); 6561 6562 if (!mem_cgroup_is_root(memcg)) 6563 css_put_many(&memcg->css, nr_entries); 6564 } 6565 6566 /** 6567 * mem_cgroup_try_charge_swap - try charging swap space for a page 6568 * @page: page being added to swap 6569 * @entry: swap entry to charge 6570 * 6571 * Try to charge @page's memcg for the swap space at @entry. 6572 * 6573 * Returns 0 on success, -ENOMEM on failure. 
6574 */ 6575 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 6576 { 6577 unsigned int nr_pages = hpage_nr_pages(page); 6578 struct page_counter *counter; 6579 struct mem_cgroup *memcg; 6580 unsigned short oldid; 6581 6582 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) 6583 return 0; 6584 6585 memcg = page->mem_cgroup; 6586 6587 /* Readahead page, never charged */ 6588 if (!memcg) 6589 return 0; 6590 6591 if (!entry.val) { 6592 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 6593 return 0; 6594 } 6595 6596 memcg = mem_cgroup_id_get_online(memcg); 6597 6598 if (!mem_cgroup_is_root(memcg) && 6599 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 6600 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 6601 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 6602 mem_cgroup_id_put(memcg); 6603 return -ENOMEM; 6604 } 6605 6606 /* Get references for the tail pages, too */ 6607 if (nr_pages > 1) 6608 mem_cgroup_id_get_many(memcg, nr_pages - 1); 6609 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 6610 VM_BUG_ON_PAGE(oldid, page); 6611 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 6612 6613 return 0; 6614 } 6615 6616 /** 6617 * mem_cgroup_uncharge_swap - uncharge swap space 6618 * @entry: swap entry to uncharge 6619 * @nr_pages: the amount of swap space to uncharge 6620 */ 6621 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 6622 { 6623 struct mem_cgroup *memcg; 6624 unsigned short id; 6625 6626 if (!do_swap_account) 6627 return; 6628 6629 id = swap_cgroup_record(entry, 0, nr_pages); 6630 rcu_read_lock(); 6631 memcg = mem_cgroup_from_id(id); 6632 if (memcg) { 6633 if (!mem_cgroup_is_root(memcg)) { 6634 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6635 page_counter_uncharge(&memcg->swap, nr_pages); 6636 else 6637 page_counter_uncharge(&memcg->memsw, nr_pages); 6638 } 6639 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 6640 mem_cgroup_id_put_many(memcg, nr_pages); 6641 } 6642 rcu_read_unlock(); 6643 } 6644 6645 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 6646 { 6647 long nr_swap_pages = get_nr_swap_pages(); 6648 6649 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6650 return nr_swap_pages; 6651 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 6652 nr_swap_pages = min_t(long, nr_swap_pages, 6653 READ_ONCE(memcg->swap.max) - 6654 page_counter_read(&memcg->swap)); 6655 return nr_swap_pages; 6656 } 6657 6658 bool mem_cgroup_swap_full(struct page *page) 6659 { 6660 struct mem_cgroup *memcg; 6661 6662 VM_BUG_ON_PAGE(!PageLocked(page), page); 6663 6664 if (vm_swap_full()) 6665 return true; 6666 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6667 return false; 6668 6669 memcg = page->mem_cgroup; 6670 if (!memcg) 6671 return false; 6672 6673 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 6674 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max) 6675 return true; 6676 6677 return false; 6678 } 6679 6680 /* for remember boot option*/ 6681 #ifdef CONFIG_MEMCG_SWAP_ENABLED 6682 static int really_do_swap_account __initdata = 1; 6683 #else 6684 static int really_do_swap_account __initdata; 6685 #endif 6686 6687 static int __init enable_swap_account(char *s) 6688 { 6689 if (!strcmp(s, "1")) 6690 really_do_swap_account = 1; 6691 else if (!strcmp(s, "0")) 6692 really_do_swap_account = 0; 6693 return 1; 6694 } 6695 __setup("swapaccount=", enable_swap_account); 6696 6697 static u64 swap_current_read(struct cgroup_subsys_state *css, 6698 struct 
cftype *cft) 6699 { 6700 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6701 6702 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 6703 } 6704 6705 static int swap_max_show(struct seq_file *m, void *v) 6706 { 6707 return seq_puts_memcg_tunable(m, 6708 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 6709 } 6710 6711 static ssize_t swap_max_write(struct kernfs_open_file *of, 6712 char *buf, size_t nbytes, loff_t off) 6713 { 6714 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6715 unsigned long max; 6716 int err; 6717 6718 buf = strstrip(buf); 6719 err = page_counter_memparse(buf, "max", &max); 6720 if (err) 6721 return err; 6722 6723 xchg(&memcg->swap.max, max); 6724 6725 return nbytes; 6726 } 6727 6728 static int swap_events_show(struct seq_file *m, void *v) 6729 { 6730 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6731 6732 seq_printf(m, "max %lu\n", 6733 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 6734 seq_printf(m, "fail %lu\n", 6735 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 6736 6737 return 0; 6738 } 6739 6740 static struct cftype swap_files[] = { 6741 { 6742 .name = "swap.current", 6743 .flags = CFTYPE_NOT_ON_ROOT, 6744 .read_u64 = swap_current_read, 6745 }, 6746 { 6747 .name = "swap.max", 6748 .flags = CFTYPE_NOT_ON_ROOT, 6749 .seq_show = swap_max_show, 6750 .write = swap_max_write, 6751 }, 6752 { 6753 .name = "swap.events", 6754 .flags = CFTYPE_NOT_ON_ROOT, 6755 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 6756 .seq_show = swap_events_show, 6757 }, 6758 { } /* terminate */ 6759 }; 6760 6761 static struct cftype memsw_cgroup_files[] = { 6762 { 6763 .name = "memsw.usage_in_bytes", 6764 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6765 .read_u64 = mem_cgroup_read_u64, 6766 }, 6767 { 6768 .name = "memsw.max_usage_in_bytes", 6769 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6770 .write = mem_cgroup_reset, 6771 .read_u64 = mem_cgroup_read_u64, 6772 }, 6773 { 6774 .name = "memsw.limit_in_bytes", 6775 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6776 .write = mem_cgroup_write, 6777 .read_u64 = mem_cgroup_read_u64, 6778 }, 6779 { 6780 .name = "memsw.failcnt", 6781 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6782 .write = mem_cgroup_reset, 6783 .read_u64 = mem_cgroup_read_u64, 6784 }, 6785 { }, /* terminate */ 6786 }; 6787 6788 static int __init mem_cgroup_swap_init(void) 6789 { 6790 if (!mem_cgroup_disabled() && really_do_swap_account) { 6791 do_swap_account = 1; 6792 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, 6793 swap_files)); 6794 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 6795 memsw_cgroup_files)); 6796 } 6797 return 0; 6798 } 6799 subsys_initcall(mem_cgroup_swap_init); 6800 6801 #endif /* CONFIG_MEMCG_SWAP */ 6802