1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * Kernel Memory Controller 14 * Copyright (C) 2012 Parallels Inc. and Google Inc. 15 * Authors: Glauber Costa and Suleiman Souhlal 16 * 17 * Native page reclaim 18 * Charge lifetime sanitation 19 * Lockless page tracking & accounting 20 * Unified hierarchy configuration model 21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 22 * 23 * This program is free software; you can redistribute it and/or modify 24 * it under the terms of the GNU General Public License as published by 25 * the Free Software Foundation; either version 2 of the License, or 26 * (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 */ 33 34 #include <linux/page_counter.h> 35 #include <linux/memcontrol.h> 36 #include <linux/cgroup.h> 37 #include <linux/mm.h> 38 #include <linux/sched/mm.h> 39 #include <linux/shmem_fs.h> 40 #include <linux/hugetlb.h> 41 #include <linux/pagemap.h> 42 #include <linux/vm_event_item.h> 43 #include <linux/smp.h> 44 #include <linux/page-flags.h> 45 #include <linux/backing-dev.h> 46 #include <linux/bit_spinlock.h> 47 #include <linux/rcupdate.h> 48 #include <linux/limits.h> 49 #include <linux/export.h> 50 #include <linux/mutex.h> 51 #include <linux/rbtree.h> 52 #include <linux/slab.h> 53 #include <linux/swap.h> 54 #include <linux/swapops.h> 55 #include <linux/spinlock.h> 56 #include <linux/eventfd.h> 57 #include <linux/poll.h> 58 #include <linux/sort.h> 59 #include <linux/fs.h> 60 #include <linux/seq_file.h> 61 #include <linux/vmpressure.h> 62 #include <linux/mm_inline.h> 63 #include <linux/swap_cgroup.h> 64 #include <linux/cpu.h> 65 #include <linux/oom.h> 66 #include <linux/lockdep.h> 67 #include <linux/file.h> 68 #include <linux/tracehook.h> 69 #include "internal.h" 70 #include <net/sock.h> 71 #include <net/ip.h> 72 #include "slab.h" 73 74 #include <linux/uaccess.h> 75 76 #include <trace/events/vmscan.h> 77 78 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 79 EXPORT_SYMBOL(memory_cgrp_subsys); 80 81 struct mem_cgroup *root_mem_cgroup __read_mostly; 82 83 #define MEM_CGROUP_RECLAIM_RETRIES 5 84 85 /* Socket memory accounting disabled? */ 86 static bool cgroup_memory_nosocket; 87 88 /* Kernel memory accounting disabled? 
*/ 89 static bool cgroup_memory_nokmem; 90 91 /* Whether the swap controller is active */ 92 #ifdef CONFIG_MEMCG_SWAP 93 int do_swap_account __read_mostly; 94 #else 95 #define do_swap_account 0 96 #endif 97 98 /* Whether legacy memory+swap accounting is active */ 99 static bool do_memsw_account(void) 100 { 101 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account; 102 } 103 104 static const char *const mem_cgroup_lru_names[] = { 105 "inactive_anon", 106 "active_anon", 107 "inactive_file", 108 "active_file", 109 "unevictable", 110 }; 111 112 #define THRESHOLDS_EVENTS_TARGET 128 113 #define SOFTLIMIT_EVENTS_TARGET 1024 114 #define NUMAINFO_EVENTS_TARGET 1024 115 116 /* 117 * Cgroups above their limits are maintained in a RB-Tree, independent of 118 * their hierarchy representation 119 */ 120 121 struct mem_cgroup_tree_per_node { 122 struct rb_root rb_root; 123 struct rb_node *rb_rightmost; 124 spinlock_t lock; 125 }; 126 127 struct mem_cgroup_tree { 128 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 129 }; 130 131 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 132 133 /* for OOM */ 134 struct mem_cgroup_eventfd_list { 135 struct list_head list; 136 struct eventfd_ctx *eventfd; 137 }; 138 139 /* 140 * cgroup_event represents events which userspace want to receive. 141 */ 142 struct mem_cgroup_event { 143 /* 144 * memcg which the event belongs to. 145 */ 146 struct mem_cgroup *memcg; 147 /* 148 * eventfd to signal userspace about the event. 149 */ 150 struct eventfd_ctx *eventfd; 151 /* 152 * Each of these stored in a list by the cgroup. 153 */ 154 struct list_head list; 155 /* 156 * register_event() callback will be used to add new userspace 157 * waiter for changes related to this event. Use eventfd_signal() 158 * on eventfd to send notification to userspace. 159 */ 160 int (*register_event)(struct mem_cgroup *memcg, 161 struct eventfd_ctx *eventfd, const char *args); 162 /* 163 * unregister_event() callback will be called when userspace closes 164 * the eventfd or on cgroup removing. This callback must be set, 165 * if you want provide notification functionality. 166 */ 167 void (*unregister_event)(struct mem_cgroup *memcg, 168 struct eventfd_ctx *eventfd); 169 /* 170 * All fields below needed to unregister event when 171 * userspace closes eventfd. 172 */ 173 poll_table pt; 174 wait_queue_head_t *wqh; 175 wait_queue_entry_t wait; 176 struct work_struct remove; 177 }; 178 179 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 180 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 181 182 /* Stuffs for move charges at task migration. */ 183 /* 184 * Types of charges to be moved. 185 */ 186 #define MOVE_ANON 0x1U 187 #define MOVE_FILE 0x2U 188 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 189 190 /* "mc" and its members are protected by cgroup_mutex */ 191 static struct move_charge_struct { 192 spinlock_t lock; /* for from, to */ 193 struct mm_struct *mm; 194 struct mem_cgroup *from; 195 struct mem_cgroup *to; 196 unsigned long flags; 197 unsigned long precharge; 198 unsigned long moved_charge; 199 unsigned long moved_swap; 200 struct task_struct *moving_task; /* a task moving charges */ 201 wait_queue_head_t waitq; /* a waitq for other context */ 202 } mc = { 203 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 204 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 205 }; 206 207 /* 208 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 209 * limit reclaim to prevent infinite loops, if they ever occur. 
210 */ 211 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 212 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 213 214 enum charge_type { 215 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 216 MEM_CGROUP_CHARGE_TYPE_ANON, 217 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 218 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 219 NR_CHARGE_TYPE, 220 }; 221 222 /* for encoding cft->private value on file */ 223 enum res_type { 224 _MEM, 225 _MEMSWAP, 226 _OOM_TYPE, 227 _KMEM, 228 _TCP, 229 }; 230 231 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 232 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 233 #define MEMFILE_ATTR(val) ((val) & 0xffff) 234 /* Used for OOM nofiier */ 235 #define OOM_CONTROL (0) 236 237 /* 238 * Iteration constructs for visiting all cgroups (under a tree). If 239 * loops are exited prematurely (break), mem_cgroup_iter_break() must 240 * be used for reference counting. 241 */ 242 #define for_each_mem_cgroup_tree(iter, root) \ 243 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 244 iter != NULL; \ 245 iter = mem_cgroup_iter(root, iter, NULL)) 246 247 #define for_each_mem_cgroup(iter) \ 248 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 249 iter != NULL; \ 250 iter = mem_cgroup_iter(NULL, iter, NULL)) 251 252 static inline bool should_force_charge(void) 253 { 254 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || 255 (current->flags & PF_EXITING); 256 } 257 258 /* Some nice accessors for the vmpressure. */ 259 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 260 { 261 if (!memcg) 262 memcg = root_mem_cgroup; 263 return &memcg->vmpressure; 264 } 265 266 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 267 { 268 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 269 } 270 271 #ifdef CONFIG_MEMCG_KMEM 272 /* 273 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. 274 * The main reason for not using cgroup id for this: 275 * this works better in sparse environments, where we have a lot of memcgs, 276 * but only a few kmem-limited. Or also, if we have, for instance, 200 277 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 278 * 200 entry array for that. 279 * 280 * The current size of the caches array is stored in memcg_nr_cache_ids. It 281 * will double each time we have to increase it. 282 */ 283 static DEFINE_IDA(memcg_cache_ida); 284 int memcg_nr_cache_ids; 285 286 /* Protects memcg_nr_cache_ids */ 287 static DECLARE_RWSEM(memcg_cache_ids_sem); 288 289 void memcg_get_cache_ids(void) 290 { 291 down_read(&memcg_cache_ids_sem); 292 } 293 294 void memcg_put_cache_ids(void) 295 { 296 up_read(&memcg_cache_ids_sem); 297 } 298 299 /* 300 * MIN_SIZE is different than 1, because we would like to avoid going through 301 * the alloc/free process all the time. In a small machine, 4 kmem-limited 302 * cgroups is a reasonable guess. In the future, it could be a parameter or 303 * tunable, but that is strictly not necessary. 304 * 305 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 306 * this constant directly from cgroup, but it is understandable that this is 307 * better kept as an internal representation in cgroup.c. In any case, the 308 * cgrp_id space is not getting any smaller, and we don't have to necessarily 309 * increase ours as well if it increases. 
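 *
 * As a purely illustrative example of the doubling described above: with
 * the minimum below, the arrays start with room for 4 kmem-limited cgroups
 * and grow to 8, 16, ... entries as more ids are handed out by
 * memcg_cache_ida.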
310 */ 311 #define MEMCG_CACHES_MIN_SIZE 4 312 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 313 314 /* 315 * A lot of the calls to the cache allocation functions are expected to be 316 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are 317 * conditional to this static branch, we'll have to allow modules that does 318 * kmem_cache_alloc and the such to see this symbol as well 319 */ 320 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 321 EXPORT_SYMBOL(memcg_kmem_enabled_key); 322 323 struct workqueue_struct *memcg_kmem_cache_wq; 324 325 static int memcg_shrinker_map_size; 326 static DEFINE_MUTEX(memcg_shrinker_map_mutex); 327 328 static void memcg_free_shrinker_map_rcu(struct rcu_head *head) 329 { 330 kvfree(container_of(head, struct memcg_shrinker_map, rcu)); 331 } 332 333 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, 334 int size, int old_size) 335 { 336 struct memcg_shrinker_map *new, *old; 337 int nid; 338 339 lockdep_assert_held(&memcg_shrinker_map_mutex); 340 341 for_each_node(nid) { 342 old = rcu_dereference_protected( 343 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); 344 /* Not yet online memcg */ 345 if (!old) 346 return 0; 347 348 new = kvmalloc(sizeof(*new) + size, GFP_KERNEL); 349 if (!new) 350 return -ENOMEM; 351 352 /* Set all old bits, clear all new bits */ 353 memset(new->map, (int)0xff, old_size); 354 memset((void *)new->map + old_size, 0, size - old_size); 355 356 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); 357 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); 358 } 359 360 return 0; 361 } 362 363 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) 364 { 365 struct mem_cgroup_per_node *pn; 366 struct memcg_shrinker_map *map; 367 int nid; 368 369 if (mem_cgroup_is_root(memcg)) 370 return; 371 372 for_each_node(nid) { 373 pn = mem_cgroup_nodeinfo(memcg, nid); 374 map = rcu_dereference_protected(pn->shrinker_map, true); 375 if (map) 376 kvfree(map); 377 rcu_assign_pointer(pn->shrinker_map, NULL); 378 } 379 } 380 381 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) 382 { 383 struct memcg_shrinker_map *map; 384 int nid, size, ret = 0; 385 386 if (mem_cgroup_is_root(memcg)) 387 return 0; 388 389 mutex_lock(&memcg_shrinker_map_mutex); 390 size = memcg_shrinker_map_size; 391 for_each_node(nid) { 392 map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); 393 if (!map) { 394 memcg_free_shrinker_maps(memcg); 395 ret = -ENOMEM; 396 break; 397 } 398 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); 399 } 400 mutex_unlock(&memcg_shrinker_map_mutex); 401 402 return ret; 403 } 404 405 int memcg_expand_shrinker_maps(int new_id) 406 { 407 int size, old_size, ret = 0; 408 struct mem_cgroup *memcg; 409 410 size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); 411 old_size = memcg_shrinker_map_size; 412 if (size <= old_size) 413 return 0; 414 415 mutex_lock(&memcg_shrinker_map_mutex); 416 if (!root_mem_cgroup) 417 goto unlock; 418 419 for_each_mem_cgroup(memcg) { 420 if (mem_cgroup_is_root(memcg)) 421 continue; 422 ret = memcg_expand_one_shrinker_map(memcg, size, old_size); 423 if (ret) 424 goto unlock; 425 } 426 unlock: 427 if (!ret) 428 memcg_shrinker_map_size = size; 429 mutex_unlock(&memcg_shrinker_map_mutex); 430 return ret; 431 } 432 433 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) 434 { 435 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { 436 struct memcg_shrinker_map *map; 437 438 rcu_read_lock(); 439 map = 
rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); 440 /* Pairs with smp mb in shrink_slab() */ 441 smp_mb__before_atomic(); 442 set_bit(shrinker_id, map->map); 443 rcu_read_unlock(); 444 } 445 } 446 447 #else /* CONFIG_MEMCG_KMEM */ 448 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) 449 { 450 return 0; 451 } 452 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { } 453 #endif /* CONFIG_MEMCG_KMEM */ 454 455 /** 456 * mem_cgroup_css_from_page - css of the memcg associated with a page 457 * @page: page of interest 458 * 459 * If memcg is bound to the default hierarchy, css of the memcg associated 460 * with @page is returned. The returned css remains associated with @page 461 * until it is released. 462 * 463 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 464 * is returned. 465 */ 466 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 467 { 468 struct mem_cgroup *memcg; 469 470 memcg = page->mem_cgroup; 471 472 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 473 memcg = root_mem_cgroup; 474 475 return &memcg->css; 476 } 477 478 /** 479 * page_cgroup_ino - return inode number of the memcg a page is charged to 480 * @page: the page 481 * 482 * Look up the closest online ancestor of the memory cgroup @page is charged to 483 * and return its inode number or 0 if @page is not charged to any cgroup. It 484 * is safe to call this function without holding a reference to @page. 485 * 486 * Note, this function is inherently racy, because there is nothing to prevent 487 * the cgroup inode from getting torn down and potentially reallocated a moment 488 * after page_cgroup_ino() returns, so it only should be used by callers that 489 * do not care (such as procfs interfaces). 
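 *
 * Illustrative use only (a sketch of a procfs-style reader, not a call
 * site in this file): the returned number can be printed or compared
 * against a cgroup directory inode, accepting the race described above:
 *
 *	ino_t ino = page_cgroup_ino(page);
 *
 *	if (ino)
 *		seq_printf(m, "memcg_ino: %lu\n", (unsigned long)ino);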
490 */ 491 ino_t page_cgroup_ino(struct page *page) 492 { 493 struct mem_cgroup *memcg; 494 unsigned long ino = 0; 495 496 rcu_read_lock(); 497 memcg = READ_ONCE(page->mem_cgroup); 498 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 499 memcg = parent_mem_cgroup(memcg); 500 if (memcg) 501 ino = cgroup_ino(memcg->css.cgroup); 502 rcu_read_unlock(); 503 return ino; 504 } 505 506 static struct mem_cgroup_per_node * 507 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 508 { 509 int nid = page_to_nid(page); 510 511 return memcg->nodeinfo[nid]; 512 } 513 514 static struct mem_cgroup_tree_per_node * 515 soft_limit_tree_node(int nid) 516 { 517 return soft_limit_tree.rb_tree_per_node[nid]; 518 } 519 520 static struct mem_cgroup_tree_per_node * 521 soft_limit_tree_from_page(struct page *page) 522 { 523 int nid = page_to_nid(page); 524 525 return soft_limit_tree.rb_tree_per_node[nid]; 526 } 527 528 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 529 struct mem_cgroup_tree_per_node *mctz, 530 unsigned long new_usage_in_excess) 531 { 532 struct rb_node **p = &mctz->rb_root.rb_node; 533 struct rb_node *parent = NULL; 534 struct mem_cgroup_per_node *mz_node; 535 bool rightmost = true; 536 537 if (mz->on_tree) 538 return; 539 540 mz->usage_in_excess = new_usage_in_excess; 541 if (!mz->usage_in_excess) 542 return; 543 while (*p) { 544 parent = *p; 545 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 546 tree_node); 547 if (mz->usage_in_excess < mz_node->usage_in_excess) { 548 p = &(*p)->rb_left; 549 rightmost = false; 550 } 551 552 /* 553 * We can't avoid mem cgroups that are over their soft 554 * limit by the same amount 555 */ 556 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 557 p = &(*p)->rb_right; 558 } 559 560 if (rightmost) 561 mctz->rb_rightmost = &mz->tree_node; 562 563 rb_link_node(&mz->tree_node, parent, p); 564 rb_insert_color(&mz->tree_node, &mctz->rb_root); 565 mz->on_tree = true; 566 } 567 568 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 569 struct mem_cgroup_tree_per_node *mctz) 570 { 571 if (!mz->on_tree) 572 return; 573 574 if (&mz->tree_node == mctz->rb_rightmost) 575 mctz->rb_rightmost = rb_prev(&mz->tree_node); 576 577 rb_erase(&mz->tree_node, &mctz->rb_root); 578 mz->on_tree = false; 579 } 580 581 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 582 struct mem_cgroup_tree_per_node *mctz) 583 { 584 unsigned long flags; 585 586 spin_lock_irqsave(&mctz->lock, flags); 587 __mem_cgroup_remove_exceeded(mz, mctz); 588 spin_unlock_irqrestore(&mctz->lock, flags); 589 } 590 591 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 592 { 593 unsigned long nr_pages = page_counter_read(&memcg->memory); 594 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 595 unsigned long excess = 0; 596 597 if (nr_pages > soft_limit) 598 excess = nr_pages - soft_limit; 599 600 return excess; 601 } 602 603 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 604 { 605 unsigned long excess; 606 struct mem_cgroup_per_node *mz; 607 struct mem_cgroup_tree_per_node *mctz; 608 609 mctz = soft_limit_tree_from_page(page); 610 if (!mctz) 611 return; 612 /* 613 * Necessary to update all ancestors when hierarchy is used. 614 * because their event counter is not touched. 
615 */ 616 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 617 mz = mem_cgroup_page_nodeinfo(memcg, page); 618 excess = soft_limit_excess(memcg); 619 /* 620 * We have to update the tree if mz is on RB-tree or 621 * mem is over its softlimit. 622 */ 623 if (excess || mz->on_tree) { 624 unsigned long flags; 625 626 spin_lock_irqsave(&mctz->lock, flags); 627 /* if on-tree, remove it */ 628 if (mz->on_tree) 629 __mem_cgroup_remove_exceeded(mz, mctz); 630 /* 631 * Insert again. mz->usage_in_excess will be updated. 632 * If excess is 0, no tree ops. 633 */ 634 __mem_cgroup_insert_exceeded(mz, mctz, excess); 635 spin_unlock_irqrestore(&mctz->lock, flags); 636 } 637 } 638 } 639 640 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 641 { 642 struct mem_cgroup_tree_per_node *mctz; 643 struct mem_cgroup_per_node *mz; 644 int nid; 645 646 for_each_node(nid) { 647 mz = mem_cgroup_nodeinfo(memcg, nid); 648 mctz = soft_limit_tree_node(nid); 649 if (mctz) 650 mem_cgroup_remove_exceeded(mz, mctz); 651 } 652 } 653 654 static struct mem_cgroup_per_node * 655 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 656 { 657 struct mem_cgroup_per_node *mz; 658 659 retry: 660 mz = NULL; 661 if (!mctz->rb_rightmost) 662 goto done; /* Nothing to reclaim from */ 663 664 mz = rb_entry(mctz->rb_rightmost, 665 struct mem_cgroup_per_node, tree_node); 666 /* 667 * Remove the node now but someone else can add it back, 668 * we will to add it back at the end of reclaim to its correct 669 * position in the tree. 670 */ 671 __mem_cgroup_remove_exceeded(mz, mctz); 672 if (!soft_limit_excess(mz->memcg) || 673 !css_tryget_online(&mz->memcg->css)) 674 goto retry; 675 done: 676 return mz; 677 } 678 679 static struct mem_cgroup_per_node * 680 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 681 { 682 struct mem_cgroup_per_node *mz; 683 684 spin_lock_irq(&mctz->lock); 685 mz = __mem_cgroup_largest_soft_limit_node(mctz); 686 spin_unlock_irq(&mctz->lock); 687 return mz; 688 } 689 690 /** 691 * __mod_memcg_state - update cgroup memory statistics 692 * @memcg: the memory cgroup 693 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 694 * @val: delta to add to the counter, can be negative 695 */ 696 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 697 { 698 long x; 699 700 if (mem_cgroup_disabled()) 701 return; 702 703 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); 704 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 705 struct mem_cgroup *mi; 706 707 atomic_long_add(x, &memcg->vmstats_local[idx]); 708 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 709 atomic_long_add(x, &mi->vmstats[idx]); 710 x = 0; 711 } 712 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); 713 } 714 715 static struct mem_cgroup_per_node * 716 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 717 { 718 struct mem_cgroup *parent; 719 720 parent = parent_mem_cgroup(pn->memcg); 721 if (!parent) 722 return NULL; 723 return mem_cgroup_nodeinfo(parent, nid); 724 } 725 726 /** 727 * __mod_lruvec_state - update lruvec memory statistics 728 * @lruvec: the lruvec 729 * @idx: the stat item 730 * @val: delta to add to the counter, can be negative 731 * 732 * The lruvec is the intersection of the NUMA node and a cgroup. This 733 * function updates the all three counters that are affected by a 734 * change of state at this level: per-node, per-cgroup, per-lruvec. 
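 *
 * Minimal illustrative call (not a call site in this file), assuming the
 * caller already satisfies the locking/IRQ rules required of the
 * __-prefixed statistics helpers:
 *
 *	__mod_lruvec_state(lruvec, NR_FILE_DIRTY, -1);
 *
 * A single call like this adjusts the node counter, the owning memcg's
 * counter and this lruvec's counter by the same delta.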
735 */ 736 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 737 int val) 738 { 739 pg_data_t *pgdat = lruvec_pgdat(lruvec); 740 struct mem_cgroup_per_node *pn; 741 struct mem_cgroup *memcg; 742 long x; 743 744 /* Update node */ 745 __mod_node_page_state(pgdat, idx, val); 746 747 if (mem_cgroup_disabled()) 748 return; 749 750 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 751 memcg = pn->memcg; 752 753 /* Update memcg */ 754 __mod_memcg_state(memcg, idx, val); 755 756 /* Update lruvec */ 757 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 758 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 759 struct mem_cgroup_per_node *pi; 760 761 atomic_long_add(x, &pn->lruvec_stat_local[idx]); 762 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 763 atomic_long_add(x, &pi->lruvec_stat[idx]); 764 x = 0; 765 } 766 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 767 } 768 769 /** 770 * __count_memcg_events - account VM events in a cgroup 771 * @memcg: the memory cgroup 772 * @idx: the event item 773 * @count: the number of events that occured 774 */ 775 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 776 unsigned long count) 777 { 778 unsigned long x; 779 780 if (mem_cgroup_disabled()) 781 return; 782 783 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); 784 if (unlikely(x > MEMCG_CHARGE_BATCH)) { 785 struct mem_cgroup *mi; 786 787 atomic_long_add(x, &memcg->vmevents_local[idx]); 788 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 789 atomic_long_add(x, &mi->vmevents[idx]); 790 x = 0; 791 } 792 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); 793 } 794 795 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 796 { 797 return atomic_long_read(&memcg->vmevents[event]); 798 } 799 800 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 801 { 802 return atomic_long_read(&memcg->vmevents_local[event]); 803 } 804 805 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 806 struct page *page, 807 bool compound, int nr_pages) 808 { 809 /* 810 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is 811 * counted as CACHE even if it's on ANON LRU. 812 */ 813 if (PageAnon(page)) 814 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages); 815 else { 816 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages); 817 if (PageSwapBacked(page)) 818 __mod_memcg_state(memcg, NR_SHMEM, nr_pages); 819 } 820 821 if (compound) { 822 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 823 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages); 824 } 825 826 /* pagein of a big page is an event. 
So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned.
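 *
 * Illustrative pairing only (a sketch, not a call site in this file): the
 * reference obtained here is expected to be dropped with css_put() once
 * the caller is done with the memcg:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_page(page);
 *	...
 *	if (memcg)
 *		css_put(&memcg->css);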
948 */ 949 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page) 950 { 951 struct mem_cgroup *memcg = page->mem_cgroup; 952 953 if (mem_cgroup_disabled()) 954 return NULL; 955 956 rcu_read_lock(); 957 if (!memcg || !css_tryget_online(&memcg->css)) 958 memcg = root_mem_cgroup; 959 rcu_read_unlock(); 960 return memcg; 961 } 962 EXPORT_SYMBOL(get_mem_cgroup_from_page); 963 964 /** 965 * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg. 966 */ 967 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void) 968 { 969 if (unlikely(current->active_memcg)) { 970 struct mem_cgroup *memcg = root_mem_cgroup; 971 972 rcu_read_lock(); 973 if (css_tryget_online(¤t->active_memcg->css)) 974 memcg = current->active_memcg; 975 rcu_read_unlock(); 976 return memcg; 977 } 978 return get_mem_cgroup_from_mm(current->mm); 979 } 980 981 /** 982 * mem_cgroup_iter - iterate over memory cgroup hierarchy 983 * @root: hierarchy root 984 * @prev: previously returned memcg, NULL on first invocation 985 * @reclaim: cookie for shared reclaim walks, NULL for full walks 986 * 987 * Returns references to children of the hierarchy below @root, or 988 * @root itself, or %NULL after a full round-trip. 989 * 990 * Caller must pass the return value in @prev on subsequent 991 * invocations for reference counting, or use mem_cgroup_iter_break() 992 * to cancel a hierarchy walk before the round-trip is complete. 993 * 994 * Reclaimers can specify a node and a priority level in @reclaim to 995 * divide up the memcgs in the hierarchy among all concurrent 996 * reclaimers operating on the same node and priority. 997 */ 998 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 999 struct mem_cgroup *prev, 1000 struct mem_cgroup_reclaim_cookie *reclaim) 1001 { 1002 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 1003 struct cgroup_subsys_state *css = NULL; 1004 struct mem_cgroup *memcg = NULL; 1005 struct mem_cgroup *pos = NULL; 1006 1007 if (mem_cgroup_disabled()) 1008 return NULL; 1009 1010 if (!root) 1011 root = root_mem_cgroup; 1012 1013 if (prev && !reclaim) 1014 pos = prev; 1015 1016 if (!root->use_hierarchy && root != root_mem_cgroup) { 1017 if (prev) 1018 goto out; 1019 return root; 1020 } 1021 1022 rcu_read_lock(); 1023 1024 if (reclaim) { 1025 struct mem_cgroup_per_node *mz; 1026 1027 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); 1028 iter = &mz->iter[reclaim->priority]; 1029 1030 if (prev && reclaim->generation != iter->generation) 1031 goto out_unlock; 1032 1033 while (1) { 1034 pos = READ_ONCE(iter->position); 1035 if (!pos || css_tryget(&pos->css)) 1036 break; 1037 /* 1038 * css reference reached zero, so iter->position will 1039 * be cleared by ->css_released. However, we should not 1040 * rely on this happening soon, because ->css_released 1041 * is called from a work queue, and by busy-waiting we 1042 * might block it. So we clear iter->position right 1043 * away. 1044 */ 1045 (void)cmpxchg(&iter->position, pos, NULL); 1046 } 1047 } 1048 1049 if (pos) 1050 css = &pos->css; 1051 1052 for (;;) { 1053 css = css_next_descendant_pre(css, &root->css); 1054 if (!css) { 1055 /* 1056 * Reclaimers share the hierarchy walk, and a 1057 * new one might jump in right at the end of 1058 * the hierarchy - make sure they see at least 1059 * one group and restart from the beginning. 1060 */ 1061 if (!prev) 1062 continue; 1063 break; 1064 } 1065 1066 /* 1067 * Verify the css and acquire a reference. 
The root 1068 * is provided by the caller, so we know it's alive 1069 * and kicking, and don't take an extra reference. 1070 */ 1071 memcg = mem_cgroup_from_css(css); 1072 1073 if (css == &root->css) 1074 break; 1075 1076 if (css_tryget(css)) 1077 break; 1078 1079 memcg = NULL; 1080 } 1081 1082 if (reclaim) { 1083 /* 1084 * The position could have already been updated by a competing 1085 * thread, so check that the value hasn't changed since we read 1086 * it to avoid reclaiming from the same cgroup twice. 1087 */ 1088 (void)cmpxchg(&iter->position, pos, memcg); 1089 1090 if (pos) 1091 css_put(&pos->css); 1092 1093 if (!memcg) 1094 iter->generation++; 1095 else if (!prev) 1096 reclaim->generation = iter->generation; 1097 } 1098 1099 out_unlock: 1100 rcu_read_unlock(); 1101 out: 1102 if (prev && prev != root) 1103 css_put(&prev->css); 1104 1105 return memcg; 1106 } 1107 1108 /** 1109 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1110 * @root: hierarchy root 1111 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1112 */ 1113 void mem_cgroup_iter_break(struct mem_cgroup *root, 1114 struct mem_cgroup *prev) 1115 { 1116 if (!root) 1117 root = root_mem_cgroup; 1118 if (prev && prev != root) 1119 css_put(&prev->css); 1120 } 1121 1122 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1123 { 1124 struct mem_cgroup *memcg = dead_memcg; 1125 struct mem_cgroup_reclaim_iter *iter; 1126 struct mem_cgroup_per_node *mz; 1127 int nid; 1128 int i; 1129 1130 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1131 for_each_node(nid) { 1132 mz = mem_cgroup_nodeinfo(memcg, nid); 1133 for (i = 0; i <= DEF_PRIORITY; i++) { 1134 iter = &mz->iter[i]; 1135 cmpxchg(&iter->position, 1136 dead_memcg, NULL); 1137 } 1138 } 1139 } 1140 } 1141 1142 /** 1143 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1144 * @memcg: hierarchy root 1145 * @fn: function to call for each task 1146 * @arg: argument passed to @fn 1147 * 1148 * This function iterates over tasks attached to @memcg or to any of its 1149 * descendants and calls @fn for each task. If @fn returns a non-zero 1150 * value, the function breaks the iteration loop and returns the value. 1151 * Otherwise, it will iterate over all tasks and return 0. 1152 * 1153 * This function must not be called for the root memory cgroup. 1154 */ 1155 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1156 int (*fn)(struct task_struct *, void *), void *arg) 1157 { 1158 struct mem_cgroup *iter; 1159 int ret = 0; 1160 1161 BUG_ON(memcg == root_mem_cgroup); 1162 1163 for_each_mem_cgroup_tree(iter, memcg) { 1164 struct css_task_iter it; 1165 struct task_struct *task; 1166 1167 css_task_iter_start(&iter->css, 0, &it); 1168 while (!ret && (task = css_task_iter_next(&it))) 1169 ret = fn(task, arg); 1170 css_task_iter_end(&it); 1171 if (ret) { 1172 mem_cgroup_iter_break(memcg, iter); 1173 break; 1174 } 1175 } 1176 return ret; 1177 } 1178 1179 /** 1180 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 1181 * @page: the page 1182 * @pgdat: pgdat of the page 1183 * 1184 * This function is only safe when following the LRU page isolation 1185 * and putback protocol: the LRU lock must be held, and the page must 1186 * either be PageLRU() or the caller must have isolated/allocated it. 
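 *
 * A hedged sketch of that protocol (illustrative only, assuming
 * pgdat->lru_lock is the LRU lock in this tree):
 *
 *	spin_lock_irq(&pgdat->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, pgdat);
 *	... manipulate lruvec->lists[lru] while the lock is held ...
 *	spin_unlock_irq(&pgdat->lru_lock);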
1187 */ 1188 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat) 1189 { 1190 struct mem_cgroup_per_node *mz; 1191 struct mem_cgroup *memcg; 1192 struct lruvec *lruvec; 1193 1194 if (mem_cgroup_disabled()) { 1195 lruvec = &pgdat->lruvec; 1196 goto out; 1197 } 1198 1199 memcg = page->mem_cgroup; 1200 /* 1201 * Swapcache readahead pages are added to the LRU - and 1202 * possibly migrated - before they are charged. 1203 */ 1204 if (!memcg) 1205 memcg = root_mem_cgroup; 1206 1207 mz = mem_cgroup_page_nodeinfo(memcg, page); 1208 lruvec = &mz->lruvec; 1209 out: 1210 /* 1211 * Since a node can be onlined after the mem_cgroup was created, 1212 * we have to be prepared to initialize lruvec->zone here; 1213 * and if offlined then reonlined, we need to reinitialize it. 1214 */ 1215 if (unlikely(lruvec->pgdat != pgdat)) 1216 lruvec->pgdat = pgdat; 1217 return lruvec; 1218 } 1219 1220 /** 1221 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1222 * @lruvec: mem_cgroup per zone lru vector 1223 * @lru: index of lru list the page is sitting on 1224 * @zid: zone id of the accounted pages 1225 * @nr_pages: positive when adding or negative when removing 1226 * 1227 * This function must be called under lru_lock, just before a page is added 1228 * to or just after a page is removed from an lru list (that ordering being 1229 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1230 */ 1231 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1232 int zid, int nr_pages) 1233 { 1234 struct mem_cgroup_per_node *mz; 1235 unsigned long *lru_size; 1236 long size; 1237 1238 if (mem_cgroup_disabled()) 1239 return; 1240 1241 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1242 lru_size = &mz->lru_zone_size[zid][lru]; 1243 1244 if (nr_pages < 0) 1245 *lru_size += nr_pages; 1246 1247 size = *lru_size; 1248 if (WARN_ONCE(size < 0, 1249 "%s(%p, %d, %d): lru_size %ld\n", 1250 __func__, lruvec, lru, nr_pages, size)) { 1251 VM_BUG_ON(1); 1252 *lru_size = 0; 1253 } 1254 1255 if (nr_pages > 0) 1256 *lru_size += nr_pages; 1257 } 1258 1259 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1260 { 1261 struct mem_cgroup *task_memcg; 1262 struct task_struct *p; 1263 bool ret; 1264 1265 p = find_lock_task_mm(task); 1266 if (p) { 1267 task_memcg = get_mem_cgroup_from_mm(p->mm); 1268 task_unlock(p); 1269 } else { 1270 /* 1271 * All threads may have already detached their mm's, but the oom 1272 * killer still needs to detect if they have already been oom 1273 * killed to prevent needlessly killing additional tasks. 1274 */ 1275 rcu_read_lock(); 1276 task_memcg = mem_cgroup_from_task(task); 1277 css_get(&task_memcg->css); 1278 rcu_read_unlock(); 1279 } 1280 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1281 css_put(&task_memcg->css); 1282 return ret; 1283 } 1284 1285 /** 1286 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1287 * @memcg: the memory cgroup 1288 * 1289 * Returns the maximum amount of memory @mem can be charged with, in 1290 * pages. 
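 *
 * For example (numbers purely illustrative): with memory.max at 1000 pages
 * and a usage of 700 pages the margin is 300 pages; if memsw is also
 * accounted with a limit of 900 pages and a usage of 850 pages, the margin
 * shrinks to min(300, 900 - 850) = 50 pages.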
1291 */ 1292 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1293 { 1294 unsigned long margin = 0; 1295 unsigned long count; 1296 unsigned long limit; 1297 1298 count = page_counter_read(&memcg->memory); 1299 limit = READ_ONCE(memcg->memory.max); 1300 if (count < limit) 1301 margin = limit - count; 1302 1303 if (do_memsw_account()) { 1304 count = page_counter_read(&memcg->memsw); 1305 limit = READ_ONCE(memcg->memsw.max); 1306 if (count <= limit) 1307 margin = min(margin, limit - count); 1308 else 1309 margin = 0; 1310 } 1311 1312 return margin; 1313 } 1314 1315 /* 1316 * A routine for checking "mem" is under move_account() or not. 1317 * 1318 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1319 * moving cgroups. This is for waiting at high-memory pressure 1320 * caused by "move". 1321 */ 1322 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1323 { 1324 struct mem_cgroup *from; 1325 struct mem_cgroup *to; 1326 bool ret = false; 1327 /* 1328 * Unlike task_move routines, we access mc.to, mc.from not under 1329 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1330 */ 1331 spin_lock(&mc.lock); 1332 from = mc.from; 1333 to = mc.to; 1334 if (!from) 1335 goto unlock; 1336 1337 ret = mem_cgroup_is_descendant(from, memcg) || 1338 mem_cgroup_is_descendant(to, memcg); 1339 unlock: 1340 spin_unlock(&mc.lock); 1341 return ret; 1342 } 1343 1344 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1345 { 1346 if (mc.moving_task && current != mc.moving_task) { 1347 if (mem_cgroup_under_move(memcg)) { 1348 DEFINE_WAIT(wait); 1349 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1350 /* moving charge context might have finished. */ 1351 if (mc.moving_task) 1352 schedule(); 1353 finish_wait(&mc.waitq, &wait); 1354 return true; 1355 } 1356 } 1357 return false; 1358 } 1359 1360 static const unsigned int memcg1_stats[] = { 1361 MEMCG_CACHE, 1362 MEMCG_RSS, 1363 MEMCG_RSS_HUGE, 1364 NR_SHMEM, 1365 NR_FILE_MAPPED, 1366 NR_FILE_DIRTY, 1367 NR_WRITEBACK, 1368 MEMCG_SWAP, 1369 }; 1370 1371 static const char *const memcg1_stat_names[] = { 1372 "cache", 1373 "rss", 1374 "rss_huge", 1375 "shmem", 1376 "mapped_file", 1377 "dirty", 1378 "writeback", 1379 "swap", 1380 }; 1381 1382 #define K(x) ((x) << (PAGE_SHIFT-10)) 1383 /** 1384 * mem_cgroup_print_oom_context: Print OOM information relevant to 1385 * memory controller. 1386 * @memcg: The memory cgroup that went over limit 1387 * @p: Task that is going to be killed 1388 * 1389 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1390 * enabled 1391 */ 1392 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1393 { 1394 rcu_read_lock(); 1395 1396 if (memcg) { 1397 pr_cont(",oom_memcg="); 1398 pr_cont_cgroup_path(memcg->css.cgroup); 1399 } else 1400 pr_cont(",global_oom"); 1401 if (p) { 1402 pr_cont(",task_memcg="); 1403 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1404 } 1405 rcu_read_unlock(); 1406 } 1407 1408 /** 1409 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1410 * memory controller. 
1411 * @memcg: The memory cgroup that went over limit 1412 */ 1413 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1414 { 1415 struct mem_cgroup *iter; 1416 unsigned int i; 1417 1418 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1419 K((u64)page_counter_read(&memcg->memory)), 1420 K((u64)memcg->memory.max), memcg->memory.failcnt); 1421 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1422 K((u64)page_counter_read(&memcg->memsw)), 1423 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1424 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1425 K((u64)page_counter_read(&memcg->kmem)), 1426 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1427 1428 for_each_mem_cgroup_tree(iter, memcg) { 1429 pr_info("Memory cgroup stats for "); 1430 pr_cont_cgroup_path(iter->css.cgroup); 1431 pr_cont(":"); 1432 1433 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 1434 if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account) 1435 continue; 1436 pr_cont(" %s:%luKB", memcg1_stat_names[i], 1437 K(memcg_page_state_local(iter, 1438 memcg1_stats[i]))); 1439 } 1440 1441 for (i = 0; i < NR_LRU_LISTS; i++) 1442 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1443 K(memcg_page_state_local(iter, 1444 NR_LRU_BASE + i))); 1445 1446 pr_cont("\n"); 1447 } 1448 } 1449 1450 /* 1451 * Return the memory (and swap, if configured) limit for a memcg. 1452 */ 1453 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1454 { 1455 unsigned long max; 1456 1457 max = memcg->memory.max; 1458 if (mem_cgroup_swappiness(memcg)) { 1459 unsigned long memsw_max; 1460 unsigned long swap_max; 1461 1462 memsw_max = memcg->memsw.max; 1463 swap_max = memcg->swap.max; 1464 swap_max = min(swap_max, (unsigned long)total_swap_pages); 1465 max = min(max + swap_max, memsw_max); 1466 } 1467 return max; 1468 } 1469 1470 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1471 int order) 1472 { 1473 struct oom_control oc = { 1474 .zonelist = NULL, 1475 .nodemask = NULL, 1476 .memcg = memcg, 1477 .gfp_mask = gfp_mask, 1478 .order = order, 1479 }; 1480 bool ret; 1481 1482 if (mutex_lock_killable(&oom_lock)) 1483 return true; 1484 /* 1485 * A few threads which were not waiting at mutex_lock_killable() can 1486 * fail to bail out. Therefore, check again after holding oom_lock. 1487 */ 1488 ret = should_force_charge() || out_of_memory(&oc); 1489 mutex_unlock(&oom_lock); 1490 return ret; 1491 } 1492 1493 #if MAX_NUMNODES > 1 1494 1495 /** 1496 * test_mem_cgroup_node_reclaimable 1497 * @memcg: the target memcg 1498 * @nid: the node ID to be checked. 1499 * @noswap : specify true here if the user wants flle only information. 1500 * 1501 * This function returns whether the specified memcg contains any 1502 * reclaimable pages on a node. Returns true if there are any reclaimable 1503 * pages in the node. 
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);

	if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
	    lruvec_page_state(lruvec, NR_ACTIVE_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
	    lruvec_page_state(lruvec, NR_ACTIVE_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, they will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we don't
				 * reclaim too much, nor so little that we keep
				 * coming back to reclaim from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot acquire the lock.
1665 */ 1666 failed = iter; 1667 mem_cgroup_iter_break(memcg, iter); 1668 break; 1669 } else 1670 iter->oom_lock = true; 1671 } 1672 1673 if (failed) { 1674 /* 1675 * OK, we failed to lock the whole subtree so we have 1676 * to clean up what we set up to the failing subtree 1677 */ 1678 for_each_mem_cgroup_tree(iter, memcg) { 1679 if (iter == failed) { 1680 mem_cgroup_iter_break(memcg, iter); 1681 break; 1682 } 1683 iter->oom_lock = false; 1684 } 1685 } else 1686 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1687 1688 spin_unlock(&memcg_oom_lock); 1689 1690 return !failed; 1691 } 1692 1693 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1694 { 1695 struct mem_cgroup *iter; 1696 1697 spin_lock(&memcg_oom_lock); 1698 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1699 for_each_mem_cgroup_tree(iter, memcg) 1700 iter->oom_lock = false; 1701 spin_unlock(&memcg_oom_lock); 1702 } 1703 1704 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1705 { 1706 struct mem_cgroup *iter; 1707 1708 spin_lock(&memcg_oom_lock); 1709 for_each_mem_cgroup_tree(iter, memcg) 1710 iter->under_oom++; 1711 spin_unlock(&memcg_oom_lock); 1712 } 1713 1714 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1715 { 1716 struct mem_cgroup *iter; 1717 1718 /* 1719 * When a new child is created while the hierarchy is under oom, 1720 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1721 */ 1722 spin_lock(&memcg_oom_lock); 1723 for_each_mem_cgroup_tree(iter, memcg) 1724 if (iter->under_oom > 0) 1725 iter->under_oom--; 1726 spin_unlock(&memcg_oom_lock); 1727 } 1728 1729 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1730 1731 struct oom_wait_info { 1732 struct mem_cgroup *memcg; 1733 wait_queue_entry_t wait; 1734 }; 1735 1736 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1737 unsigned mode, int sync, void *arg) 1738 { 1739 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1740 struct mem_cgroup *oom_wait_memcg; 1741 struct oom_wait_info *oom_wait_info; 1742 1743 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1744 oom_wait_memcg = oom_wait_info->memcg; 1745 1746 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1747 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1748 return 0; 1749 return autoremove_wake_function(wait, mode, sync, arg); 1750 } 1751 1752 static void memcg_oom_recover(struct mem_cgroup *memcg) 1753 { 1754 /* 1755 * For the following lockless ->under_oom test, the only required 1756 * guarantee is that it must see the state asserted by an OOM when 1757 * this function is called as a result of userland actions 1758 * triggered by the notification of the OOM. This is trivially 1759 * achieved by invoking mem_cgroup_mark_under_oom() before 1760 * triggering notification. 1761 */ 1762 if (memcg && memcg->under_oom) 1763 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1764 } 1765 1766 enum oom_status { 1767 OOM_SUCCESS, 1768 OOM_FAILED, 1769 OOM_ASYNC, 1770 OOM_SKIPPED 1771 }; 1772 1773 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1774 { 1775 enum oom_status ret; 1776 bool locked; 1777 1778 if (order > PAGE_ALLOC_COSTLY_ORDER) 1779 return OOM_SKIPPED; 1780 1781 memcg_memory_event(memcg, MEMCG_OOM); 1782 1783 /* 1784 * We are in the middle of the charge context here, so we 1785 * don't want to block when potentially sitting on a callstack 1786 * that holds all kinds of filesystem and mm locks. 
1787 * 1788 * cgroup1 allows disabling the OOM killer and waiting for outside 1789 * handling until the charge can succeed; remember the context and put 1790 * the task to sleep at the end of the page fault when all locks are 1791 * released. 1792 * 1793 * On the other hand, in-kernel OOM killer allows for an async victim 1794 * memory reclaim (oom_reaper) and that means that we are not solely 1795 * relying on the oom victim to make a forward progress and we can 1796 * invoke the oom killer here. 1797 * 1798 * Please note that mem_cgroup_out_of_memory might fail to find a 1799 * victim and then we have to bail out from the charge path. 1800 */ 1801 if (memcg->oom_kill_disable) { 1802 if (!current->in_user_fault) 1803 return OOM_SKIPPED; 1804 css_get(&memcg->css); 1805 current->memcg_in_oom = memcg; 1806 current->memcg_oom_gfp_mask = mask; 1807 current->memcg_oom_order = order; 1808 1809 return OOM_ASYNC; 1810 } 1811 1812 mem_cgroup_mark_under_oom(memcg); 1813 1814 locked = mem_cgroup_oom_trylock(memcg); 1815 1816 if (locked) 1817 mem_cgroup_oom_notify(memcg); 1818 1819 mem_cgroup_unmark_under_oom(memcg); 1820 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1821 ret = OOM_SUCCESS; 1822 else 1823 ret = OOM_FAILED; 1824 1825 if (locked) 1826 mem_cgroup_oom_unlock(memcg); 1827 1828 return ret; 1829 } 1830 1831 /** 1832 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1833 * @handle: actually kill/wait or just clean up the OOM state 1834 * 1835 * This has to be called at the end of a page fault if the memcg OOM 1836 * handler was enabled. 1837 * 1838 * Memcg supports userspace OOM handling where failed allocations must 1839 * sleep on a waitqueue until the userspace task resolves the 1840 * situation. Sleeping directly in the charge context with all kinds 1841 * of locks held is not a good idea, instead we remember an OOM state 1842 * in the task and mem_cgroup_oom_synchronize() has to be called at 1843 * the end of the page fault to complete the OOM handling. 1844 * 1845 * Returns %true if an ongoing memcg OOM situation was detected and 1846 * completed, %false otherwise. 1847 */ 1848 bool mem_cgroup_oom_synchronize(bool handle) 1849 { 1850 struct mem_cgroup *memcg = current->memcg_in_oom; 1851 struct oom_wait_info owait; 1852 bool locked; 1853 1854 /* OOM is global, do not handle */ 1855 if (!memcg) 1856 return false; 1857 1858 if (!handle) 1859 goto cleanup; 1860 1861 owait.memcg = memcg; 1862 owait.wait.flags = 0; 1863 owait.wait.func = memcg_oom_wake_function; 1864 owait.wait.private = current; 1865 INIT_LIST_HEAD(&owait.wait.entry); 1866 1867 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1868 mem_cgroup_mark_under_oom(memcg); 1869 1870 locked = mem_cgroup_oom_trylock(memcg); 1871 1872 if (locked) 1873 mem_cgroup_oom_notify(memcg); 1874 1875 if (locked && !memcg->oom_kill_disable) { 1876 mem_cgroup_unmark_under_oom(memcg); 1877 finish_wait(&memcg_oom_waitq, &owait.wait); 1878 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1879 current->memcg_oom_order); 1880 } else { 1881 schedule(); 1882 mem_cgroup_unmark_under_oom(memcg); 1883 finish_wait(&memcg_oom_waitq, &owait.wait); 1884 } 1885 1886 if (locked) { 1887 mem_cgroup_oom_unlock(memcg); 1888 /* 1889 * There is no guarantee that an OOM-lock contender 1890 * sees the wakeups triggered by the OOM kill 1891 * uncharges. Wake any sleepers explicitely. 
1892 */ 1893 memcg_oom_recover(memcg); 1894 } 1895 cleanup: 1896 current->memcg_in_oom = NULL; 1897 css_put(&memcg->css); 1898 return true; 1899 } 1900 1901 /** 1902 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1903 * @victim: task to be killed by the OOM killer 1904 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1905 * 1906 * Returns a pointer to a memory cgroup, which has to be cleaned up 1907 * by killing all belonging OOM-killable tasks. 1908 * 1909 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 1910 */ 1911 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1912 struct mem_cgroup *oom_domain) 1913 { 1914 struct mem_cgroup *oom_group = NULL; 1915 struct mem_cgroup *memcg; 1916 1917 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1918 return NULL; 1919 1920 if (!oom_domain) 1921 oom_domain = root_mem_cgroup; 1922 1923 rcu_read_lock(); 1924 1925 memcg = mem_cgroup_from_task(victim); 1926 if (memcg == root_mem_cgroup) 1927 goto out; 1928 1929 /* 1930 * Traverse the memory cgroup hierarchy from the victim task's 1931 * cgroup up to the OOMing cgroup (or root) to find the 1932 * highest-level memory cgroup with oom.group set. 1933 */ 1934 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1935 if (memcg->oom_group) 1936 oom_group = memcg; 1937 1938 if (memcg == oom_domain) 1939 break; 1940 } 1941 1942 if (oom_group) 1943 css_get(&oom_group->css); 1944 out: 1945 rcu_read_unlock(); 1946 1947 return oom_group; 1948 } 1949 1950 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1951 { 1952 pr_info("Tasks in "); 1953 pr_cont_cgroup_path(memcg->css.cgroup); 1954 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1955 } 1956 1957 /** 1958 * lock_page_memcg - lock a page->mem_cgroup binding 1959 * @page: the page 1960 * 1961 * This function protects unlocked LRU pages from being moved to 1962 * another cgroup. 1963 * 1964 * It ensures lifetime of the returned memcg. Caller is responsible 1965 * for the lifetime of the page; __unlock_page_memcg() is available 1966 * when @page might get freed inside the locked section. 1967 */ 1968 struct mem_cgroup *lock_page_memcg(struct page *page) 1969 { 1970 struct mem_cgroup *memcg; 1971 unsigned long flags; 1972 1973 /* 1974 * The RCU lock is held throughout the transaction. The fast 1975 * path can get away without acquiring the memcg->move_lock 1976 * because page moving starts with an RCU grace period. 1977 * 1978 * The RCU lock also protects the memcg from being freed when 1979 * the page state that is going to change is the only thing 1980 * preventing the page itself from being freed. E.g. writeback 1981 * doesn't hold a page reference and relies on PG_writeback to 1982 * keep off truncation, migration and so forth. 1983 */ 1984 rcu_read_lock(); 1985 1986 if (mem_cgroup_disabled()) 1987 return NULL; 1988 again: 1989 memcg = page->mem_cgroup; 1990 if (unlikely(!memcg)) 1991 return NULL; 1992 1993 if (atomic_read(&memcg->moving_account) <= 0) 1994 return memcg; 1995 1996 spin_lock_irqsave(&memcg->move_lock, flags); 1997 if (memcg != page->mem_cgroup) { 1998 spin_unlock_irqrestore(&memcg->move_lock, flags); 1999 goto again; 2000 } 2001 2002 /* 2003 * When charge migration first begins, we can have locked and 2004 * unlocked page stat updates happening concurrently. Track 2005 * the task who has the lock for unlock_page_memcg(). 
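 * Only the task that actually took move_lock records itself below, so
 * __unlock_page_memcg() can tell whether it must drop move_lock or only
 * the RCU read lock.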
2006  */
2007 	memcg->move_lock_task = current;
2008 	memcg->move_lock_flags = flags;
2009 
2010 	return memcg;
2011 }
2012 EXPORT_SYMBOL(lock_page_memcg);
2013 
2014 /**
2015  * __unlock_page_memcg - unlock and unpin a memcg
2016  * @memcg: the memcg
2017  *
2018  * Unlock and unpin a memcg returned by lock_page_memcg().
2019  */
2020 void __unlock_page_memcg(struct mem_cgroup *memcg)
2021 {
2022 	if (memcg && memcg->move_lock_task == current) {
2023 		unsigned long flags = memcg->move_lock_flags;
2024 
2025 		memcg->move_lock_task = NULL;
2026 		memcg->move_lock_flags = 0;
2027 
2028 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2029 	}
2030 
2031 	rcu_read_unlock();
2032 }
2033 
2034 /**
2035  * unlock_page_memcg - unlock a page->mem_cgroup binding
2036  * @page: the page
2037  */
2038 void unlock_page_memcg(struct page *page)
2039 {
2040 	__unlock_page_memcg(page->mem_cgroup);
2041 }
2042 EXPORT_SYMBOL(unlock_page_memcg);
2043 
2044 struct memcg_stock_pcp {
2045 	struct mem_cgroup *cached; /* this is never the root cgroup */
2046 	unsigned int nr_pages;
2047 	struct work_struct work;
2048 	unsigned long flags;
2049 #define FLUSHING_CACHED_CHARGE	0
2050 };
2051 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2052 static DEFINE_MUTEX(percpu_charge_mutex);
2053 
2054 /**
2055  * consume_stock: Try to consume stocked charge on this cpu.
2056  * @memcg: memcg to consume from.
2057  * @nr_pages: how many pages to charge.
2058  *
2059  * The charge is consumed only if @memcg matches the current cpu's memcg
2060  * stock and at least @nr_pages are available in that stock. If the stock
2061  * cannot service the request, the caller falls back and refills it later.
2062  *
2063  * Returns %true if successful, %false otherwise.
2064  */
2065 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2066 {
2067 	struct memcg_stock_pcp *stock;
2068 	unsigned long flags;
2069 	bool ret = false;
2070 
2071 	if (nr_pages > MEMCG_CHARGE_BATCH)
2072 		return ret;
2073 
2074 	local_irq_save(flags);
2075 
2076 	stock = this_cpu_ptr(&memcg_stock);
2077 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2078 		stock->nr_pages -= nr_pages;
2079 		ret = true;
2080 	}
2081 
2082 	local_irq_restore(flags);
2083 
2084 	return ret;
2085 }
2086 
2087 /*
2088  * Drain stocks cached in percpu and reset the cached information.
2089  */
2090 static void drain_stock(struct memcg_stock_pcp *stock)
2091 {
2092 	struct mem_cgroup *old = stock->cached;
2093 
2094 	if (stock->nr_pages) {
2095 		page_counter_uncharge(&old->memory, stock->nr_pages);
2096 		if (do_memsw_account())
2097 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2098 		css_put_many(&old->css, stock->nr_pages);
2099 		stock->nr_pages = 0;
2100 	}
2101 	stock->cached = NULL;
2102 }
2103 
2104 static void drain_local_stock(struct work_struct *dummy)
2105 {
2106 	struct memcg_stock_pcp *stock;
2107 	unsigned long flags;
2108 
2109 	/*
2110 	 * The only protection from memory hotplug vs. drain_stock races is
2111 	 * that we always operate on the local CPU stock here with IRQs disabled.
2112 	 */
2113 	local_irq_save(flags);
2114 
2115 	stock = this_cpu_ptr(&memcg_stock);
2116 	drain_stock(stock);
2117 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2118 
2119 	local_irq_restore(flags);
2120 }
2121 
2122 /*
2123  * Cache @nr_pages charges to the local per-CPU area.
2124  * They will be consumed by consume_stock() later.
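 *
 * As an illustration of how the pair is meant to be used (this mirrors
 * what try_charge() below does, it is not a new API), a charge path
 * consumes from the stock first and hands any surplus of a batched
 * charge back to it:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;
 *	... charge a larger batch against the page counters ...
 *	refill_stock(memcg, batch - nr_pages);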
2125 */ 2126 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2127 { 2128 struct memcg_stock_pcp *stock; 2129 unsigned long flags; 2130 2131 local_irq_save(flags); 2132 2133 stock = this_cpu_ptr(&memcg_stock); 2134 if (stock->cached != memcg) { /* reset if necessary */ 2135 drain_stock(stock); 2136 stock->cached = memcg; 2137 } 2138 stock->nr_pages += nr_pages; 2139 2140 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2141 drain_stock(stock); 2142 2143 local_irq_restore(flags); 2144 } 2145 2146 /* 2147 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2148 * of the hierarchy under it. 2149 */ 2150 static void drain_all_stock(struct mem_cgroup *root_memcg) 2151 { 2152 int cpu, curcpu; 2153 2154 /* If someone's already draining, avoid adding running more workers. */ 2155 if (!mutex_trylock(&percpu_charge_mutex)) 2156 return; 2157 /* 2158 * Notify other cpus that system-wide "drain" is running 2159 * We do not care about races with the cpu hotplug because cpu down 2160 * as well as workers from this path always operate on the local 2161 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2162 */ 2163 curcpu = get_cpu(); 2164 for_each_online_cpu(cpu) { 2165 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2166 struct mem_cgroup *memcg; 2167 2168 memcg = stock->cached; 2169 if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css)) 2170 continue; 2171 if (!mem_cgroup_is_descendant(memcg, root_memcg)) { 2172 css_put(&memcg->css); 2173 continue; 2174 } 2175 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2176 if (cpu == curcpu) 2177 drain_local_stock(&stock->work); 2178 else 2179 schedule_work_on(cpu, &stock->work); 2180 } 2181 css_put(&memcg->css); 2182 } 2183 put_cpu(); 2184 mutex_unlock(&percpu_charge_mutex); 2185 } 2186 2187 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2188 { 2189 struct memcg_stock_pcp *stock; 2190 struct mem_cgroup *memcg, *mi; 2191 2192 stock = &per_cpu(memcg_stock, cpu); 2193 drain_stock(stock); 2194 2195 for_each_mem_cgroup(memcg) { 2196 int i; 2197 2198 for (i = 0; i < MEMCG_NR_STAT; i++) { 2199 int nid; 2200 long x; 2201 2202 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2203 if (x) { 2204 atomic_long_add(x, &memcg->vmstats_local[i]); 2205 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2206 atomic_long_add(x, &memcg->vmstats[i]); 2207 } 2208 2209 if (i >= NR_VM_NODE_STAT_ITEMS) 2210 continue; 2211 2212 for_each_node(nid) { 2213 struct mem_cgroup_per_node *pn; 2214 2215 pn = mem_cgroup_nodeinfo(memcg, nid); 2216 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2217 if (x) { 2218 atomic_long_add(x, &pn->lruvec_stat_local[i]); 2219 do { 2220 atomic_long_add(x, &pn->lruvec_stat[i]); 2221 } while ((pn = parent_nodeinfo(pn, nid))); 2222 } 2223 } 2224 } 2225 2226 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2227 long x; 2228 2229 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2230 if (x) { 2231 atomic_long_add(x, &memcg->vmevents_local[i]); 2232 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2233 atomic_long_add(x, &memcg->vmevents[i]); 2234 } 2235 } 2236 } 2237 2238 return 0; 2239 } 2240 2241 static void reclaim_high(struct mem_cgroup *memcg, 2242 unsigned int nr_pages, 2243 gfp_t gfp_mask) 2244 { 2245 do { 2246 if (page_counter_read(&memcg->memory) <= memcg->high) 2247 continue; 2248 memcg_memory_event(memcg, MEMCG_HIGH); 2249 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); 2250 } while ((memcg = parent_mem_cgroup(memcg))); 2251 } 2252 2253 static void 
high_work_func(struct work_struct *work) 2254 { 2255 struct mem_cgroup *memcg; 2256 2257 memcg = container_of(work, struct mem_cgroup, high_work); 2258 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2259 } 2260 2261 /* 2262 * Scheduled by try_charge() to be executed from the userland return path 2263 * and reclaims memory over the high limit. 2264 */ 2265 void mem_cgroup_handle_over_high(void) 2266 { 2267 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2268 struct mem_cgroup *memcg; 2269 2270 if (likely(!nr_pages)) 2271 return; 2272 2273 memcg = get_mem_cgroup_from_mm(current->mm); 2274 reclaim_high(memcg, nr_pages, GFP_KERNEL); 2275 css_put(&memcg->css); 2276 current->memcg_nr_pages_over_high = 0; 2277 } 2278 2279 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2280 unsigned int nr_pages) 2281 { 2282 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2283 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2284 struct mem_cgroup *mem_over_limit; 2285 struct page_counter *counter; 2286 unsigned long nr_reclaimed; 2287 bool may_swap = true; 2288 bool drained = false; 2289 bool oomed = false; 2290 enum oom_status oom_status; 2291 2292 if (mem_cgroup_is_root(memcg)) 2293 return 0; 2294 retry: 2295 if (consume_stock(memcg, nr_pages)) 2296 return 0; 2297 2298 if (!do_memsw_account() || 2299 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2300 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2301 goto done_restock; 2302 if (do_memsw_account()) 2303 page_counter_uncharge(&memcg->memsw, batch); 2304 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2305 } else { 2306 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2307 may_swap = false; 2308 } 2309 2310 if (batch > nr_pages) { 2311 batch = nr_pages; 2312 goto retry; 2313 } 2314 2315 /* 2316 * Unlike in global OOM situations, memcg is not in a physical 2317 * memory shortage. Allow dying and OOM-killed tasks to 2318 * bypass the last charges so that they can exit quickly and 2319 * free their memory. 2320 */ 2321 if (unlikely(should_force_charge())) 2322 goto force; 2323 2324 /* 2325 * Prevent unbounded recursion when reclaim operations need to 2326 * allocate memory. This might exceed the limits temporarily, 2327 * but we prefer facilitating memory reclaim and getting back 2328 * under the limit over triggering OOM kills in these cases. 2329 */ 2330 if (unlikely(current->flags & PF_MEMALLOC)) 2331 goto force; 2332 2333 if (unlikely(task_in_memcg_oom(current))) 2334 goto nomem; 2335 2336 if (!gfpflags_allow_blocking(gfp_mask)) 2337 goto nomem; 2338 2339 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2340 2341 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2342 gfp_mask, may_swap); 2343 2344 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2345 goto retry; 2346 2347 if (!drained) { 2348 drain_all_stock(mem_over_limit); 2349 drained = true; 2350 goto retry; 2351 } 2352 2353 if (gfp_mask & __GFP_NORETRY) 2354 goto nomem; 2355 /* 2356 * Even though the limit is exceeded at this point, reclaim 2357 * may have been able to free some pages. Retry the charge 2358 * before killing the task. 2359 * 2360 * Only for regular pages, though: huge pages are rather 2361 * unlikely to succeed so close to the limit, and we fall back 2362 * to regular pages anyway in case of failure. 2363 */ 2364 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2365 goto retry; 2366 /* 2367 * At task move, charge accounts can be doubly counted. 
So, it's
2368  * better to wait until the end of task_move if something is going on.
2369  */
2370 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2371 		goto retry;
2372 
2373 	if (nr_retries--)
2374 		goto retry;
2375 
2376 	if (gfp_mask & __GFP_RETRY_MAYFAIL && oomed)
2377 		goto nomem;
2378 
2379 	if (gfp_mask & __GFP_NOFAIL)
2380 		goto force;
2381 
2382 	if (fatal_signal_pending(current))
2383 		goto force;
2384 
2385 	/*
2386 	 * Keep retrying as long as the memcg OOM killer is able to make
2387 	 * forward progress, or bypass the charge if the OOM killer
2388 	 * couldn't make any progress.
2389 	 */
2390 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2391 				    get_order(nr_pages * PAGE_SIZE));
2392 	switch (oom_status) {
2393 	case OOM_SUCCESS:
2394 		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2395 		oomed = true;
2396 		goto retry;
2397 	case OOM_FAILED:
2398 		goto force;
2399 	default:
2400 		goto nomem;
2401 	}
2402 nomem:
2403 	if (!(gfp_mask & __GFP_NOFAIL))
2404 		return -ENOMEM;
2405 force:
2406 	/*
2407 	 * The allocation either can't fail or will lead to more memory
2408 	 * being freed very soon. Allow memory usage to go over the limit
2409 	 * temporarily by force charging it.
2410 	 */
2411 	page_counter_charge(&memcg->memory, nr_pages);
2412 	if (do_memsw_account())
2413 		page_counter_charge(&memcg->memsw, nr_pages);
2414 	css_get_many(&memcg->css, nr_pages);
2415 
2416 	return 0;
2417 
2418 done_restock:
2419 	css_get_many(&memcg->css, batch);
2420 	if (batch > nr_pages)
2421 		refill_stock(memcg, batch - nr_pages);
2422 
2423 	/*
2424 	 * If the hierarchy is above the normal consumption range, schedule
2425 	 * reclaim on returning to userland. We can perform reclaim here
2426 	 * if __GFP_RECLAIM, but let's always punt for simplicity and so that
2427 	 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2428 	 * not recorded as it most likely matches current's and won't
2429 	 * change in the meantime. As the high limit is checked again before
2430 	 * reclaim, the cost of mismatch is negligible.
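	 *
	 * (The punt itself is the set_notify_resume() below; the deferred
	 * reclaim is then done by mem_cgroup_handle_over_high() on the
	 * return-to-userland path, see the comment above that function.)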
2431 */ 2432 do { 2433 if (page_counter_read(&memcg->memory) > memcg->high) { 2434 /* Don't bother a random interrupted task */ 2435 if (in_interrupt()) { 2436 schedule_work(&memcg->high_work); 2437 break; 2438 } 2439 current->memcg_nr_pages_over_high += batch; 2440 set_notify_resume(current); 2441 break; 2442 } 2443 } while ((memcg = parent_mem_cgroup(memcg))); 2444 2445 return 0; 2446 } 2447 2448 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2449 { 2450 if (mem_cgroup_is_root(memcg)) 2451 return; 2452 2453 page_counter_uncharge(&memcg->memory, nr_pages); 2454 if (do_memsw_account()) 2455 page_counter_uncharge(&memcg->memsw, nr_pages); 2456 2457 css_put_many(&memcg->css, nr_pages); 2458 } 2459 2460 static void lock_page_lru(struct page *page, int *isolated) 2461 { 2462 pg_data_t *pgdat = page_pgdat(page); 2463 2464 spin_lock_irq(&pgdat->lru_lock); 2465 if (PageLRU(page)) { 2466 struct lruvec *lruvec; 2467 2468 lruvec = mem_cgroup_page_lruvec(page, pgdat); 2469 ClearPageLRU(page); 2470 del_page_from_lru_list(page, lruvec, page_lru(page)); 2471 *isolated = 1; 2472 } else 2473 *isolated = 0; 2474 } 2475 2476 static void unlock_page_lru(struct page *page, int isolated) 2477 { 2478 pg_data_t *pgdat = page_pgdat(page); 2479 2480 if (isolated) { 2481 struct lruvec *lruvec; 2482 2483 lruvec = mem_cgroup_page_lruvec(page, pgdat); 2484 VM_BUG_ON_PAGE(PageLRU(page), page); 2485 SetPageLRU(page); 2486 add_page_to_lru_list(page, lruvec, page_lru(page)); 2487 } 2488 spin_unlock_irq(&pgdat->lru_lock); 2489 } 2490 2491 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2492 bool lrucare) 2493 { 2494 int isolated; 2495 2496 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2497 2498 /* 2499 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2500 * may already be on some other mem_cgroup's LRU. Take care of it. 2501 */ 2502 if (lrucare) 2503 lock_page_lru(page, &isolated); 2504 2505 /* 2506 * Nobody should be changing or seriously looking at 2507 * page->mem_cgroup at this point: 2508 * 2509 * - the page is uncharged 2510 * 2511 * - the page is off-LRU 2512 * 2513 * - an anonymous fault has exclusive page access, except for 2514 * a locked page table 2515 * 2516 * - a page cache insertion, a swapin fault, or a migration 2517 * have the page locked 2518 */ 2519 page->mem_cgroup = memcg; 2520 2521 if (lrucare) 2522 unlock_page_lru(page, isolated); 2523 } 2524 2525 #ifdef CONFIG_MEMCG_KMEM 2526 static int memcg_alloc_cache_id(void) 2527 { 2528 int id, size; 2529 int err; 2530 2531 id = ida_simple_get(&memcg_cache_ida, 2532 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2533 if (id < 0) 2534 return id; 2535 2536 if (id < memcg_nr_cache_ids) 2537 return id; 2538 2539 /* 2540 * There's no space for the new id in memcg_caches arrays, 2541 * so we have to grow them. 
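	 * The arrays are grown to 2 * (id + 1), clamped to the range
	 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE]; for example,
	 * running out of room at id 15 resizes them to 32 entries.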
2542 */ 2543 down_write(&memcg_cache_ids_sem); 2544 2545 size = 2 * (id + 1); 2546 if (size < MEMCG_CACHES_MIN_SIZE) 2547 size = MEMCG_CACHES_MIN_SIZE; 2548 else if (size > MEMCG_CACHES_MAX_SIZE) 2549 size = MEMCG_CACHES_MAX_SIZE; 2550 2551 err = memcg_update_all_caches(size); 2552 if (!err) 2553 err = memcg_update_all_list_lrus(size); 2554 if (!err) 2555 memcg_nr_cache_ids = size; 2556 2557 up_write(&memcg_cache_ids_sem); 2558 2559 if (err) { 2560 ida_simple_remove(&memcg_cache_ida, id); 2561 return err; 2562 } 2563 return id; 2564 } 2565 2566 static void memcg_free_cache_id(int id) 2567 { 2568 ida_simple_remove(&memcg_cache_ida, id); 2569 } 2570 2571 struct memcg_kmem_cache_create_work { 2572 struct mem_cgroup *memcg; 2573 struct kmem_cache *cachep; 2574 struct work_struct work; 2575 }; 2576 2577 static void memcg_kmem_cache_create_func(struct work_struct *w) 2578 { 2579 struct memcg_kmem_cache_create_work *cw = 2580 container_of(w, struct memcg_kmem_cache_create_work, work); 2581 struct mem_cgroup *memcg = cw->memcg; 2582 struct kmem_cache *cachep = cw->cachep; 2583 2584 memcg_create_kmem_cache(memcg, cachep); 2585 2586 css_put(&memcg->css); 2587 kfree(cw); 2588 } 2589 2590 /* 2591 * Enqueue the creation of a per-memcg kmem_cache. 2592 */ 2593 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2594 struct kmem_cache *cachep) 2595 { 2596 struct memcg_kmem_cache_create_work *cw; 2597 2598 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2599 if (!cw) 2600 return; 2601 2602 css_get(&memcg->css); 2603 2604 cw->memcg = memcg; 2605 cw->cachep = cachep; 2606 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2607 2608 queue_work(memcg_kmem_cache_wq, &cw->work); 2609 } 2610 2611 static inline bool memcg_kmem_bypass(void) 2612 { 2613 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2614 return true; 2615 return false; 2616 } 2617 2618 /** 2619 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2620 * @cachep: the original global kmem cache 2621 * 2622 * Return the kmem_cache we're supposed to use for a slab allocation. 2623 * We try to use the current memcg's version of the cache. 2624 * 2625 * If the cache does not exist yet, if we are the first user of it, we 2626 * create it asynchronously in a workqueue and let the current allocation 2627 * go through with the original cache. 2628 * 2629 * This function takes a reference to the cache it returns to assure it 2630 * won't get destroyed while we are working with it. Once the caller is 2631 * done with it, memcg_kmem_put_cache() must be called to release the 2632 * reference. 2633 */ 2634 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2635 { 2636 struct mem_cgroup *memcg; 2637 struct kmem_cache *memcg_cachep; 2638 int kmemcg_id; 2639 2640 VM_BUG_ON(!is_root_cache(cachep)); 2641 2642 if (memcg_kmem_bypass()) 2643 return cachep; 2644 2645 memcg = get_mem_cgroup_from_current(); 2646 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2647 if (kmemcg_id < 0) 2648 goto out; 2649 2650 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2651 if (likely(memcg_cachep)) 2652 return memcg_cachep; 2653 2654 /* 2655 * If we are in a safe context (can wait, and not in interrupt 2656 * context), we could be be predictable and return right away. 2657 * This would guarantee that the allocation being performed 2658 * already belongs in the new cache. 2659 * 2660 * However, there are some clashes that can arrive from locking. 
2661 * For instance, because we acquire the slab_mutex while doing 2662 * memcg_create_kmem_cache, this means no further allocation 2663 * could happen with the slab_mutex held. So it's better to 2664 * defer everything. 2665 */ 2666 memcg_schedule_kmem_cache_create(memcg, cachep); 2667 out: 2668 css_put(&memcg->css); 2669 return cachep; 2670 } 2671 2672 /** 2673 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2674 * @cachep: the cache returned by memcg_kmem_get_cache 2675 */ 2676 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2677 { 2678 if (!is_root_cache(cachep)) 2679 css_put(&cachep->memcg_params.memcg->css); 2680 } 2681 2682 /** 2683 * __memcg_kmem_charge_memcg: charge a kmem page 2684 * @page: page to charge 2685 * @gfp: reclaim mode 2686 * @order: allocation order 2687 * @memcg: memory cgroup to charge 2688 * 2689 * Returns 0 on success, an error code on failure. 2690 */ 2691 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2692 struct mem_cgroup *memcg) 2693 { 2694 unsigned int nr_pages = 1 << order; 2695 struct page_counter *counter; 2696 int ret; 2697 2698 ret = try_charge(memcg, gfp, nr_pages); 2699 if (ret) 2700 return ret; 2701 2702 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2703 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2704 cancel_charge(memcg, nr_pages); 2705 return -ENOMEM; 2706 } 2707 2708 page->mem_cgroup = memcg; 2709 2710 return 0; 2711 } 2712 2713 /** 2714 * __memcg_kmem_charge: charge a kmem page to the current memory cgroup 2715 * @page: page to charge 2716 * @gfp: reclaim mode 2717 * @order: allocation order 2718 * 2719 * Returns 0 on success, an error code on failure. 2720 */ 2721 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2722 { 2723 struct mem_cgroup *memcg; 2724 int ret = 0; 2725 2726 if (memcg_kmem_bypass()) 2727 return 0; 2728 2729 memcg = get_mem_cgroup_from_current(); 2730 if (!mem_cgroup_is_root(memcg)) { 2731 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); 2732 if (!ret) 2733 __SetPageKmemcg(page); 2734 } 2735 css_put(&memcg->css); 2736 return ret; 2737 } 2738 /** 2739 * __memcg_kmem_uncharge: uncharge a kmem page 2740 * @page: page to uncharge 2741 * @order: allocation order 2742 */ 2743 void __memcg_kmem_uncharge(struct page *page, int order) 2744 { 2745 struct mem_cgroup *memcg = page->mem_cgroup; 2746 unsigned int nr_pages = 1 << order; 2747 2748 if (!memcg) 2749 return; 2750 2751 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2752 2753 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2754 page_counter_uncharge(&memcg->kmem, nr_pages); 2755 2756 page_counter_uncharge(&memcg->memory, nr_pages); 2757 if (do_memsw_account()) 2758 page_counter_uncharge(&memcg->memsw, nr_pages); 2759 2760 page->mem_cgroup = NULL; 2761 2762 /* slab pages do not have PageKmemcg flag set */ 2763 if (PageKmemcg(page)) 2764 __ClearPageKmemcg(page); 2765 2766 css_put_many(&memcg->css, nr_pages); 2767 } 2768 #endif /* CONFIG_MEMCG_KMEM */ 2769 2770 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2771 2772 /* 2773 * Because tail pages are not marked as "used", set it. We're under 2774 * pgdat->lru_lock and migration entries setup in all page mappings. 
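 * After the split each tail page carries the same page->mem_cgroup as the
 * head, and MEMCG_RSS_HUGE is decreased by HPAGE_PMD_NR since the pages
 * are no longer accounted as a transparent huge page.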
2775 */ 2776 void mem_cgroup_split_huge_fixup(struct page *head) 2777 { 2778 int i; 2779 2780 if (mem_cgroup_disabled()) 2781 return; 2782 2783 for (i = 1; i < HPAGE_PMD_NR; i++) 2784 head[i].mem_cgroup = head->mem_cgroup; 2785 2786 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); 2787 } 2788 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2789 2790 #ifdef CONFIG_MEMCG_SWAP 2791 /** 2792 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2793 * @entry: swap entry to be moved 2794 * @from: mem_cgroup which the entry is moved from 2795 * @to: mem_cgroup which the entry is moved to 2796 * 2797 * It succeeds only when the swap_cgroup's record for this entry is the same 2798 * as the mem_cgroup's id of @from. 2799 * 2800 * Returns 0 on success, -EINVAL on failure. 2801 * 2802 * The caller must have charged to @to, IOW, called page_counter_charge() about 2803 * both res and memsw, and called css_get(). 2804 */ 2805 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2806 struct mem_cgroup *from, struct mem_cgroup *to) 2807 { 2808 unsigned short old_id, new_id; 2809 2810 old_id = mem_cgroup_id(from); 2811 new_id = mem_cgroup_id(to); 2812 2813 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2814 mod_memcg_state(from, MEMCG_SWAP, -1); 2815 mod_memcg_state(to, MEMCG_SWAP, 1); 2816 return 0; 2817 } 2818 return -EINVAL; 2819 } 2820 #else 2821 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2822 struct mem_cgroup *from, struct mem_cgroup *to) 2823 { 2824 return -EINVAL; 2825 } 2826 #endif 2827 2828 static DEFINE_MUTEX(memcg_max_mutex); 2829 2830 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 2831 unsigned long max, bool memsw) 2832 { 2833 bool enlarge = false; 2834 bool drained = false; 2835 int ret; 2836 bool limits_invariant; 2837 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 2838 2839 do { 2840 if (signal_pending(current)) { 2841 ret = -EINTR; 2842 break; 2843 } 2844 2845 mutex_lock(&memcg_max_mutex); 2846 /* 2847 * Make sure that the new limit (memsw or memory limit) doesn't 2848 * break our basic invariant rule memory.max <= memsw.max. 2849 */ 2850 limits_invariant = memsw ? max >= memcg->memory.max : 2851 max <= memcg->memsw.max; 2852 if (!limits_invariant) { 2853 mutex_unlock(&memcg_max_mutex); 2854 ret = -EINVAL; 2855 break; 2856 } 2857 if (max > counter->max) 2858 enlarge = true; 2859 ret = page_counter_set_max(counter, max); 2860 mutex_unlock(&memcg_max_mutex); 2861 2862 if (!ret) 2863 break; 2864 2865 if (!drained) { 2866 drain_all_stock(memcg); 2867 drained = true; 2868 continue; 2869 } 2870 2871 if (!try_to_free_mem_cgroup_pages(memcg, 1, 2872 GFP_KERNEL, !memsw)) { 2873 ret = -EBUSY; 2874 break; 2875 } 2876 } while (true); 2877 2878 if (!ret && enlarge) 2879 memcg_oom_recover(memcg); 2880 2881 return ret; 2882 } 2883 2884 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2885 gfp_t gfp_mask, 2886 unsigned long *total_scanned) 2887 { 2888 unsigned long nr_reclaimed = 0; 2889 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2890 unsigned long reclaimed; 2891 int loop = 0; 2892 struct mem_cgroup_tree_per_node *mctz; 2893 unsigned long excess; 2894 unsigned long nr_scanned; 2895 2896 if (order > 0) 2897 return 0; 2898 2899 mctz = soft_limit_tree_node(pgdat->node_id); 2900 2901 /* 2902 * Do not even bother to check the largest node if the root 2903 * is empty. Do it lockless to prevent lock bouncing. 
Races
2904  * are acceptable as soft limit is best effort anyway.
2905  */
2906 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2907 		return 0;
2908 
2909 	/*
2910 	 * This loop can run for a while, especially if mem_cgroups continuously
2911 	 * keep exceeding their soft limit and putting the system under
2912 	 * pressure.
2913 	 */
2914 	do {
2915 		if (next_mz)
2916 			mz = next_mz;
2917 		else
2918 			mz = mem_cgroup_largest_soft_limit_node(mctz);
2919 		if (!mz)
2920 			break;
2921 
2922 		nr_scanned = 0;
2923 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2924 						    gfp_mask, &nr_scanned);
2925 		nr_reclaimed += reclaimed;
2926 		*total_scanned += nr_scanned;
2927 		spin_lock_irq(&mctz->lock);
2928 		__mem_cgroup_remove_exceeded(mz, mctz);
2929 
2930 		/*
2931 		 * If we failed to reclaim anything from this memory cgroup
2932 		 * it is time to move on to the next cgroup.
2933 		 */
2934 		next_mz = NULL;
2935 		if (!reclaimed)
2936 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2937 
2938 		excess = soft_limit_excess(mz->memcg);
2939 		/*
2940 		 * One school of thought says that we should not add
2941 		 * back the node to the tree if reclaim returns 0.
2942 		 * But our reclaim could return 0 simply because, due
2943 		 * to priority, we are exposing a smaller subset of
2944 		 * memory to reclaim from. Consider this as a longer
2945 		 * term TODO.
2946 		 */
2947 		/* If excess == 0, no tree ops */
2948 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2949 		spin_unlock_irq(&mctz->lock);
2950 		css_put(&mz->memcg->css);
2951 		loop++;
2952 		/*
2953 		 * Could not reclaim anything and there are no more
2954 		 * mem cgroups to try or we seem to be looping without
2955 		 * reclaiming anything.
2956 		 */
2957 		if (!nr_reclaimed &&
2958 		    (next_mz == NULL ||
2959 		     loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2960 			break;
2961 	} while (!nr_reclaimed);
2962 	if (next_mz)
2963 		css_put(&next_mz->memcg->css);
2964 	return nr_reclaimed;
2965 }
2966 
2967 /*
2968  * Test whether @memcg has children, dead or alive. Note that this
2969  * function doesn't care whether @memcg has use_hierarchy enabled and
2970  * returns %true if there are child csses according to the cgroup
2971  * hierarchy. Testing use_hierarchy is the caller's responsibility.
2972  */
2973 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2974 {
2975 	bool ret;
2976 
2977 	rcu_read_lock();
2978 	ret = css_next_child(NULL, &memcg->css);
2979 	rcu_read_unlock();
2980 	return ret;
2981 }
2982 
2983 /*
2984  * Reclaims as many pages from the given memcg as possible.
2985  *
2986  * Caller is responsible for holding css reference for memcg.
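 *
 * This is the backend of the cgroup1 "memory.force_empty" knob (see
 * mem_cgroup_force_empty_write() below): it keeps calling
 * try_to_free_mem_cgroup_pages() until the charge counter reaches zero
 * or the retries are exhausted.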
2987  */
2988 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2989 {
2990 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2991 
2992 	/* we call try-to-free pages to make this cgroup empty */
2993 	lru_add_drain_all();
2994 
2995 	drain_all_stock(memcg);
2996 
2997 	/* try to free all pages in this cgroup */
2998 	while (nr_retries && page_counter_read(&memcg->memory)) {
2999 		int progress;
3000 
3001 		if (signal_pending(current))
3002 			return -EINTR;
3003 
3004 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3005 							GFP_KERNEL, true);
3006 		if (!progress) {
3007 			nr_retries--;
3008 			/* maybe some writeback is necessary */
3009 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3010 		}
3011 
3012 	}
3013 
3014 	return 0;
3015 }
3016 
3017 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3018 					    char *buf, size_t nbytes,
3019 					    loff_t off)
3020 {
3021 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3022 
3023 	if (mem_cgroup_is_root(memcg))
3024 		return -EINVAL;
3025 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3026 }
3027 
3028 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3029 				     struct cftype *cft)
3030 {
3031 	return mem_cgroup_from_css(css)->use_hierarchy;
3032 }
3033 
3034 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3035 				      struct cftype *cft, u64 val)
3036 {
3037 	int retval = 0;
3038 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3039 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3040 
3041 	if (memcg->use_hierarchy == val)
3042 		return 0;
3043 
3044 	/*
3045 	 * If the parent's use_hierarchy is set, we can't make any modifications
3046 	 * in the child subtrees. If it is unset, then the change can
3047 	 * occur, provided the current cgroup has no children.
3048 	 *
3049 	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
3050 	 * set if there are no children.
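	 *
	 * In short, the value can only be changed on a cgroup that has no
	 * children and whose parent does not itself have use_hierarchy set;
	 * everything else fails with -EBUSY or -EINVAL below.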
3051 */ 3052 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3053 (val == 1 || val == 0)) { 3054 if (!memcg_has_children(memcg)) 3055 memcg->use_hierarchy = val; 3056 else 3057 retval = -EBUSY; 3058 } else 3059 retval = -EINVAL; 3060 3061 return retval; 3062 } 3063 3064 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3065 { 3066 unsigned long val; 3067 3068 if (mem_cgroup_is_root(memcg)) { 3069 val = memcg_page_state(memcg, MEMCG_CACHE) + 3070 memcg_page_state(memcg, MEMCG_RSS); 3071 if (swap) 3072 val += memcg_page_state(memcg, MEMCG_SWAP); 3073 } else { 3074 if (!swap) 3075 val = page_counter_read(&memcg->memory); 3076 else 3077 val = page_counter_read(&memcg->memsw); 3078 } 3079 return val; 3080 } 3081 3082 enum { 3083 RES_USAGE, 3084 RES_LIMIT, 3085 RES_MAX_USAGE, 3086 RES_FAILCNT, 3087 RES_SOFT_LIMIT, 3088 }; 3089 3090 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3091 struct cftype *cft) 3092 { 3093 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3094 struct page_counter *counter; 3095 3096 switch (MEMFILE_TYPE(cft->private)) { 3097 case _MEM: 3098 counter = &memcg->memory; 3099 break; 3100 case _MEMSWAP: 3101 counter = &memcg->memsw; 3102 break; 3103 case _KMEM: 3104 counter = &memcg->kmem; 3105 break; 3106 case _TCP: 3107 counter = &memcg->tcpmem; 3108 break; 3109 default: 3110 BUG(); 3111 } 3112 3113 switch (MEMFILE_ATTR(cft->private)) { 3114 case RES_USAGE: 3115 if (counter == &memcg->memory) 3116 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3117 if (counter == &memcg->memsw) 3118 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3119 return (u64)page_counter_read(counter) * PAGE_SIZE; 3120 case RES_LIMIT: 3121 return (u64)counter->max * PAGE_SIZE; 3122 case RES_MAX_USAGE: 3123 return (u64)counter->watermark * PAGE_SIZE; 3124 case RES_FAILCNT: 3125 return counter->failcnt; 3126 case RES_SOFT_LIMIT: 3127 return (u64)memcg->soft_limit * PAGE_SIZE; 3128 default: 3129 BUG(); 3130 } 3131 } 3132 3133 #ifdef CONFIG_MEMCG_KMEM 3134 static int memcg_online_kmem(struct mem_cgroup *memcg) 3135 { 3136 int memcg_id; 3137 3138 if (cgroup_memory_nokmem) 3139 return 0; 3140 3141 BUG_ON(memcg->kmemcg_id >= 0); 3142 BUG_ON(memcg->kmem_state); 3143 3144 memcg_id = memcg_alloc_cache_id(); 3145 if (memcg_id < 0) 3146 return memcg_id; 3147 3148 static_branch_inc(&memcg_kmem_enabled_key); 3149 /* 3150 * A memory cgroup is considered kmem-online as soon as it gets 3151 * kmemcg_id. Setting the id after enabling static branching will 3152 * guarantee no one starts accounting before all call sites are 3153 * patched. 3154 */ 3155 memcg->kmemcg_id = memcg_id; 3156 memcg->kmem_state = KMEM_ONLINE; 3157 INIT_LIST_HEAD(&memcg->kmem_caches); 3158 3159 return 0; 3160 } 3161 3162 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3163 { 3164 struct cgroup_subsys_state *css; 3165 struct mem_cgroup *parent, *child; 3166 int kmemcg_id; 3167 3168 if (memcg->kmem_state != KMEM_ONLINE) 3169 return; 3170 /* 3171 * Clear the online state before clearing memcg_caches array 3172 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 3173 * guarantees that no cache will be created for this cgroup 3174 * after we are done (see memcg_create_kmem_cache()). 
3175 */ 3176 memcg->kmem_state = KMEM_ALLOCATED; 3177 3178 memcg_deactivate_kmem_caches(memcg); 3179 3180 kmemcg_id = memcg->kmemcg_id; 3181 BUG_ON(kmemcg_id < 0); 3182 3183 parent = parent_mem_cgroup(memcg); 3184 if (!parent) 3185 parent = root_mem_cgroup; 3186 3187 /* 3188 * Change kmemcg_id of this cgroup and all its descendants to the 3189 * parent's id, and then move all entries from this cgroup's list_lrus 3190 * to ones of the parent. After we have finished, all list_lrus 3191 * corresponding to this cgroup are guaranteed to remain empty. The 3192 * ordering is imposed by list_lru_node->lock taken by 3193 * memcg_drain_all_list_lrus(). 3194 */ 3195 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3196 css_for_each_descendant_pre(css, &memcg->css) { 3197 child = mem_cgroup_from_css(css); 3198 BUG_ON(child->kmemcg_id != kmemcg_id); 3199 child->kmemcg_id = parent->kmemcg_id; 3200 if (!memcg->use_hierarchy) 3201 break; 3202 } 3203 rcu_read_unlock(); 3204 3205 memcg_drain_all_list_lrus(kmemcg_id, parent); 3206 3207 memcg_free_cache_id(kmemcg_id); 3208 } 3209 3210 static void memcg_free_kmem(struct mem_cgroup *memcg) 3211 { 3212 /* css_alloc() failed, offlining didn't happen */ 3213 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3214 memcg_offline_kmem(memcg); 3215 3216 if (memcg->kmem_state == KMEM_ALLOCATED) { 3217 memcg_destroy_kmem_caches(memcg); 3218 static_branch_dec(&memcg_kmem_enabled_key); 3219 WARN_ON(page_counter_read(&memcg->kmem)); 3220 } 3221 } 3222 #else 3223 static int memcg_online_kmem(struct mem_cgroup *memcg) 3224 { 3225 return 0; 3226 } 3227 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3228 { 3229 } 3230 static void memcg_free_kmem(struct mem_cgroup *memcg) 3231 { 3232 } 3233 #endif /* CONFIG_MEMCG_KMEM */ 3234 3235 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3236 unsigned long max) 3237 { 3238 int ret; 3239 3240 mutex_lock(&memcg_max_mutex); 3241 ret = page_counter_set_max(&memcg->kmem, max); 3242 mutex_unlock(&memcg_max_mutex); 3243 return ret; 3244 } 3245 3246 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3247 { 3248 int ret; 3249 3250 mutex_lock(&memcg_max_mutex); 3251 3252 ret = page_counter_set_max(&memcg->tcpmem, max); 3253 if (ret) 3254 goto out; 3255 3256 if (!memcg->tcpmem_active) { 3257 /* 3258 * The active flag needs to be written after the static_key 3259 * update. This is what guarantees that the socket activation 3260 * function is the last one to run. See mem_cgroup_sk_alloc() 3261 * for details, and note that we don't mark any socket as 3262 * belonging to this memcg until that flag is up. 3263 * 3264 * We need to do this, because static_keys will span multiple 3265 * sites, but we can't control their order. If we mark a socket 3266 * as accounted, but the accounting functions are not patched in 3267 * yet, we'll lose accounting. 3268 * 3269 * We never race with the readers in mem_cgroup_sk_alloc(), 3270 * because when this value change, the code to process it is not 3271 * patched in yet. 3272 */ 3273 static_branch_inc(&memcg_sockets_enabled_key); 3274 memcg->tcpmem_active = true; 3275 } 3276 out: 3277 mutex_unlock(&memcg_max_mutex); 3278 return ret; 3279 } 3280 3281 /* 3282 * The user of this function is... 3283 * RES_LIMIT. 
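 * It also handles RES_SOFT_LIMIT writes. As an illustrative example:
 * writing "512M" to memory.limit_in_bytes lands here and
 * page_counter_memparse() turns it into nr_pages = 131072 on 4K-page
 * systems, while the string "-1" is parsed as "no limit".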
3284 */ 3285 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3286 char *buf, size_t nbytes, loff_t off) 3287 { 3288 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3289 unsigned long nr_pages; 3290 int ret; 3291 3292 buf = strstrip(buf); 3293 ret = page_counter_memparse(buf, "-1", &nr_pages); 3294 if (ret) 3295 return ret; 3296 3297 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3298 case RES_LIMIT: 3299 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3300 ret = -EINVAL; 3301 break; 3302 } 3303 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3304 case _MEM: 3305 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3306 break; 3307 case _MEMSWAP: 3308 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3309 break; 3310 case _KMEM: 3311 ret = memcg_update_kmem_max(memcg, nr_pages); 3312 break; 3313 case _TCP: 3314 ret = memcg_update_tcp_max(memcg, nr_pages); 3315 break; 3316 } 3317 break; 3318 case RES_SOFT_LIMIT: 3319 memcg->soft_limit = nr_pages; 3320 ret = 0; 3321 break; 3322 } 3323 return ret ?: nbytes; 3324 } 3325 3326 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3327 size_t nbytes, loff_t off) 3328 { 3329 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3330 struct page_counter *counter; 3331 3332 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3333 case _MEM: 3334 counter = &memcg->memory; 3335 break; 3336 case _MEMSWAP: 3337 counter = &memcg->memsw; 3338 break; 3339 case _KMEM: 3340 counter = &memcg->kmem; 3341 break; 3342 case _TCP: 3343 counter = &memcg->tcpmem; 3344 break; 3345 default: 3346 BUG(); 3347 } 3348 3349 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3350 case RES_MAX_USAGE: 3351 page_counter_reset_watermark(counter); 3352 break; 3353 case RES_FAILCNT: 3354 counter->failcnt = 0; 3355 break; 3356 default: 3357 BUG(); 3358 } 3359 3360 return nbytes; 3361 } 3362 3363 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3364 struct cftype *cft) 3365 { 3366 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3367 } 3368 3369 #ifdef CONFIG_MMU 3370 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3371 struct cftype *cft, u64 val) 3372 { 3373 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3374 3375 if (val & ~MOVE_MASK) 3376 return -EINVAL; 3377 3378 /* 3379 * No kind of locking is needed in here, because ->can_attach() will 3380 * check this value once in the beginning of the process, and then carry 3381 * on with stale data. This means that changes to this value will only 3382 * affect task migrations starting after the change. 
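	 *
	 * For example, "echo 3 > memory.move_charge_at_immigrate" requests
	 * MOVE_ANON | MOVE_FILE, i.e. both anonymous and file pages are
	 * moved on subsequent task migrations into this cgroup; any bits
	 * outside MOVE_MASK were already rejected with -EINVAL above.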
3383 */ 3384 memcg->move_charge_at_immigrate = val; 3385 return 0; 3386 } 3387 #else 3388 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3389 struct cftype *cft, u64 val) 3390 { 3391 return -ENOSYS; 3392 } 3393 #endif 3394 3395 #ifdef CONFIG_NUMA 3396 3397 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3398 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3399 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3400 3401 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3402 int nid, unsigned int lru_mask) 3403 { 3404 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg); 3405 unsigned long nr = 0; 3406 enum lru_list lru; 3407 3408 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3409 3410 for_each_lru(lru) { 3411 if (!(BIT(lru) & lru_mask)) 3412 continue; 3413 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3414 } 3415 return nr; 3416 } 3417 3418 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3419 unsigned int lru_mask) 3420 { 3421 unsigned long nr = 0; 3422 enum lru_list lru; 3423 3424 for_each_lru(lru) { 3425 if (!(BIT(lru) & lru_mask)) 3426 continue; 3427 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3428 } 3429 return nr; 3430 } 3431 3432 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3433 { 3434 struct numa_stat { 3435 const char *name; 3436 unsigned int lru_mask; 3437 }; 3438 3439 static const struct numa_stat stats[] = { 3440 { "total", LRU_ALL }, 3441 { "file", LRU_ALL_FILE }, 3442 { "anon", LRU_ALL_ANON }, 3443 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3444 }; 3445 const struct numa_stat *stat; 3446 int nid; 3447 unsigned long nr; 3448 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3449 3450 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3451 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3452 seq_printf(m, "%s=%lu", stat->name, nr); 3453 for_each_node_state(nid, N_MEMORY) { 3454 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3455 stat->lru_mask); 3456 seq_printf(m, " N%d=%lu", nid, nr); 3457 } 3458 seq_putc(m, '\n'); 3459 } 3460 3461 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3462 struct mem_cgroup *iter; 3463 3464 nr = 0; 3465 for_each_mem_cgroup_tree(iter, memcg) 3466 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3467 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3468 for_each_node_state(nid, N_MEMORY) { 3469 nr = 0; 3470 for_each_mem_cgroup_tree(iter, memcg) 3471 nr += mem_cgroup_node_nr_lru_pages( 3472 iter, nid, stat->lru_mask); 3473 seq_printf(m, " N%d=%lu", nid, nr); 3474 } 3475 seq_putc(m, '\n'); 3476 } 3477 3478 return 0; 3479 } 3480 #endif /* CONFIG_NUMA */ 3481 3482 /* Universal VM events cgroup1 shows, original sort order */ 3483 static const unsigned int memcg1_events[] = { 3484 PGPGIN, 3485 PGPGOUT, 3486 PGFAULT, 3487 PGMAJFAULT, 3488 }; 3489 3490 static const char *const memcg1_event_names[] = { 3491 "pgpgin", 3492 "pgpgout", 3493 "pgfault", 3494 "pgmajfault", 3495 }; 3496 3497 static int memcg_stat_show(struct seq_file *m, void *v) 3498 { 3499 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3500 unsigned long memory, memsw; 3501 struct mem_cgroup *mi; 3502 unsigned int i; 3503 3504 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3505 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3506 3507 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3508 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3509 continue; 3510 
seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 3511 memcg_page_state_local(memcg, memcg1_stats[i]) * 3512 PAGE_SIZE); 3513 } 3514 3515 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3516 seq_printf(m, "%s %lu\n", memcg1_event_names[i], 3517 memcg_events_local(memcg, memcg1_events[i])); 3518 3519 for (i = 0; i < NR_LRU_LISTS; i++) 3520 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3521 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 3522 PAGE_SIZE); 3523 3524 /* Hierarchical information */ 3525 memory = memsw = PAGE_COUNTER_MAX; 3526 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3527 memory = min(memory, mi->memory.max); 3528 memsw = min(memsw, mi->memsw.max); 3529 } 3530 seq_printf(m, "hierarchical_memory_limit %llu\n", 3531 (u64)memory * PAGE_SIZE); 3532 if (do_memsw_account()) 3533 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3534 (u64)memsw * PAGE_SIZE); 3535 3536 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3537 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3538 continue; 3539 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 3540 (u64)memcg_page_state(memcg, i) * PAGE_SIZE); 3541 } 3542 3543 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3544 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], 3545 (u64)memcg_events(memcg, i)); 3546 3547 for (i = 0; i < NR_LRU_LISTS; i++) 3548 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], 3549 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 3550 PAGE_SIZE); 3551 3552 #ifdef CONFIG_DEBUG_VM 3553 { 3554 pg_data_t *pgdat; 3555 struct mem_cgroup_per_node *mz; 3556 struct zone_reclaim_stat *rstat; 3557 unsigned long recent_rotated[2] = {0, 0}; 3558 unsigned long recent_scanned[2] = {0, 0}; 3559 3560 for_each_online_pgdat(pgdat) { 3561 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3562 rstat = &mz->lruvec.reclaim_stat; 3563 3564 recent_rotated[0] += rstat->recent_rotated[0]; 3565 recent_rotated[1] += rstat->recent_rotated[1]; 3566 recent_scanned[0] += rstat->recent_scanned[0]; 3567 recent_scanned[1] += rstat->recent_scanned[1]; 3568 } 3569 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3570 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3571 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3572 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3573 } 3574 #endif 3575 3576 return 0; 3577 } 3578 3579 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3580 struct cftype *cft) 3581 { 3582 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3583 3584 return mem_cgroup_swappiness(memcg); 3585 } 3586 3587 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3588 struct cftype *cft, u64 val) 3589 { 3590 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3591 3592 if (val > 100) 3593 return -EINVAL; 3594 3595 if (css->parent) 3596 memcg->swappiness = val; 3597 else 3598 vm_swappiness = val; 3599 3600 return 0; 3601 } 3602 3603 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3604 { 3605 struct mem_cgroup_threshold_ary *t; 3606 unsigned long usage; 3607 int i; 3608 3609 rcu_read_lock(); 3610 if (!swap) 3611 t = rcu_dereference(memcg->thresholds.primary); 3612 else 3613 t = rcu_dereference(memcg->memsw_thresholds.primary); 3614 3615 if (!t) 3616 goto unlock; 3617 3618 usage = mem_cgroup_usage(memcg, swap); 3619 3620 /* 3621 * current_threshold points to threshold just below or equal to usage. 
3622 * If it's not true, a threshold was crossed after last 3623 * call of __mem_cgroup_threshold(). 3624 */ 3625 i = t->current_threshold; 3626 3627 /* 3628 * Iterate backward over array of thresholds starting from 3629 * current_threshold and check if a threshold is crossed. 3630 * If none of thresholds below usage is crossed, we read 3631 * only one element of the array here. 3632 */ 3633 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3634 eventfd_signal(t->entries[i].eventfd, 1); 3635 3636 /* i = current_threshold + 1 */ 3637 i++; 3638 3639 /* 3640 * Iterate forward over array of thresholds starting from 3641 * current_threshold+1 and check if a threshold is crossed. 3642 * If none of thresholds above usage is crossed, we read 3643 * only one element of the array here. 3644 */ 3645 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3646 eventfd_signal(t->entries[i].eventfd, 1); 3647 3648 /* Update current_threshold */ 3649 t->current_threshold = i - 1; 3650 unlock: 3651 rcu_read_unlock(); 3652 } 3653 3654 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3655 { 3656 while (memcg) { 3657 __mem_cgroup_threshold(memcg, false); 3658 if (do_memsw_account()) 3659 __mem_cgroup_threshold(memcg, true); 3660 3661 memcg = parent_mem_cgroup(memcg); 3662 } 3663 } 3664 3665 static int compare_thresholds(const void *a, const void *b) 3666 { 3667 const struct mem_cgroup_threshold *_a = a; 3668 const struct mem_cgroup_threshold *_b = b; 3669 3670 if (_a->threshold > _b->threshold) 3671 return 1; 3672 3673 if (_a->threshold < _b->threshold) 3674 return -1; 3675 3676 return 0; 3677 } 3678 3679 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3680 { 3681 struct mem_cgroup_eventfd_list *ev; 3682 3683 spin_lock(&memcg_oom_lock); 3684 3685 list_for_each_entry(ev, &memcg->oom_notify, list) 3686 eventfd_signal(ev->eventfd, 1); 3687 3688 spin_unlock(&memcg_oom_lock); 3689 return 0; 3690 } 3691 3692 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3693 { 3694 struct mem_cgroup *iter; 3695 3696 for_each_mem_cgroup_tree(iter, memcg) 3697 mem_cgroup_oom_notify_cb(iter); 3698 } 3699 3700 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3701 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3702 { 3703 struct mem_cgroup_thresholds *thresholds; 3704 struct mem_cgroup_threshold_ary *new; 3705 unsigned long threshold; 3706 unsigned long usage; 3707 int i, size, ret; 3708 3709 ret = page_counter_memparse(args, "-1", &threshold); 3710 if (ret) 3711 return ret; 3712 3713 mutex_lock(&memcg->thresholds_lock); 3714 3715 if (type == _MEM) { 3716 thresholds = &memcg->thresholds; 3717 usage = mem_cgroup_usage(memcg, false); 3718 } else if (type == _MEMSWAP) { 3719 thresholds = &memcg->memsw_thresholds; 3720 usage = mem_cgroup_usage(memcg, true); 3721 } else 3722 BUG(); 3723 3724 /* Check if a threshold crossed before adding a new one */ 3725 if (thresholds->primary) 3726 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3727 3728 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 3729 3730 /* Allocate memory for new array of thresholds */ 3731 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 3732 if (!new) { 3733 ret = -ENOMEM; 3734 goto unlock; 3735 } 3736 new->size = size; 3737 3738 /* Copy thresholds (if any) to new array */ 3739 if (thresholds->primary) { 3740 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3741 sizeof(struct mem_cgroup_threshold)); 3742 } 3743 3744 /* Add new threshold */ 3745 new->entries[size - 1].eventfd = eventfd; 3746 new->entries[size - 1].threshold = threshold; 3747 3748 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3749 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3750 compare_thresholds, NULL); 3751 3752 /* Find current threshold */ 3753 new->current_threshold = -1; 3754 for (i = 0; i < size; i++) { 3755 if (new->entries[i].threshold <= usage) { 3756 /* 3757 * new->current_threshold will not be used until 3758 * rcu_assign_pointer(), so it's safe to increment 3759 * it here. 3760 */ 3761 ++new->current_threshold; 3762 } else 3763 break; 3764 } 3765 3766 /* Free old spare buffer and save old primary buffer as spare */ 3767 kfree(thresholds->spare); 3768 thresholds->spare = thresholds->primary; 3769 3770 rcu_assign_pointer(thresholds->primary, new); 3771 3772 /* To be sure that nobody uses thresholds */ 3773 synchronize_rcu(); 3774 3775 unlock: 3776 mutex_unlock(&memcg->thresholds_lock); 3777 3778 return ret; 3779 } 3780 3781 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3782 struct eventfd_ctx *eventfd, const char *args) 3783 { 3784 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3785 } 3786 3787 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3788 struct eventfd_ctx *eventfd, const char *args) 3789 { 3790 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3791 } 3792 3793 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3794 struct eventfd_ctx *eventfd, enum res_type type) 3795 { 3796 struct mem_cgroup_thresholds *thresholds; 3797 struct mem_cgroup_threshold_ary *new; 3798 unsigned long usage; 3799 int i, j, size; 3800 3801 mutex_lock(&memcg->thresholds_lock); 3802 3803 if (type == _MEM) { 3804 thresholds = &memcg->thresholds; 3805 usage = mem_cgroup_usage(memcg, false); 3806 } else if (type == _MEMSWAP) { 3807 thresholds = &memcg->memsw_thresholds; 3808 usage = mem_cgroup_usage(memcg, true); 3809 } else 3810 BUG(); 3811 3812 if (!thresholds->primary) 3813 goto unlock; 3814 3815 /* Check if a threshold crossed before removing */ 3816 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3817 3818 /* Calculate new number of threshold */ 3819 size = 0; 3820 for (i = 0; i < thresholds->primary->size; i++) { 3821 if (thresholds->primary->entries[i].eventfd != eventfd) 3822 size++; 3823 } 3824 3825 new = thresholds->spare; 3826 3827 /* Set thresholds array to NULL if we don't have thresholds */ 3828 if (!size) { 3829 kfree(new); 3830 new = NULL; 3831 goto swap_buffers; 3832 } 3833 3834 new->size = size; 3835 3836 /* Copy thresholds and find current threshold */ 3837 new->current_threshold = -1; 3838 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3839 if (thresholds->primary->entries[i].eventfd == eventfd) 3840 continue; 3841 3842 new->entries[j] = thresholds->primary->entries[i]; 3843 if (new->entries[j].threshold <= usage) { 3844 /* 3845 * new->current_threshold will not be used 3846 * until rcu_assign_pointer(), so 
it's safe to increment 3847 * it here. 3848 */ 3849 ++new->current_threshold; 3850 } 3851 j++; 3852 } 3853 3854 swap_buffers: 3855 /* Swap primary and spare array */ 3856 thresholds->spare = thresholds->primary; 3857 3858 rcu_assign_pointer(thresholds->primary, new); 3859 3860 /* To be sure that nobody uses thresholds */ 3861 synchronize_rcu(); 3862 3863 /* If all events are unregistered, free the spare array */ 3864 if (!new) { 3865 kfree(thresholds->spare); 3866 thresholds->spare = NULL; 3867 } 3868 unlock: 3869 mutex_unlock(&memcg->thresholds_lock); 3870 } 3871 3872 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3873 struct eventfd_ctx *eventfd) 3874 { 3875 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3876 } 3877 3878 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3879 struct eventfd_ctx *eventfd) 3880 { 3881 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3882 } 3883 3884 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3885 struct eventfd_ctx *eventfd, const char *args) 3886 { 3887 struct mem_cgroup_eventfd_list *event; 3888 3889 event = kmalloc(sizeof(*event), GFP_KERNEL); 3890 if (!event) 3891 return -ENOMEM; 3892 3893 spin_lock(&memcg_oom_lock); 3894 3895 event->eventfd = eventfd; 3896 list_add(&event->list, &memcg->oom_notify); 3897 3898 /* already in OOM ? */ 3899 if (memcg->under_oom) 3900 eventfd_signal(eventfd, 1); 3901 spin_unlock(&memcg_oom_lock); 3902 3903 return 0; 3904 } 3905 3906 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3907 struct eventfd_ctx *eventfd) 3908 { 3909 struct mem_cgroup_eventfd_list *ev, *tmp; 3910 3911 spin_lock(&memcg_oom_lock); 3912 3913 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3914 if (ev->eventfd == eventfd) { 3915 list_del(&ev->list); 3916 kfree(ev); 3917 } 3918 } 3919 3920 spin_unlock(&memcg_oom_lock); 3921 } 3922 3923 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3924 { 3925 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 3926 3927 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3928 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3929 seq_printf(sf, "oom_kill %lu\n", 3930 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 3931 return 0; 3932 } 3933 3934 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3935 struct cftype *cft, u64 val) 3936 { 3937 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3938 3939 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3940 if (!css->parent || !((val == 0) || (val == 1))) 3941 return -EINVAL; 3942 3943 memcg->oom_kill_disable = val; 3944 if (!val) 3945 memcg_oom_recover(memcg); 3946 3947 return 0; 3948 } 3949 3950 #ifdef CONFIG_CGROUP_WRITEBACK 3951 3952 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3953 { 3954 return wb_domain_init(&memcg->cgwb_domain, gfp); 3955 } 3956 3957 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3958 { 3959 wb_domain_exit(&memcg->cgwb_domain); 3960 } 3961 3962 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3963 { 3964 wb_domain_size_changed(&memcg->cgwb_domain); 3965 } 3966 3967 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3968 { 3969 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3970 3971 if (!memcg->css.parent) 3972 return NULL; 3973 3974 return &memcg->cgwb_domain; 3975 } 3976 3977 /* 3978 * idx can be of type enum memcg_stat_item or 
node_stat_item. 3979 * Keep in sync with memcg_exact_page(). 3980 */ 3981 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 3982 { 3983 long x = atomic_long_read(&memcg->vmstats[idx]); 3984 int cpu; 3985 3986 for_each_online_cpu(cpu) 3987 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 3988 if (x < 0) 3989 x = 0; 3990 return x; 3991 } 3992 3993 /** 3994 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3995 * @wb: bdi_writeback in question 3996 * @pfilepages: out parameter for number of file pages 3997 * @pheadroom: out parameter for number of allocatable pages according to memcg 3998 * @pdirty: out parameter for number of dirty pages 3999 * @pwriteback: out parameter for number of pages under writeback 4000 * 4001 * Determine the numbers of file, headroom, dirty, and writeback pages in 4002 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4003 * is a bit more involved. 4004 * 4005 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4006 * headroom is calculated as the lowest headroom of itself and the 4007 * ancestors. Note that this doesn't consider the actual amount of 4008 * available memory in the system. The caller should further cap 4009 * *@pheadroom accordingly. 4010 */ 4011 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4012 unsigned long *pheadroom, unsigned long *pdirty, 4013 unsigned long *pwriteback) 4014 { 4015 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4016 struct mem_cgroup *parent; 4017 4018 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4019 4020 /* this should eventually include NR_UNSTABLE_NFS */ 4021 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4022 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4023 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4024 *pheadroom = PAGE_COUNTER_MAX; 4025 4026 while ((parent = parent_mem_cgroup(memcg))) { 4027 unsigned long ceiling = min(memcg->memory.max, memcg->high); 4028 unsigned long used = page_counter_read(&memcg->memory); 4029 4030 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4031 memcg = parent; 4032 } 4033 } 4034 4035 #else /* CONFIG_CGROUP_WRITEBACK */ 4036 4037 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4038 { 4039 return 0; 4040 } 4041 4042 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4043 { 4044 } 4045 4046 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4047 { 4048 } 4049 4050 #endif /* CONFIG_CGROUP_WRITEBACK */ 4051 4052 /* 4053 * DO NOT USE IN NEW FILES. 4054 * 4055 * "cgroup.event_control" implementation. 4056 * 4057 * This is way over-engineered. It tries to support fully configurable 4058 * events for each user. Such level of flexibility is completely 4059 * unnecessary especially in the light of the planned unified hierarchy. 4060 * 4061 * Please deprecate this and replace with something simpler if at all 4062 * possible. 4063 */ 4064 4065 /* 4066 * Unregister event and free resources. 4067 * 4068 * Gets called from workqueue. 4069 */ 4070 static void memcg_event_remove(struct work_struct *work) 4071 { 4072 struct mem_cgroup_event *event = 4073 container_of(work, struct mem_cgroup_event, remove); 4074 struct mem_cgroup *memcg = event->memcg; 4075 4076 remove_wait_queue(event->wqh, &event->wait); 4077 4078 event->unregister_event(memcg, event->eventfd); 4079 4080 /* Notify userspace the event is going away. 
*/ 4081 eventfd_signal(event->eventfd, 1); 4082 4083 eventfd_ctx_put(event->eventfd); 4084 kfree(event); 4085 css_put(&memcg->css); 4086 } 4087 4088 /* 4089 * Gets called on EPOLLHUP on eventfd when user closes it. 4090 * 4091 * Called with wqh->lock held and interrupts disabled. 4092 */ 4093 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4094 int sync, void *key) 4095 { 4096 struct mem_cgroup_event *event = 4097 container_of(wait, struct mem_cgroup_event, wait); 4098 struct mem_cgroup *memcg = event->memcg; 4099 __poll_t flags = key_to_poll(key); 4100 4101 if (flags & EPOLLHUP) { 4102 /* 4103 * If the event has been detached at cgroup removal, we 4104 * can simply return knowing the other side will cleanup 4105 * for us. 4106 * 4107 * We can't race against event freeing since the other 4108 * side will require wqh->lock via remove_wait_queue(), 4109 * which we hold. 4110 */ 4111 spin_lock(&memcg->event_list_lock); 4112 if (!list_empty(&event->list)) { 4113 list_del_init(&event->list); 4114 /* 4115 * We are in atomic context, but memcg_event_remove() 4116 * may sleep, so we have to call it in workqueue. 4117 */ 4118 schedule_work(&event->remove); 4119 } 4120 spin_unlock(&memcg->event_list_lock); 4121 } 4122 4123 return 0; 4124 } 4125 4126 static void memcg_event_ptable_queue_proc(struct file *file, 4127 wait_queue_head_t *wqh, poll_table *pt) 4128 { 4129 struct mem_cgroup_event *event = 4130 container_of(pt, struct mem_cgroup_event, pt); 4131 4132 event->wqh = wqh; 4133 add_wait_queue(wqh, &event->wait); 4134 } 4135 4136 /* 4137 * DO NOT USE IN NEW FILES. 4138 * 4139 * Parse input and register new cgroup event handler. 4140 * 4141 * Input must be in format '<event_fd> <control_fd> <args>'. 4142 * Interpretation of args is defined by control file implementation. 4143 */ 4144 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4145 char *buf, size_t nbytes, loff_t off) 4146 { 4147 struct cgroup_subsys_state *css = of_css(of); 4148 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4149 struct mem_cgroup_event *event; 4150 struct cgroup_subsys_state *cfile_css; 4151 unsigned int efd, cfd; 4152 struct fd efile; 4153 struct fd cfile; 4154 const char *name; 4155 char *endp; 4156 int ret; 4157 4158 buf = strstrip(buf); 4159 4160 efd = simple_strtoul(buf, &endp, 10); 4161 if (*endp != ' ') 4162 return -EINVAL; 4163 buf = endp + 1; 4164 4165 cfd = simple_strtoul(buf, &endp, 10); 4166 if ((*endp != ' ') && (*endp != '\0')) 4167 return -EINVAL; 4168 buf = endp + 1; 4169 4170 event = kzalloc(sizeof(*event), GFP_KERNEL); 4171 if (!event) 4172 return -ENOMEM; 4173 4174 event->memcg = memcg; 4175 INIT_LIST_HEAD(&event->list); 4176 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4177 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4178 INIT_WORK(&event->remove, memcg_event_remove); 4179 4180 efile = fdget(efd); 4181 if (!efile.file) { 4182 ret = -EBADF; 4183 goto out_kfree; 4184 } 4185 4186 event->eventfd = eventfd_ctx_fileget(efile.file); 4187 if (IS_ERR(event->eventfd)) { 4188 ret = PTR_ERR(event->eventfd); 4189 goto out_put_efile; 4190 } 4191 4192 cfile = fdget(cfd); 4193 if (!cfile.file) { 4194 ret = -EBADF; 4195 goto out_put_eventfd; 4196 } 4197 4198 /* the process needs read permission on the control file */ 4199 /* AV: shouldn't we check that it's been opened for read instead?
*/ 4200 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4201 if (ret < 0) 4202 goto out_put_cfile; 4203 4204 /* 4205 * Determine the event callbacks and set them in @event. This used 4206 * to be done via struct cftype but cgroup core no longer knows 4207 * about these events. The following is crude but the whole thing 4208 * is for compatibility anyway. 4209 * 4210 * DO NOT ADD NEW FILES. 4211 */ 4212 name = cfile.file->f_path.dentry->d_name.name; 4213 4214 if (!strcmp(name, "memory.usage_in_bytes")) { 4215 event->register_event = mem_cgroup_usage_register_event; 4216 event->unregister_event = mem_cgroup_usage_unregister_event; 4217 } else if (!strcmp(name, "memory.oom_control")) { 4218 event->register_event = mem_cgroup_oom_register_event; 4219 event->unregister_event = mem_cgroup_oom_unregister_event; 4220 } else if (!strcmp(name, "memory.pressure_level")) { 4221 event->register_event = vmpressure_register_event; 4222 event->unregister_event = vmpressure_unregister_event; 4223 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4224 event->register_event = memsw_cgroup_usage_register_event; 4225 event->unregister_event = memsw_cgroup_usage_unregister_event; 4226 } else { 4227 ret = -EINVAL; 4228 goto out_put_cfile; 4229 } 4230 4231 /* 4232 * Verify @cfile should belong to @css. Also, remaining events are 4233 * automatically removed on cgroup destruction but the removal is 4234 * asynchronous, so take an extra ref on @css. 4235 */ 4236 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4237 &memory_cgrp_subsys); 4238 ret = -EINVAL; 4239 if (IS_ERR(cfile_css)) 4240 goto out_put_cfile; 4241 if (cfile_css != css) { 4242 css_put(cfile_css); 4243 goto out_put_cfile; 4244 } 4245 4246 ret = event->register_event(memcg, event->eventfd, buf); 4247 if (ret) 4248 goto out_put_css; 4249 4250 vfs_poll(efile.file, &event->pt); 4251 4252 spin_lock(&memcg->event_list_lock); 4253 list_add(&event->list, &memcg->event_list); 4254 spin_unlock(&memcg->event_list_lock); 4255 4256 fdput(cfile); 4257 fdput(efile); 4258 4259 return nbytes; 4260 4261 out_put_css: 4262 css_put(css); 4263 out_put_cfile: 4264 fdput(cfile); 4265 out_put_eventfd: 4266 eventfd_ctx_put(event->eventfd); 4267 out_put_efile: 4268 fdput(efile); 4269 out_kfree: 4270 kfree(event); 4271 4272 return ret; 4273 } 4274 4275 static struct cftype mem_cgroup_legacy_files[] = { 4276 { 4277 .name = "usage_in_bytes", 4278 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4279 .read_u64 = mem_cgroup_read_u64, 4280 }, 4281 { 4282 .name = "max_usage_in_bytes", 4283 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4284 .write = mem_cgroup_reset, 4285 .read_u64 = mem_cgroup_read_u64, 4286 }, 4287 { 4288 .name = "limit_in_bytes", 4289 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4290 .write = mem_cgroup_write, 4291 .read_u64 = mem_cgroup_read_u64, 4292 }, 4293 { 4294 .name = "soft_limit_in_bytes", 4295 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4296 .write = mem_cgroup_write, 4297 .read_u64 = mem_cgroup_read_u64, 4298 }, 4299 { 4300 .name = "failcnt", 4301 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4302 .write = mem_cgroup_reset, 4303 .read_u64 = mem_cgroup_read_u64, 4304 }, 4305 { 4306 .name = "stat", 4307 .seq_show = memcg_stat_show, 4308 }, 4309 { 4310 .name = "force_empty", 4311 .write = mem_cgroup_force_empty_write, 4312 }, 4313 { 4314 .name = "use_hierarchy", 4315 .write_u64 = mem_cgroup_hierarchy_write, 4316 .read_u64 = mem_cgroup_hierarchy_read, 4317 }, 4318 { 4319 .name = "cgroup.event_control", /* 
XXX: for compat */ 4320 .write = memcg_write_event_control, 4321 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4322 }, 4323 { 4324 .name = "swappiness", 4325 .read_u64 = mem_cgroup_swappiness_read, 4326 .write_u64 = mem_cgroup_swappiness_write, 4327 }, 4328 { 4329 .name = "move_charge_at_immigrate", 4330 .read_u64 = mem_cgroup_move_charge_read, 4331 .write_u64 = mem_cgroup_move_charge_write, 4332 }, 4333 { 4334 .name = "oom_control", 4335 .seq_show = mem_cgroup_oom_control_read, 4336 .write_u64 = mem_cgroup_oom_control_write, 4337 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4338 }, 4339 { 4340 .name = "pressure_level", 4341 }, 4342 #ifdef CONFIG_NUMA 4343 { 4344 .name = "numa_stat", 4345 .seq_show = memcg_numa_stat_show, 4346 }, 4347 #endif 4348 { 4349 .name = "kmem.limit_in_bytes", 4350 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4351 .write = mem_cgroup_write, 4352 .read_u64 = mem_cgroup_read_u64, 4353 }, 4354 { 4355 .name = "kmem.usage_in_bytes", 4356 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4357 .read_u64 = mem_cgroup_read_u64, 4358 }, 4359 { 4360 .name = "kmem.failcnt", 4361 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4362 .write = mem_cgroup_reset, 4363 .read_u64 = mem_cgroup_read_u64, 4364 }, 4365 { 4366 .name = "kmem.max_usage_in_bytes", 4367 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4368 .write = mem_cgroup_reset, 4369 .read_u64 = mem_cgroup_read_u64, 4370 }, 4371 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) 4372 { 4373 .name = "kmem.slabinfo", 4374 .seq_start = memcg_slab_start, 4375 .seq_next = memcg_slab_next, 4376 .seq_stop = memcg_slab_stop, 4377 .seq_show = memcg_slab_show, 4378 }, 4379 #endif 4380 { 4381 .name = "kmem.tcp.limit_in_bytes", 4382 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4383 .write = mem_cgroup_write, 4384 .read_u64 = mem_cgroup_read_u64, 4385 }, 4386 { 4387 .name = "kmem.tcp.usage_in_bytes", 4388 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4389 .read_u64 = mem_cgroup_read_u64, 4390 }, 4391 { 4392 .name = "kmem.tcp.failcnt", 4393 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4394 .write = mem_cgroup_reset, 4395 .read_u64 = mem_cgroup_read_u64, 4396 }, 4397 { 4398 .name = "kmem.tcp.max_usage_in_bytes", 4399 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4400 .write = mem_cgroup_reset, 4401 .read_u64 = mem_cgroup_read_u64, 4402 }, 4403 { }, /* terminate */ 4404 }; 4405 4406 /* 4407 * Private memory cgroup IDR 4408 * 4409 * Swap-out records and page cache shadow entries need to store memcg 4410 * references in constrained space, so we maintain an ID space that is 4411 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4412 * memory-controlled cgroups to 64k. 4413 * 4414 * However, there usually are many references to the offline CSS after 4415 * the cgroup has been destroyed, such as page cache or reclaimable 4416 * slab objects, that don't need to hang on to the ID. We want to keep 4417 * those dead CSS from occupying IDs, or we might quickly exhaust the 4418 * relatively small ID space and prevent the creation of new cgroups 4419 * even when there are far fewer than 64k cgroups - possibly none. 4420 * 4421 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4422 * be freed and recycled when it's no longer needed, which is usually 4423 * when the CSS is offlined. 4424 * 4425 * The only exception to that are records of swapped out tmpfs/shmem 4426 * pages that need to be attributed to live ancestors on swapin. But 4427 * those references are manageable from userspace.
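 *
 * As an illustrative sketch (editor's note, mirroring the lookup pattern
 * used by mem_cgroup_try_charge() further down rather than introducing a
 * new interface): a swap record only keeps the 16-bit value returned by
 * mem_cgroup_id(memcg), and resolving it later must cope with the memcg
 * having been offlined in the meantime, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();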
4428 */ 4429 4430 static DEFINE_IDR(mem_cgroup_idr); 4431 4432 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 4433 { 4434 if (memcg->id.id > 0) { 4435 idr_remove(&mem_cgroup_idr, memcg->id.id); 4436 memcg->id.id = 0; 4437 } 4438 } 4439 4440 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4441 { 4442 refcount_add(n, &memcg->id.ref); 4443 } 4444 4445 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4446 { 4447 if (refcount_sub_and_test(n, &memcg->id.ref)) { 4448 mem_cgroup_id_remove(memcg); 4449 4450 /* Memcg ID pins CSS */ 4451 css_put(&memcg->css); 4452 } 4453 } 4454 4455 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4456 { 4457 mem_cgroup_id_get_many(memcg, 1); 4458 } 4459 4460 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4461 { 4462 mem_cgroup_id_put_many(memcg, 1); 4463 } 4464 4465 /** 4466 * mem_cgroup_from_id - look up a memcg from a memcg id 4467 * @id: the memcg id to look up 4468 * 4469 * Caller must hold rcu_read_lock(). 4470 */ 4471 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4472 { 4473 WARN_ON_ONCE(!rcu_read_lock_held()); 4474 return idr_find(&mem_cgroup_idr, id); 4475 } 4476 4477 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4478 { 4479 struct mem_cgroup_per_node *pn; 4480 int tmp = node; 4481 /* 4482 * This routine is called against possible nodes. 4483 * But it's BUG to call kmalloc() against offline node. 4484 * 4485 * TODO: this routine can waste much memory for nodes which will 4486 * never be onlined. It's better to use memory hotplug callback 4487 * function. 4488 */ 4489 if (!node_state(node, N_NORMAL_MEMORY)) 4490 tmp = -1; 4491 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4492 if (!pn) 4493 return 1; 4494 4495 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); 4496 if (!pn->lruvec_stat_cpu) { 4497 kfree(pn); 4498 return 1; 4499 } 4500 4501 lruvec_init(&pn->lruvec); 4502 pn->usage_in_excess = 0; 4503 pn->on_tree = false; 4504 pn->memcg = memcg; 4505 4506 memcg->nodeinfo[node] = pn; 4507 return 0; 4508 } 4509 4510 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4511 { 4512 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 4513 4514 if (!pn) 4515 return; 4516 4517 free_percpu(pn->lruvec_stat_cpu); 4518 kfree(pn); 4519 } 4520 4521 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4522 { 4523 int node; 4524 4525 for_each_node(node) 4526 free_mem_cgroup_per_node_info(memcg, node); 4527 free_percpu(memcg->vmstats_percpu); 4528 kfree(memcg); 4529 } 4530 4531 static void mem_cgroup_free(struct mem_cgroup *memcg) 4532 { 4533 memcg_wb_domain_exit(memcg); 4534 __mem_cgroup_free(memcg); 4535 } 4536 4537 static struct mem_cgroup *mem_cgroup_alloc(void) 4538 { 4539 struct mem_cgroup *memcg; 4540 unsigned int size; 4541 int node; 4542 4543 size = sizeof(struct mem_cgroup); 4544 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4545 4546 memcg = kzalloc(size, GFP_KERNEL); 4547 if (!memcg) 4548 return NULL; 4549 4550 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4551 1, MEM_CGROUP_ID_MAX, 4552 GFP_KERNEL); 4553 if (memcg->id.id < 0) 4554 goto fail; 4555 4556 memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); 4557 if (!memcg->vmstats_percpu) 4558 goto fail; 4559 4560 for_each_node(node) 4561 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4562 goto fail; 4563 4564 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4565 goto fail; 4566 4567 
INIT_WORK(&memcg->high_work, high_work_func); 4568 memcg->last_scanned_node = MAX_NUMNODES; 4569 INIT_LIST_HEAD(&memcg->oom_notify); 4570 mutex_init(&memcg->thresholds_lock); 4571 spin_lock_init(&memcg->move_lock); 4572 vmpressure_init(&memcg->vmpressure); 4573 INIT_LIST_HEAD(&memcg->event_list); 4574 spin_lock_init(&memcg->event_list_lock); 4575 memcg->socket_pressure = jiffies; 4576 #ifdef CONFIG_MEMCG_KMEM 4577 memcg->kmemcg_id = -1; 4578 #endif 4579 #ifdef CONFIG_CGROUP_WRITEBACK 4580 INIT_LIST_HEAD(&memcg->cgwb_list); 4581 #endif 4582 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4583 return memcg; 4584 fail: 4585 mem_cgroup_id_remove(memcg); 4586 __mem_cgroup_free(memcg); 4587 return NULL; 4588 } 4589 4590 static struct cgroup_subsys_state * __ref 4591 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4592 { 4593 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4594 struct mem_cgroup *memcg; 4595 long error = -ENOMEM; 4596 4597 memcg = mem_cgroup_alloc(); 4598 if (!memcg) 4599 return ERR_PTR(error); 4600 4601 memcg->high = PAGE_COUNTER_MAX; 4602 memcg->soft_limit = PAGE_COUNTER_MAX; 4603 if (parent) { 4604 memcg->swappiness = mem_cgroup_swappiness(parent); 4605 memcg->oom_kill_disable = parent->oom_kill_disable; 4606 } 4607 if (parent && parent->use_hierarchy) { 4608 memcg->use_hierarchy = true; 4609 page_counter_init(&memcg->memory, &parent->memory); 4610 page_counter_init(&memcg->swap, &parent->swap); 4611 page_counter_init(&memcg->memsw, &parent->memsw); 4612 page_counter_init(&memcg->kmem, &parent->kmem); 4613 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4614 } else { 4615 page_counter_init(&memcg->memory, NULL); 4616 page_counter_init(&memcg->swap, NULL); 4617 page_counter_init(&memcg->memsw, NULL); 4618 page_counter_init(&memcg->kmem, NULL); 4619 page_counter_init(&memcg->tcpmem, NULL); 4620 /* 4621 * Deeper hierarchy with use_hierarchy == false doesn't make 4622 * much sense so let the cgroup subsystem know about this 4623 * unfortunate state in our controller. 4624 */ 4625 if (parent != root_mem_cgroup) 4626 memory_cgrp_subsys.broken_hierarchy = true; 4627 } 4628 4629 /* The following stuff does not apply to the root */ 4630 if (!parent) { 4631 root_mem_cgroup = memcg; 4632 return &memcg->css; 4633 } 4634 4635 error = memcg_online_kmem(memcg); 4636 if (error) 4637 goto fail; 4638 4639 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4640 static_branch_inc(&memcg_sockets_enabled_key); 4641 4642 return &memcg->css; 4643 fail: 4644 mem_cgroup_id_remove(memcg); 4645 mem_cgroup_free(memcg); 4646 return ERR_PTR(-ENOMEM); 4647 } 4648 4649 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4650 { 4651 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4652 4653 /* 4654 * A memcg must be visible for memcg_expand_shrinker_maps() 4655 * by the time the maps are allocated. So, we allocate maps 4656 * here, when for_each_mem_cgroup() can't skip it. 4657 */ 4658 if (memcg_alloc_shrinker_maps(memcg)) { 4659 mem_cgroup_id_remove(memcg); 4660 return -ENOMEM; 4661 } 4662 4663 /* Online state pins memcg ID, memcg ID pins CSS */ 4664 refcount_set(&memcg->id.ref, 1); 4665 css_get(css); 4666 return 0; 4667 } 4668 4669 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4670 { 4671 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4672 struct mem_cgroup_event *event, *tmp; 4673 4674 /* 4675 * Unregister events and notify userspace.
4676 * Notify userspace about cgroup removing only after rmdir of cgroup 4677 * directory to avoid race between userspace and kernelspace. 4678 */ 4679 spin_lock(&memcg->event_list_lock); 4680 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4681 list_del_init(&event->list); 4682 schedule_work(&event->remove); 4683 } 4684 spin_unlock(&memcg->event_list_lock); 4685 4686 page_counter_set_min(&memcg->memory, 0); 4687 page_counter_set_low(&memcg->memory, 0); 4688 4689 memcg_offline_kmem(memcg); 4690 wb_memcg_offline(memcg); 4691 4692 drain_all_stock(memcg); 4693 4694 mem_cgroup_id_put(memcg); 4695 } 4696 4697 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4698 { 4699 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4700 4701 invalidate_reclaim_iterators(memcg); 4702 } 4703 4704 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4705 { 4706 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4707 4708 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4709 static_branch_dec(&memcg_sockets_enabled_key); 4710 4711 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4712 static_branch_dec(&memcg_sockets_enabled_key); 4713 4714 vmpressure_cleanup(&memcg->vmpressure); 4715 cancel_work_sync(&memcg->high_work); 4716 mem_cgroup_remove_from_trees(memcg); 4717 memcg_free_shrinker_maps(memcg); 4718 memcg_free_kmem(memcg); 4719 mem_cgroup_free(memcg); 4720 } 4721 4722 /** 4723 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4724 * @css: the target css 4725 * 4726 * Reset the states of the mem_cgroup associated with @css. This is 4727 * invoked when the userland requests disabling on the default hierarchy 4728 * but the memcg is pinned through dependency. The memcg should stop 4729 * applying policies and should revert to the vanilla state as it may be 4730 * made visible again. 4731 * 4732 * The current implementation only resets the essential configurations. 4733 * This needs to be expanded to cover all the visible parts. 4734 */ 4735 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4736 { 4737 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4738 4739 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 4740 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 4741 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); 4742 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 4743 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 4744 page_counter_set_min(&memcg->memory, 0); 4745 page_counter_set_low(&memcg->memory, 0); 4746 memcg->high = PAGE_COUNTER_MAX; 4747 memcg->soft_limit = PAGE_COUNTER_MAX; 4748 memcg_wb_domain_size_changed(memcg); 4749 } 4750 4751 #ifdef CONFIG_MMU 4752 /* Handlers for move charge at task migration. 
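 *
 * (Editor's overview, summarizing the handlers below rather than adding
 * new behaviour: mem_cgroup_can_attach() records mc.from/mc.to and
 * precharges the pages it expects to move, mem_cgroup_move_task() then
 * walks the page tables and moves charges one page (or THP) at a time
 * via mem_cgroup_move_account(), and mem_cgroup_clear_mc() cancels any
 * leftover precharge and wakes up waiters.)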
*/ 4753 static int mem_cgroup_do_precharge(unsigned long count) 4754 { 4755 int ret; 4756 4757 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4758 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4759 if (!ret) { 4760 mc.precharge += count; 4761 return ret; 4762 } 4763 4764 /* Try charges one by one with reclaim, but do not retry */ 4765 while (count--) { 4766 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 4767 if (ret) 4768 return ret; 4769 mc.precharge++; 4770 cond_resched(); 4771 } 4772 return 0; 4773 } 4774 4775 union mc_target { 4776 struct page *page; 4777 swp_entry_t ent; 4778 }; 4779 4780 enum mc_target_type { 4781 MC_TARGET_NONE = 0, 4782 MC_TARGET_PAGE, 4783 MC_TARGET_SWAP, 4784 MC_TARGET_DEVICE, 4785 }; 4786 4787 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4788 unsigned long addr, pte_t ptent) 4789 { 4790 struct page *page = _vm_normal_page(vma, addr, ptent, true); 4791 4792 if (!page || !page_mapped(page)) 4793 return NULL; 4794 if (PageAnon(page)) { 4795 if (!(mc.flags & MOVE_ANON)) 4796 return NULL; 4797 } else { 4798 if (!(mc.flags & MOVE_FILE)) 4799 return NULL; 4800 } 4801 if (!get_page_unless_zero(page)) 4802 return NULL; 4803 4804 return page; 4805 } 4806 4807 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 4808 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4809 pte_t ptent, swp_entry_t *entry) 4810 { 4811 struct page *page = NULL; 4812 swp_entry_t ent = pte_to_swp_entry(ptent); 4813 4814 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4815 return NULL; 4816 4817 /* 4818 * Handle MEMORY_DEVICE_PRIVATE entries, which are ZONE_DEVICE pages 4819 * belonging to a device; because they are not accessible by the CPU, 4820 * they are stored as special swap entries in the CPU page table. 4821 */ 4822 if (is_device_private_entry(ent)) { 4823 page = device_private_entry_to_page(ent); 4824 /* 4825 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has 4826 * a refcount of 1 when free (unlike a normal page) 4827 */ 4828 if (!page_ref_add_unless(page, 1, 1)) 4829 return NULL; 4830 return page; 4831 } 4832 4833 /* 4834 * Because lookup_swap_cache() updates some statistics counter, 4835 * we call find_get_page() with swapper_space directly. 4836 */ 4837 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 4838 if (do_memsw_account()) 4839 entry->val = ent.val; 4840 4841 return page; 4842 } 4843 #else 4844 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4845 pte_t ptent, swp_entry_t *entry) 4846 { 4847 return NULL; 4848 } 4849 #endif 4850 4851 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4852 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4853 { 4854 struct page *page = NULL; 4855 struct address_space *mapping; 4856 pgoff_t pgoff; 4857 4858 if (!vma->vm_file) /* anonymous vma */ 4859 return NULL; 4860 if (!(mc.flags & MOVE_FILE)) 4861 return NULL; 4862 4863 mapping = vma->vm_file->f_mapping; 4864 pgoff = linear_page_index(vma, addr); 4865 4866 /* page is moved even if it's not RSS of this task (page-faulted). */ 4867 #ifdef CONFIG_SWAP 4868 /* shmem/tmpfs may report page out on swap: account for that too.
*/ 4869 if (shmem_mapping(mapping)) { 4870 page = find_get_entry(mapping, pgoff); 4871 if (xa_is_value(page)) { 4872 swp_entry_t swp = radix_to_swp_entry(page); 4873 if (do_memsw_account()) 4874 *entry = swp; 4875 page = find_get_page(swap_address_space(swp), 4876 swp_offset(swp)); 4877 } 4878 } else 4879 page = find_get_page(mapping, pgoff); 4880 #else 4881 page = find_get_page(mapping, pgoff); 4882 #endif 4883 return page; 4884 } 4885 4886 /** 4887 * mem_cgroup_move_account - move account of the page 4888 * @page: the page 4889 * @compound: charge the page as compound or small page 4890 * @from: mem_cgroup which the page is moved from. 4891 * @to: mem_cgroup which the page is moved to. @from != @to. 4892 * 4893 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 4894 * 4895 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4896 * from old cgroup. 4897 */ 4898 static int mem_cgroup_move_account(struct page *page, 4899 bool compound, 4900 struct mem_cgroup *from, 4901 struct mem_cgroup *to) 4902 { 4903 unsigned long flags; 4904 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4905 int ret; 4906 bool anon; 4907 4908 VM_BUG_ON(from == to); 4909 VM_BUG_ON_PAGE(PageLRU(page), page); 4910 VM_BUG_ON(compound && !PageTransHuge(page)); 4911 4912 /* 4913 * Prevent mem_cgroup_migrate() from looking at 4914 * page->mem_cgroup of its source page while we change it. 4915 */ 4916 ret = -EBUSY; 4917 if (!trylock_page(page)) 4918 goto out; 4919 4920 ret = -EINVAL; 4921 if (page->mem_cgroup != from) 4922 goto out_unlock; 4923 4924 anon = PageAnon(page); 4925 4926 spin_lock_irqsave(&from->move_lock, flags); 4927 4928 if (!anon && page_mapped(page)) { 4929 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages); 4930 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages); 4931 } 4932 4933 /* 4934 * move_lock grabbed above and caller set from->moving_account, so 4935 * mod_memcg_page_state will serialize updates to PageDirty. 4936 * So mapping should be stable for dirty pages. 4937 */ 4938 if (!anon && PageDirty(page)) { 4939 struct address_space *mapping = page_mapping(page); 4940 4941 if (mapping_cap_account_dirty(mapping)) { 4942 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages); 4943 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages); 4944 } 4945 } 4946 4947 if (PageWriteback(page)) { 4948 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages); 4949 __mod_memcg_state(to, NR_WRITEBACK, nr_pages); 4950 } 4951 4952 /* 4953 * It is safe to change page->mem_cgroup here because the page 4954 * is referenced, charged, and isolated - we can't race with 4955 * uncharging, charging, migration, or LRU putback. 
4956 */ 4957 4958 /* caller should have done css_get */ 4959 page->mem_cgroup = to; 4960 spin_unlock_irqrestore(&from->move_lock, flags); 4961 4962 ret = 0; 4963 4964 local_irq_disable(); 4965 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4966 memcg_check_events(to, page); 4967 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4968 memcg_check_events(from, page); 4969 local_irq_enable(); 4970 out_unlock: 4971 unlock_page(page); 4972 out: 4973 return ret; 4974 } 4975 4976 /** 4977 * get_mctgt_type - get target type of moving charge 4978 * @vma: the vma the pte to be checked belongs to 4979 * @addr: the address corresponding to the pte to be checked 4980 * @ptent: the pte to be checked 4981 * @target: the pointer in which the target page or swap entry will be stored (can be NULL) 4982 * 4983 * Returns 4984 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4985 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4986 * move charge. If @target is not NULL, the page is stored in target->page 4987 * with an extra refcnt taken (callers should handle it). 4988 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4989 * target for charge migration. If @target is not NULL, the entry is stored 4990 * in target->ent. 4991 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PUBLIC 4992 * or MEMORY_DEVICE_PRIVATE (so ZONE_DEVICE page and thus not on the lru). 4993 * For now such a page is charged like a regular page would be, as for all 4994 * intents and purposes it is just special memory taking the place of a 4995 * regular page. 4996 * 4997 * See Documentation/vm/hmm.txt and include/linux/hmm.h 4998 * 4999 * Called with pte lock held. 5000 */ 5001 5002 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5003 unsigned long addr, pte_t ptent, union mc_target *target) 5004 { 5005 struct page *page = NULL; 5006 enum mc_target_type ret = MC_TARGET_NONE; 5007 swp_entry_t ent = { .val = 0 }; 5008 5009 if (pte_present(ptent)) 5010 page = mc_handle_present_pte(vma, addr, ptent); 5011 else if (is_swap_pte(ptent)) 5012 page = mc_handle_swap_pte(vma, ptent, &ent); 5013 else if (pte_none(ptent)) 5014 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5015 5016 if (!page && !ent.val) 5017 return ret; 5018 if (page) { 5019 /* 5020 * Do only loose check w/o serialization. 5021 * mem_cgroup_move_account() checks the page is valid or 5022 * not under LRU exclusion. 5023 */ 5024 if (page->mem_cgroup == mc.from) { 5025 ret = MC_TARGET_PAGE; 5026 if (is_device_private_page(page) || 5027 is_device_public_page(page)) 5028 ret = MC_TARGET_DEVICE; 5029 if (target) 5030 target->page = page; 5031 } 5032 if (!ret || !target) 5033 put_page(page); 5034 } 5035 /* 5036 * There is a swap entry and a page doesn't exist or isn't charged. 5037 * But we cannot move a tail-page in a THP. 5038 */ 5039 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5040 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5041 ret = MC_TARGET_SWAP; 5042 if (target) 5043 target->ent = ent; 5044 } 5045 return ret; 5046 } 5047 5048 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5049 /* 5050 * We don't consider PMD mapped swapping or file mapped pages because THP does 5051 * not support them for now. 5052 * Caller should make sure that pmd_trans_huge(pmd) is true.
5053 */ 5054 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5055 unsigned long addr, pmd_t pmd, union mc_target *target) 5056 { 5057 struct page *page = NULL; 5058 enum mc_target_type ret = MC_TARGET_NONE; 5059 5060 if (unlikely(is_swap_pmd(pmd))) { 5061 VM_BUG_ON(thp_migration_supported() && 5062 !is_pmd_migration_entry(pmd)); 5063 return ret; 5064 } 5065 page = pmd_page(pmd); 5066 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5067 if (!(mc.flags & MOVE_ANON)) 5068 return ret; 5069 if (page->mem_cgroup == mc.from) { 5070 ret = MC_TARGET_PAGE; 5071 if (target) { 5072 get_page(page); 5073 target->page = page; 5074 } 5075 } 5076 return ret; 5077 } 5078 #else 5079 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5080 unsigned long addr, pmd_t pmd, union mc_target *target) 5081 { 5082 return MC_TARGET_NONE; 5083 } 5084 #endif 5085 5086 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5087 unsigned long addr, unsigned long end, 5088 struct mm_walk *walk) 5089 { 5090 struct vm_area_struct *vma = walk->vma; 5091 pte_t *pte; 5092 spinlock_t *ptl; 5093 5094 ptl = pmd_trans_huge_lock(pmd, vma); 5095 if (ptl) { 5096 /* 5097 * Note there cannot be MC_TARGET_DEVICE for now as we do not 5098 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or 5099 * MEMORY_DEVICE_PRIVATE, but this might change. 5100 */ 5101 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5102 mc.precharge += HPAGE_PMD_NR; 5103 spin_unlock(ptl); 5104 return 0; 5105 } 5106 5107 if (pmd_trans_unstable(pmd)) 5108 return 0; 5109 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5110 for (; addr != end; pte++, addr += PAGE_SIZE) 5111 if (get_mctgt_type(vma, addr, *pte, NULL)) 5112 mc.precharge++; /* increment precharge temporarily */ 5113 pte_unmap_unlock(pte - 1, ptl); 5114 cond_resched(); 5115 5116 return 0; 5117 } 5118 5119 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5120 { 5121 unsigned long precharge; 5122 5123 struct mm_walk mem_cgroup_count_precharge_walk = { 5124 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5125 .mm = mm, 5126 }; 5127 down_read(&mm->mmap_sem); 5128 walk_page_range(0, mm->highest_vm_end, 5129 &mem_cgroup_count_precharge_walk); 5130 up_read(&mm->mmap_sem); 5131 5132 precharge = mc.precharge; 5133 mc.precharge = 0; 5134 5135 return precharge; 5136 } 5137 5138 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5139 { 5140 unsigned long precharge = mem_cgroup_count_precharge(mm); 5141 5142 VM_BUG_ON(mc.moving_task); 5143 mc.moving_task = current; 5144 return mem_cgroup_do_precharge(precharge); 5145 } 5146 5147 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5148 static void __mem_cgroup_clear_mc(void) 5149 { 5150 struct mem_cgroup *from = mc.from; 5151 struct mem_cgroup *to = mc.to; 5152 5153 /* we must uncharge all the leftover precharges from mc.to */ 5154 if (mc.precharge) { 5155 cancel_charge(mc.to, mc.precharge); 5156 mc.precharge = 0; 5157 } 5158 /* 5159 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5160 * we must uncharge here.
5161 */ 5162 if (mc.moved_charge) { 5163 cancel_charge(mc.from, mc.moved_charge); 5164 mc.moved_charge = 0; 5165 } 5166 /* we must fixup refcnts and charges */ 5167 if (mc.moved_swap) { 5168 /* uncharge swap account from the old cgroup */ 5169 if (!mem_cgroup_is_root(mc.from)) 5170 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5171 5172 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5173 5174 /* 5175 * we charged both to->memory and to->memsw, so we 5176 * should uncharge to->memory. 5177 */ 5178 if (!mem_cgroup_is_root(mc.to)) 5179 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5180 5181 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 5182 css_put_many(&mc.to->css, mc.moved_swap); 5183 5184 mc.moved_swap = 0; 5185 } 5186 memcg_oom_recover(from); 5187 memcg_oom_recover(to); 5188 wake_up_all(&mc.waitq); 5189 } 5190 5191 static void mem_cgroup_clear_mc(void) 5192 { 5193 struct mm_struct *mm = mc.mm; 5194 5195 /* 5196 * we must clear moving_task before waking up waiters at the end of 5197 * task migration. 5198 */ 5199 mc.moving_task = NULL; 5200 __mem_cgroup_clear_mc(); 5201 spin_lock(&mc.lock); 5202 mc.from = NULL; 5203 mc.to = NULL; 5204 mc.mm = NULL; 5205 spin_unlock(&mc.lock); 5206 5207 mmput(mm); 5208 } 5209 5210 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5211 { 5212 struct cgroup_subsys_state *css; 5213 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5214 struct mem_cgroup *from; 5215 struct task_struct *leader, *p; 5216 struct mm_struct *mm; 5217 unsigned long move_flags; 5218 int ret = 0; 5219 5220 /* charge immigration isn't supported on the default hierarchy */ 5221 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5222 return 0; 5223 5224 /* 5225 * Multi-process migrations only happen on the default hierarchy 5226 * where charge immigration is not used. Perform charge 5227 * immigration if @tset contains a leader and whine if there are 5228 * multiple. 5229 */ 5230 p = NULL; 5231 cgroup_taskset_for_each_leader(leader, css, tset) { 5232 WARN_ON_ONCE(p); 5233 p = leader; 5234 memcg = mem_cgroup_from_css(css); 5235 } 5236 if (!p) 5237 return 0; 5238 5239 /* 5240 * We are now committed to this value whatever it is. Changes in this 5241 * tunable will only affect upcoming migrations, not the current one. 5242 * So we need to save it, and keep it going.
5243 */ 5244 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5245 if (!move_flags) 5246 return 0; 5247 5248 from = mem_cgroup_from_task(p); 5249 5250 VM_BUG_ON(from == memcg); 5251 5252 mm = get_task_mm(p); 5253 if (!mm) 5254 return 0; 5255 /* We move charges only when we move an owner of the mm */ 5256 if (mm->owner == p) { 5257 VM_BUG_ON(mc.from); 5258 VM_BUG_ON(mc.to); 5259 VM_BUG_ON(mc.precharge); 5260 VM_BUG_ON(mc.moved_charge); 5261 VM_BUG_ON(mc.moved_swap); 5262 5263 spin_lock(&mc.lock); 5264 mc.mm = mm; 5265 mc.from = from; 5266 mc.to = memcg; 5267 mc.flags = move_flags; 5268 spin_unlock(&mc.lock); 5269 /* We set mc.moving_task later */ 5270 5271 ret = mem_cgroup_precharge_mc(mm); 5272 if (ret) 5273 mem_cgroup_clear_mc(); 5274 } else { 5275 mmput(mm); 5276 } 5277 return ret; 5278 } 5279 5280 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5281 { 5282 if (mc.to) 5283 mem_cgroup_clear_mc(); 5284 } 5285 5286 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5287 unsigned long addr, unsigned long end, 5288 struct mm_walk *walk) 5289 { 5290 int ret = 0; 5291 struct vm_area_struct *vma = walk->vma; 5292 pte_t *pte; 5293 spinlock_t *ptl; 5294 enum mc_target_type target_type; 5295 union mc_target target; 5296 struct page *page; 5297 5298 ptl = pmd_trans_huge_lock(pmd, vma); 5299 if (ptl) { 5300 if (mc.precharge < HPAGE_PMD_NR) { 5301 spin_unlock(ptl); 5302 return 0; 5303 } 5304 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5305 if (target_type == MC_TARGET_PAGE) { 5306 page = target.page; 5307 if (!isolate_lru_page(page)) { 5308 if (!mem_cgroup_move_account(page, true, 5309 mc.from, mc.to)) { 5310 mc.precharge -= HPAGE_PMD_NR; 5311 mc.moved_charge += HPAGE_PMD_NR; 5312 } 5313 putback_lru_page(page); 5314 } 5315 put_page(page); 5316 } else if (target_type == MC_TARGET_DEVICE) { 5317 page = target.page; 5318 if (!mem_cgroup_move_account(page, true, 5319 mc.from, mc.to)) { 5320 mc.precharge -= HPAGE_PMD_NR; 5321 mc.moved_charge += HPAGE_PMD_NR; 5322 } 5323 put_page(page); 5324 } 5325 spin_unlock(ptl); 5326 return 0; 5327 } 5328 5329 if (pmd_trans_unstable(pmd)) 5330 return 0; 5331 retry: 5332 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5333 for (; addr != end; addr += PAGE_SIZE) { 5334 pte_t ptent = *(pte++); 5335 bool device = false; 5336 swp_entry_t ent; 5337 5338 if (!mc.precharge) 5339 break; 5340 5341 switch (get_mctgt_type(vma, addr, ptent, &target)) { 5342 case MC_TARGET_DEVICE: 5343 device = true; 5344 /* fall through */ 5345 case MC_TARGET_PAGE: 5346 page = target.page; 5347 /* 5348 * We can have a part of the split pmd here. Moving it 5349 * can be done but it would be too convoluted so simply 5350 * ignore such a partial THP and keep it in the original 5351 * memcg. There should be somebody mapping the head. 5352 */ 5353 if (PageTransCompound(page)) 5354 goto put; 5355 if (!device && isolate_lru_page(page)) 5356 goto put; 5357 if (!mem_cgroup_move_account(page, false, 5358 mc.from, mc.to)) { 5359 mc.precharge--; 5360 /* we uncharge from mc.from later. */ 5361 mc.moved_charge++; 5362 } 5363 if (!device) 5364 putback_lru_page(page); 5365 put: /* get_mctgt_type() gets the page */ 5366 put_page(page); 5367 break; 5368 case MC_TARGET_SWAP: 5369 ent = target.ent; 5370 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5371 mc.precharge--; 5372 /* we fixup refcnts and charges later.
*/ 5373 mc.moved_swap++; 5374 } 5375 break; 5376 default: 5377 break; 5378 } 5379 } 5380 pte_unmap_unlock(pte - 1, ptl); 5381 cond_resched(); 5382 5383 if (addr != end) { 5384 /* 5385 * We have consumed all precharges we got in can_attach(). 5386 * We try charging one by one, but don't do any additional 5387 * charges to mc.to if we have failed to charge once in the 5388 * attach() phase. 5389 */ 5390 ret = mem_cgroup_do_precharge(1); 5391 if (!ret) 5392 goto retry; 5393 } 5394 5395 return ret; 5396 } 5397 5398 static void mem_cgroup_move_charge(void) 5399 { 5400 struct mm_walk mem_cgroup_move_charge_walk = { 5401 .pmd_entry = mem_cgroup_move_charge_pte_range, 5402 .mm = mc.mm, 5403 }; 5404 5405 lru_add_drain_all(); 5406 /* 5407 * Signal lock_page_memcg() to take the memcg's move_lock 5408 * while we're moving its pages to another memcg. Then wait 5409 * for already started RCU-only updates to finish. 5410 */ 5411 atomic_inc(&mc.from->moving_account); 5412 synchronize_rcu(); 5413 retry: 5414 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 5415 /* 5416 * Someone holding the mmap_sem might be waiting in the 5417 * waitq. So we cancel all extra charges, wake up all waiters, 5418 * and retry. Because we cancel precharges, we might not be able 5419 * to move enough charges, but moving charge is a best-effort 5420 * feature anyway, so it wouldn't be a big problem. 5421 */ 5422 __mem_cgroup_clear_mc(); 5423 cond_resched(); 5424 goto retry; 5425 } 5426 /* 5427 * When we have consumed all precharges and failed in doing 5428 * additional charge, the page walk just aborts. 5429 */ 5430 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 5431 5432 up_read(&mc.mm->mmap_sem); 5433 atomic_dec(&mc.from->moving_account); 5434 } 5435 5436 static void mem_cgroup_move_task(void) 5437 { 5438 if (mc.to) { 5439 mem_cgroup_move_charge(); 5440 mem_cgroup_clear_mc(); 5441 } 5442 } 5443 #else /* !CONFIG_MMU */ 5444 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5445 { 5446 return 0; 5447 } 5448 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5449 { 5450 } 5451 static void mem_cgroup_move_task(void) 5452 { 5453 } 5454 #endif 5455 5456 /* 5457 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5458 * to verify whether we're attached to the default hierarchy on each mount 5459 * attempt. 5460 */ 5461 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5462 { 5463 /* 5464 * use_hierarchy is forced on the default hierarchy. cgroup core 5465 * guarantees that @root doesn't have any children, so turning it 5466 * on for the root memcg is enough.
5467 */ 5468 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5469 root_mem_cgroup->use_hierarchy = true; 5470 else 5471 root_mem_cgroup->use_hierarchy = false; 5472 } 5473 5474 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 5475 { 5476 if (value == PAGE_COUNTER_MAX) 5477 seq_puts(m, "max\n"); 5478 else 5479 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 5480 5481 return 0; 5482 } 5483 5484 static u64 memory_current_read(struct cgroup_subsys_state *css, 5485 struct cftype *cft) 5486 { 5487 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5488 5489 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5490 } 5491 5492 static int memory_min_show(struct seq_file *m, void *v) 5493 { 5494 return seq_puts_memcg_tunable(m, 5495 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 5496 } 5497 5498 static ssize_t memory_min_write(struct kernfs_open_file *of, 5499 char *buf, size_t nbytes, loff_t off) 5500 { 5501 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5502 unsigned long min; 5503 int err; 5504 5505 buf = strstrip(buf); 5506 err = page_counter_memparse(buf, "max", &min); 5507 if (err) 5508 return err; 5509 5510 page_counter_set_min(&memcg->memory, min); 5511 5512 return nbytes; 5513 } 5514 5515 static int memory_low_show(struct seq_file *m, void *v) 5516 { 5517 return seq_puts_memcg_tunable(m, 5518 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 5519 } 5520 5521 static ssize_t memory_low_write(struct kernfs_open_file *of, 5522 char *buf, size_t nbytes, loff_t off) 5523 { 5524 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5525 unsigned long low; 5526 int err; 5527 5528 buf = strstrip(buf); 5529 err = page_counter_memparse(buf, "max", &low); 5530 if (err) 5531 return err; 5532 5533 page_counter_set_low(&memcg->memory, low); 5534 5535 return nbytes; 5536 } 5537 5538 static int memory_high_show(struct seq_file *m, void *v) 5539 { 5540 return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high)); 5541 } 5542 5543 static ssize_t memory_high_write(struct kernfs_open_file *of, 5544 char *buf, size_t nbytes, loff_t off) 5545 { 5546 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5547 unsigned long nr_pages; 5548 unsigned long high; 5549 int err; 5550 5551 buf = strstrip(buf); 5552 err = page_counter_memparse(buf, "max", &high); 5553 if (err) 5554 return err; 5555 5556 memcg->high = high; 5557 5558 nr_pages = page_counter_read(&memcg->memory); 5559 if (nr_pages > high) 5560 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5561 GFP_KERNEL, true); 5562 5563 memcg_wb_domain_size_changed(memcg); 5564 return nbytes; 5565 } 5566 5567 static int memory_max_show(struct seq_file *m, void *v) 5568 { 5569 return seq_puts_memcg_tunable(m, 5570 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 5571 } 5572 5573 static ssize_t memory_max_write(struct kernfs_open_file *of, 5574 char *buf, size_t nbytes, loff_t off) 5575 { 5576 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5577 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5578 bool drained = false; 5579 unsigned long max; 5580 int err; 5581 5582 buf = strstrip(buf); 5583 err = page_counter_memparse(buf, "max", &max); 5584 if (err) 5585 return err; 5586 5587 xchg(&memcg->memory.max, max); 5588 5589 for (;;) { 5590 unsigned long nr_pages = page_counter_read(&memcg->memory); 5591 5592 if (nr_pages <= max) 5593 break; 5594 5595 if (signal_pending(current)) { 5596 err = -EINTR; 5597 break; 5598 } 5599 5600 if (!drained) { 5601 drain_all_stock(memcg); 5602 drained = 
true; 5603 continue; 5604 } 5605 5606 if (nr_reclaims) { 5607 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5608 GFP_KERNEL, true)) 5609 nr_reclaims--; 5610 continue; 5611 } 5612 5613 memcg_memory_event(memcg, MEMCG_OOM); 5614 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5615 break; 5616 } 5617 5618 memcg_wb_domain_size_changed(memcg); 5619 return nbytes; 5620 } 5621 5622 static int memory_events_show(struct seq_file *m, void *v) 5623 { 5624 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5625 5626 seq_printf(m, "low %lu\n", 5627 atomic_long_read(&memcg->memory_events[MEMCG_LOW])); 5628 seq_printf(m, "high %lu\n", 5629 atomic_long_read(&memcg->memory_events[MEMCG_HIGH])); 5630 seq_printf(m, "max %lu\n", 5631 atomic_long_read(&memcg->memory_events[MEMCG_MAX])); 5632 seq_printf(m, "oom %lu\n", 5633 atomic_long_read(&memcg->memory_events[MEMCG_OOM])); 5634 seq_printf(m, "oom_kill %lu\n", 5635 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 5636 5637 return 0; 5638 } 5639 5640 static int memory_stat_show(struct seq_file *m, void *v) 5641 { 5642 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5643 int i; 5644 5645 /* 5646 * Provide statistics on the state of the memory subsystem as 5647 * well as cumulative event counters that show past behavior. 5648 * 5649 * This list is ordered following a combination of these gradients: 5650 * 1) generic big picture -> specifics and details 5651 * 2) reflecting userspace activity -> reflecting kernel heuristics 5652 * 5653 * Current memory state: 5654 */ 5655 5656 seq_printf(m, "anon %llu\n", 5657 (u64)memcg_page_state(memcg, MEMCG_RSS) * PAGE_SIZE); 5658 seq_printf(m, "file %llu\n", 5659 (u64)memcg_page_state(memcg, MEMCG_CACHE) * PAGE_SIZE); 5660 seq_printf(m, "kernel_stack %llu\n", 5661 (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1024); 5662 seq_printf(m, "slab %llu\n", 5663 (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) + 5664 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) * 5665 PAGE_SIZE); 5666 seq_printf(m, "sock %llu\n", 5667 (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE); 5668 5669 seq_printf(m, "shmem %llu\n", 5670 (u64)memcg_page_state(memcg, NR_SHMEM) * PAGE_SIZE); 5671 seq_printf(m, "file_mapped %llu\n", 5672 (u64)memcg_page_state(memcg, NR_FILE_MAPPED) * PAGE_SIZE); 5673 seq_printf(m, "file_dirty %llu\n", 5674 (u64)memcg_page_state(memcg, NR_FILE_DIRTY) * PAGE_SIZE); 5675 seq_printf(m, "file_writeback %llu\n", 5676 (u64)memcg_page_state(memcg, NR_WRITEBACK) * PAGE_SIZE); 5677 5678 /* 5679 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter 5680 * with the NR_ANON_THP vm counter, but right now it's a pain in the 5681 * arse because it requires migrating the work out of rmap to a place 5682 * where the page->mem_cgroup is set up and stable. 
5683 */ 5684 seq_printf(m, "anon_thp %llu\n", 5685 (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) * PAGE_SIZE); 5686 5687 for (i = 0; i < NR_LRU_LISTS; i++) 5688 seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i], 5689 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 5690 PAGE_SIZE); 5691 5692 seq_printf(m, "slab_reclaimable %llu\n", 5693 (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) * 5694 PAGE_SIZE); 5695 seq_printf(m, "slab_unreclaimable %llu\n", 5696 (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) * 5697 PAGE_SIZE); 5698 5699 /* Accumulated memory events */ 5700 5701 seq_printf(m, "pgfault %lu\n", memcg_events(memcg, PGFAULT)); 5702 seq_printf(m, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT)); 5703 5704 seq_printf(m, "workingset_refault %lu\n", 5705 memcg_page_state(memcg, WORKINGSET_REFAULT)); 5706 seq_printf(m, "workingset_activate %lu\n", 5707 memcg_page_state(memcg, WORKINGSET_ACTIVATE)); 5708 seq_printf(m, "workingset_nodereclaim %lu\n", 5709 memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); 5710 5711 seq_printf(m, "pgrefill %lu\n", memcg_events(memcg, PGREFILL)); 5712 seq_printf(m, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) + 5713 memcg_events(memcg, PGSCAN_DIRECT)); 5714 seq_printf(m, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) + 5715 memcg_events(memcg, PGSTEAL_DIRECT)); 5716 seq_printf(m, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE)); 5717 seq_printf(m, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE)); 5718 seq_printf(m, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE)); 5719 seq_printf(m, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED)); 5720 5721 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5722 seq_printf(m, "thp_fault_alloc %lu\n", 5723 memcg_events(memcg, THP_FAULT_ALLOC)); 5724 seq_printf(m, "thp_collapse_alloc %lu\n", 5725 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 5726 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 5727 5728 return 0; 5729 } 5730 5731 static int memory_oom_group_show(struct seq_file *m, void *v) 5732 { 5733 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5734 5735 seq_printf(m, "%d\n", memcg->oom_group); 5736 5737 return 0; 5738 } 5739 5740 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 5741 char *buf, size_t nbytes, loff_t off) 5742 { 5743 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5744 int ret, oom_group; 5745 5746 buf = strstrip(buf); 5747 if (!buf) 5748 return -EINVAL; 5749 5750 ret = kstrtoint(buf, 0, &oom_group); 5751 if (ret) 5752 return ret; 5753 5754 if (oom_group != 0 && oom_group != 1) 5755 return -EINVAL; 5756 5757 memcg->oom_group = oom_group; 5758 5759 return nbytes; 5760 } 5761 5762 static struct cftype memory_files[] = { 5763 { 5764 .name = "current", 5765 .flags = CFTYPE_NOT_ON_ROOT, 5766 .read_u64 = memory_current_read, 5767 }, 5768 { 5769 .name = "min", 5770 .flags = CFTYPE_NOT_ON_ROOT, 5771 .seq_show = memory_min_show, 5772 .write = memory_min_write, 5773 }, 5774 { 5775 .name = "low", 5776 .flags = CFTYPE_NOT_ON_ROOT, 5777 .seq_show = memory_low_show, 5778 .write = memory_low_write, 5779 }, 5780 { 5781 .name = "high", 5782 .flags = CFTYPE_NOT_ON_ROOT, 5783 .seq_show = memory_high_show, 5784 .write = memory_high_write, 5785 }, 5786 { 5787 .name = "max", 5788 .flags = CFTYPE_NOT_ON_ROOT, 5789 .seq_show = memory_max_show, 5790 .write = memory_max_write, 5791 }, 5792 { 5793 .name = "events", 5794 .flags = CFTYPE_NOT_ON_ROOT, 5795 .file_offset = offsetof(struct mem_cgroup, events_file), 5796 .seq_show = memory_events_show, 5797 }, 5798 { 5799 .name = "stat", 5800 
.flags = CFTYPE_NOT_ON_ROOT, 5801 .seq_show = memory_stat_show, 5802 }, 5803 { 5804 .name = "oom.group", 5805 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 5806 .seq_show = memory_oom_group_show, 5807 .write = memory_oom_group_write, 5808 }, 5809 { } /* terminate */ 5810 }; 5811 5812 struct cgroup_subsys memory_cgrp_subsys = { 5813 .css_alloc = mem_cgroup_css_alloc, 5814 .css_online = mem_cgroup_css_online, 5815 .css_offline = mem_cgroup_css_offline, 5816 .css_released = mem_cgroup_css_released, 5817 .css_free = mem_cgroup_css_free, 5818 .css_reset = mem_cgroup_css_reset, 5819 .can_attach = mem_cgroup_can_attach, 5820 .cancel_attach = mem_cgroup_cancel_attach, 5821 .post_attach = mem_cgroup_move_task, 5822 .bind = mem_cgroup_bind, 5823 .dfl_cftypes = memory_files, 5824 .legacy_cftypes = mem_cgroup_legacy_files, 5825 .early_init = 0, 5826 }; 5827 5828 /** 5829 * mem_cgroup_protected - check if memory consumption is in the normal range 5830 * @root: the top ancestor of the sub-tree being checked 5831 * @memcg: the memory cgroup to check 5832 * 5833 * WARNING: This function is not stateless! It can only be used as part 5834 * of a top-down tree iteration, not for isolated queries. 5835 * 5836 * Returns one of the following: 5837 * MEMCG_PROT_NONE: cgroup memory is not protected 5838 * MEMCG_PROT_LOW: cgroup memory is protected as long as there is 5839 * an unprotected supply of reclaimable memory from other cgroups. 5840 * MEMCG_PROT_MIN: cgroup memory is protected 5841 * 5842 * @root is exclusive; it is never protected when looked at directly 5843 * 5844 * To provide a proper hierarchical behavior, effective memory.min/low values 5845 * are used. Below is the description of how effective memory.low is calculated. 5846 * The effective memory.min value is calculated in the same way. 5847 * 5848 * Effective memory.low is always equal to or less than the original memory.low. 5849 * If there is no memory.low overcommitment (which is always true for 5850 * top-level memory cgroups), these two values are equal. 5851 * Otherwise, it's a part of parent's effective memory.low, 5852 * calculated as a cgroup's memory.low usage divided by the sum of siblings' 5853 * memory.low usages, where memory.low usage is the size of actually 5854 * protected memory. 5855 * 5856 * low_usage 5857 * elow = min( memory.low, parent->elow * ------------------ ), 5858 * siblings_low_usage 5859 * 5860 * | memory.current, if memory.current < memory.low 5861 * low_usage = | 5862 * | 0, otherwise. 5863 * 5864 * 5865 * Such definition of the effective memory.low provides the expected 5866 * hierarchical behavior: parent's memory.low value is limiting 5867 * children, unprotected memory is reclaimed first, and cgroups 5868 * which are not using their guarantee do not affect the actual memory 5869 * distribution.
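 *
 * As a rough worked illustration (editor's arithmetic, matching the
 * example that follows): with a parent whose elow is 2G and children
 * B (low=3G, usage=2G), C (low=1G, usage=2G) and D (low=0, usage=2G),
 * low_usage is min(usage, low), so siblings_low_usage = 2G + 1G + 0 = 3G
 * and
 *
 *	elow(B) = min(3G, 2G * 2G / 3G) ~= 1.3G
 *	elow(C) = min(1G, 2G * 1G / 3G) ~= 0.6G
 *	elow(D) = min(0G, 2G * 0G / 3G)  = 0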
5870 * 5871 * For example, if there are memcgs A, A/B, A/C, A/D and A/E: 5872 * 5873 * A A/memory.low = 2G, A/memory.current = 6G 5874 * //\\ 5875 * BC DE B/memory.low = 3G B/memory.current = 2G 5876 * C/memory.low = 1G C/memory.current = 2G 5877 * D/memory.low = 0 D/memory.current = 2G 5878 * E/memory.low = 10G E/memory.current = 0 5879 * 5880 * and the memory pressure is applied, the following memory distribution 5881 * is expected (approximately): 5882 * 5883 * A/memory.current = 2G 5884 * 5885 * B/memory.current = 1.3G 5886 * C/memory.current = 0.6G 5887 * D/memory.current = 0 5888 * E/memory.current = 0 5889 * 5890 * These calculations require constant tracking of the actual low usages 5891 * (see propagate_protected_usage()), as well as recursive calculation of 5892 * effective memory.low values. But as we do call mem_cgroup_protected() 5893 * path for each memory cgroup top-down from the reclaim, 5894 * it's possible to optimize this part, and save calculated elow 5895 * for next usage. This part is intentionally racy, but it's ok, 5896 * as memory.low is a best-effort mechanism. 5897 */ 5898 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, 5899 struct mem_cgroup *memcg) 5900 { 5901 struct mem_cgroup *parent; 5902 unsigned long emin, parent_emin; 5903 unsigned long elow, parent_elow; 5904 unsigned long usage; 5905 5906 if (mem_cgroup_disabled()) 5907 return MEMCG_PROT_NONE; 5908 5909 if (!root) 5910 root = root_mem_cgroup; 5911 if (memcg == root) 5912 return MEMCG_PROT_NONE; 5913 5914 usage = page_counter_read(&memcg->memory); 5915 if (!usage) 5916 return MEMCG_PROT_NONE; 5917 5918 emin = memcg->memory.min; 5919 elow = memcg->memory.low; 5920 5921 parent = parent_mem_cgroup(memcg); 5922 /* No parent means a non-hierarchical mode on v1 memcg */ 5923 if (!parent) 5924 return MEMCG_PROT_NONE; 5925 5926 if (parent == root) 5927 goto exit; 5928 5929 parent_emin = READ_ONCE(parent->memory.emin); 5930 emin = min(emin, parent_emin); 5931 if (emin && parent_emin) { 5932 unsigned long min_usage, siblings_min_usage; 5933 5934 min_usage = min(usage, memcg->memory.min); 5935 siblings_min_usage = atomic_long_read( 5936 &parent->memory.children_min_usage); 5937 5938 if (min_usage && siblings_min_usage) 5939 emin = min(emin, parent_emin * min_usage / 5940 siblings_min_usage); 5941 } 5942 5943 parent_elow = READ_ONCE(parent->memory.elow); 5944 elow = min(elow, parent_elow); 5945 if (elow && parent_elow) { 5946 unsigned long low_usage, siblings_low_usage; 5947 5948 low_usage = min(usage, memcg->memory.low); 5949 siblings_low_usage = atomic_long_read( 5950 &parent->memory.children_low_usage); 5951 5952 if (low_usage && siblings_low_usage) 5953 elow = min(elow, parent_elow * low_usage / 5954 siblings_low_usage); 5955 } 5956 5957 exit: 5958 memcg->memory.emin = emin; 5959 memcg->memory.elow = elow; 5960 5961 if (usage <= emin) 5962 return MEMCG_PROT_MIN; 5963 else if (usage <= elow) 5964 return MEMCG_PROT_LOW; 5965 else 5966 return MEMCG_PROT_NONE; 5967 } 5968 5969 /** 5970 * mem_cgroup_try_charge - try charging a page 5971 * @page: page to charge 5972 * @mm: mm context of the victim 5973 * @gfp_mask: reclaim mode 5974 * @memcgp: charged memcg return 5975 * @compound: charge the page as compound or small page 5976 * 5977 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5978 * pages according to @gfp_mask if necessary. 5979 * 5980 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5981 * Otherwise, an error code is returned. 
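 *
 * A minimal caller-side sketch (an editor's illustration of the sequence
 * described below, not lifted from a particular call site; "install_failed"
 * stands for whatever error the caller hits while instantiating the page):
 *
 *	ret = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
 *	if (ret)
 *		return ret;
 *	... set up page->mapping and map the page ...
 *	if (install_failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return -ENOMEM;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);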
5982 * 5983 * After page->mapping has been set up, the caller must finalize the 5984 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5985 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5986 */ 5987 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5988 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5989 bool compound) 5990 { 5991 struct mem_cgroup *memcg = NULL; 5992 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5993 int ret = 0; 5994 5995 if (mem_cgroup_disabled()) 5996 goto out; 5997 5998 if (PageSwapCache(page)) { 5999 /* 6000 * Every swap fault against a single page tries to charge the 6001 * page, bail as early as possible. shmem_unuse() encounters 6002 * already charged pages, too. The USED bit is protected by 6003 * the page lock, which serializes swap cache removal, which 6004 * in turn serializes uncharging. 6005 */ 6006 VM_BUG_ON_PAGE(!PageLocked(page), page); 6007 if (compound_head(page)->mem_cgroup) 6008 goto out; 6009 6010 if (do_swap_account) { 6011 swp_entry_t ent = { .val = page_private(page), }; 6012 unsigned short id = lookup_swap_cgroup_id(ent); 6013 6014 rcu_read_lock(); 6015 memcg = mem_cgroup_from_id(id); 6016 if (memcg && !css_tryget_online(&memcg->css)) 6017 memcg = NULL; 6018 rcu_read_unlock(); 6019 } 6020 } 6021 6022 if (!memcg) 6023 memcg = get_mem_cgroup_from_mm(mm); 6024 6025 ret = try_charge(memcg, gfp_mask, nr_pages); 6026 6027 css_put(&memcg->css); 6028 out: 6029 *memcgp = memcg; 6030 return ret; 6031 } 6032 6033 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, 6034 gfp_t gfp_mask, struct mem_cgroup **memcgp, 6035 bool compound) 6036 { 6037 struct mem_cgroup *memcg; 6038 int ret; 6039 6040 ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound); 6041 memcg = *memcgp; 6042 mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask); 6043 return ret; 6044 } 6045 6046 /** 6047 * mem_cgroup_commit_charge - commit a page charge 6048 * @page: page to charge 6049 * @memcg: memcg to charge the page to 6050 * @lrucare: page might be on LRU already 6051 * @compound: charge the page as compound or small page 6052 * 6053 * Finalize a charge transaction started by mem_cgroup_try_charge(), 6054 * after page->mapping has been set up. This must happen atomically 6055 * as part of the page instantiation, i.e. under the page table lock 6056 * for anonymous pages, under the page lock for page and swap cache. 6057 * 6058 * In addition, the page must not be on the LRU during the commit, to 6059 * prevent racing with task migration. If it might be, use @lrucare. 6060 * 6061 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 6062 */ 6063 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 6064 bool lrucare, bool compound) 6065 { 6066 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 6067 6068 VM_BUG_ON_PAGE(!page->mapping, page); 6069 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 6070 6071 if (mem_cgroup_disabled()) 6072 return; 6073 /* 6074 * Swap faults will attempt to charge the same page multiple 6075 * times. But reuse_swap_page() might have removed the page 6076 * from swapcache already, so we can't check PageSwapCache(). 
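 *
 * (A NULL @memcg here therefore just means mem_cgroup_try_charge() found
 * the page already charged and bailed out early; there is nothing left
 * to commit.)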
6077 */ 6078 if (!memcg) 6079 return; 6080 6081 commit_charge(page, memcg, lrucare); 6082 6083 local_irq_disable(); 6084 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 6085 memcg_check_events(memcg, page); 6086 local_irq_enable(); 6087 6088 if (do_memsw_account() && PageSwapCache(page)) { 6089 swp_entry_t entry = { .val = page_private(page) }; 6090 /* 6091 * The swap entry might not get freed for a long time, 6092 * let's not wait for it. The page already received a 6093 * memory+swap charge, drop the swap entry duplicate. 6094 */ 6095 mem_cgroup_uncharge_swap(entry, nr_pages); 6096 } 6097 } 6098 6099 /** 6100 * mem_cgroup_cancel_charge - cancel a page charge 6101 * @page: page to charge 6102 * @memcg: memcg to charge the page to 6103 * @compound: charge the page as compound or small page 6104 * 6105 * Cancel a charge transaction started by mem_cgroup_try_charge(). 6106 */ 6107 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 6108 bool compound) 6109 { 6110 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 6111 6112 if (mem_cgroup_disabled()) 6113 return; 6114 /* 6115 * Swap faults will attempt to charge the same page multiple 6116 * times. But reuse_swap_page() might have removed the page 6117 * from swapcache already, so we can't check PageSwapCache(). 6118 */ 6119 if (!memcg) 6120 return; 6121 6122 cancel_charge(memcg, nr_pages); 6123 } 6124 6125 struct uncharge_gather { 6126 struct mem_cgroup *memcg; 6127 unsigned long pgpgout; 6128 unsigned long nr_anon; 6129 unsigned long nr_file; 6130 unsigned long nr_kmem; 6131 unsigned long nr_huge; 6132 unsigned long nr_shmem; 6133 struct page *dummy_page; 6134 }; 6135 6136 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6137 { 6138 memset(ug, 0, sizeof(*ug)); 6139 } 6140 6141 static void uncharge_batch(const struct uncharge_gather *ug) 6142 { 6143 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem; 6144 unsigned long flags; 6145 6146 if (!mem_cgroup_is_root(ug->memcg)) { 6147 page_counter_uncharge(&ug->memcg->memory, nr_pages); 6148 if (do_memsw_account()) 6149 page_counter_uncharge(&ug->memcg->memsw, nr_pages); 6150 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6151 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6152 memcg_oom_recover(ug->memcg); 6153 } 6154 6155 local_irq_save(flags); 6156 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); 6157 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); 6158 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); 6159 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); 6160 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6161 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages); 6162 memcg_check_events(ug->memcg, ug->dummy_page); 6163 local_irq_restore(flags); 6164 6165 if (!mem_cgroup_is_root(ug->memcg)) 6166 css_put_many(&ug->memcg->css, nr_pages); 6167 } 6168 6169 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6170 { 6171 VM_BUG_ON_PAGE(PageLRU(page), page); 6172 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) && 6173 !PageHWPoison(page) , page); 6174 6175 if (!page->mem_cgroup) 6176 return; 6177 6178 /* 6179 * Nobody should be changing or seriously looking at 6180 * page->mem_cgroup at this point, we have fully 6181 * exclusive access to the page. 
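 *
 * (Batching note: pages are gathered per memcg in @ug; whenever a page
 * from a different memcg shows up, the batch collected so far is flushed
 * through uncharge_batch() and the gather is restarted for the new
 * memcg.)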
6182 */ 6183 6184 if (ug->memcg != page->mem_cgroup) { 6185 if (ug->memcg) { 6186 uncharge_batch(ug); 6187 uncharge_gather_clear(ug); 6188 } 6189 ug->memcg = page->mem_cgroup; 6190 } 6191 6192 if (!PageKmemcg(page)) { 6193 unsigned int nr_pages = 1; 6194 6195 if (PageTransHuge(page)) { 6196 nr_pages <<= compound_order(page); 6197 ug->nr_huge += nr_pages; 6198 } 6199 if (PageAnon(page)) 6200 ug->nr_anon += nr_pages; 6201 else { 6202 ug->nr_file += nr_pages; 6203 if (PageSwapBacked(page)) 6204 ug->nr_shmem += nr_pages; 6205 } 6206 ug->pgpgout++; 6207 } else { 6208 ug->nr_kmem += 1 << compound_order(page); 6209 __ClearPageKmemcg(page); 6210 } 6211 6212 ug->dummy_page = page; 6213 page->mem_cgroup = NULL; 6214 } 6215 6216 static void uncharge_list(struct list_head *page_list) 6217 { 6218 struct uncharge_gather ug; 6219 struct list_head *next; 6220 6221 uncharge_gather_clear(&ug); 6222 6223 /* 6224 * Note that the list can be a single page->lru; hence the 6225 * do-while loop instead of a simple list_for_each_entry(). 6226 */ 6227 next = page_list->next; 6228 do { 6229 struct page *page; 6230 6231 page = list_entry(next, struct page, lru); 6232 next = page->lru.next; 6233 6234 uncharge_page(page, &ug); 6235 } while (next != page_list); 6236 6237 if (ug.memcg) 6238 uncharge_batch(&ug); 6239 } 6240 6241 /** 6242 * mem_cgroup_uncharge - uncharge a page 6243 * @page: page to uncharge 6244 * 6245 * Uncharge a page previously charged with mem_cgroup_try_charge() and 6246 * mem_cgroup_commit_charge(). 6247 */ 6248 void mem_cgroup_uncharge(struct page *page) 6249 { 6250 struct uncharge_gather ug; 6251 6252 if (mem_cgroup_disabled()) 6253 return; 6254 6255 /* Don't touch page->lru of any random page, pre-check: */ 6256 if (!page->mem_cgroup) 6257 return; 6258 6259 uncharge_gather_clear(&ug); 6260 uncharge_page(page, &ug); 6261 uncharge_batch(&ug); 6262 } 6263 6264 /** 6265 * mem_cgroup_uncharge_list - uncharge a list of page 6266 * @page_list: list of pages to uncharge 6267 * 6268 * Uncharge a list of pages previously charged with 6269 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 6270 */ 6271 void mem_cgroup_uncharge_list(struct list_head *page_list) 6272 { 6273 if (mem_cgroup_disabled()) 6274 return; 6275 6276 if (!list_empty(page_list)) 6277 uncharge_list(page_list); 6278 } 6279 6280 /** 6281 * mem_cgroup_migrate - charge a page's replacement 6282 * @oldpage: currently circulating page 6283 * @newpage: replacement page 6284 * 6285 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6286 * be uncharged upon free. 6287 * 6288 * Both pages must be locked, @newpage->mapping must be set up. 6289 */ 6290 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6291 { 6292 struct mem_cgroup *memcg; 6293 unsigned int nr_pages; 6294 bool compound; 6295 unsigned long flags; 6296 6297 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6298 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6299 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6300 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6301 newpage); 6302 6303 if (mem_cgroup_disabled()) 6304 return; 6305 6306 /* Page cache replacement: new page already charged? */ 6307 if (newpage->mem_cgroup) 6308 return; 6309 6310 /* Swapcache readahead pages can get replaced before being charged */ 6311 memcg = oldpage->mem_cgroup; 6312 if (!memcg) 6313 return; 6314 6315 /* Force-charge the new page. The old one will be freed soon */ 6316 compound = PageTransHuge(newpage); 6317 nr_pages = compound ? 
hpage_nr_pages(newpage) : 1; 6318 6319 page_counter_charge(&memcg->memory, nr_pages); 6320 if (do_memsw_account()) 6321 page_counter_charge(&memcg->memsw, nr_pages); 6322 css_get_many(&memcg->css, nr_pages); 6323 6324 commit_charge(newpage, memcg, false); 6325 6326 local_irq_save(flags); 6327 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 6328 memcg_check_events(memcg, newpage); 6329 local_irq_restore(flags); 6330 } 6331 6332 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 6333 EXPORT_SYMBOL(memcg_sockets_enabled_key); 6334 6335 void mem_cgroup_sk_alloc(struct sock *sk) 6336 { 6337 struct mem_cgroup *memcg; 6338 6339 if (!mem_cgroup_sockets_enabled) 6340 return; 6341 6342 /* 6343 * Socket cloning can throw us here with sk_memcg already 6344 * filled. It won't, however, necessarily happen from 6345 * process context. So testing whether the current task's 6346 * memcg is the root memcg won't help us in this case. 6347 * 6348 * Respecting the original socket's memcg is the better 6349 * decision here. 6350 */ 6351 if (sk->sk_memcg) { 6352 css_get(&sk->sk_memcg->css); 6353 return; 6354 } 6355 6356 rcu_read_lock(); 6357 memcg = mem_cgroup_from_task(current); 6358 if (memcg == root_mem_cgroup) 6359 goto out; 6360 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 6361 goto out; 6362 if (css_tryget_online(&memcg->css)) 6363 sk->sk_memcg = memcg; 6364 out: 6365 rcu_read_unlock(); 6366 } 6367 6368 void mem_cgroup_sk_free(struct sock *sk) 6369 { 6370 if (sk->sk_memcg) 6371 css_put(&sk->sk_memcg->css); 6372 } 6373 6374 /** 6375 * mem_cgroup_charge_skmem - charge socket memory 6376 * @memcg: memcg to charge 6377 * @nr_pages: number of pages to charge 6378 * 6379 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 6380 * @memcg's configured limit, %false if the charge had to be forced.
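 *
 * On the default hierarchy a failed try_charge() is retried with
 * __GFP_NOFAIL, i.e. the memory is charged anyway; %false then tells
 * the caller that @memcg is over its limit so the socket code can treat
 * it as memory pressure and back off.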
6381 */ 6382 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6383 { 6384 gfp_t gfp_mask = GFP_KERNEL; 6385 6386 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6387 struct page_counter *fail; 6388 6389 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 6390 memcg->tcpmem_pressure = 0; 6391 return true; 6392 } 6393 page_counter_charge(&memcg->tcpmem, nr_pages); 6394 memcg->tcpmem_pressure = 1; 6395 return false; 6396 } 6397 6398 /* Don't block in the packet receive path */ 6399 if (in_softirq()) 6400 gfp_mask = GFP_NOWAIT; 6401 6402 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 6403 6404 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 6405 return true; 6406 6407 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 6408 return false; 6409 } 6410 6411 /** 6412 * mem_cgroup_uncharge_skmem - uncharge socket memory 6413 * @memcg: memcg to uncharge 6414 * @nr_pages: number of pages to uncharge 6415 */ 6416 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6417 { 6418 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6419 page_counter_uncharge(&memcg->tcpmem, nr_pages); 6420 return; 6421 } 6422 6423 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 6424 6425 refill_stock(memcg, nr_pages); 6426 } 6427 6428 static int __init cgroup_memory(char *s) 6429 { 6430 char *token; 6431 6432 while ((token = strsep(&s, ",")) != NULL) { 6433 if (!*token) 6434 continue; 6435 if (!strcmp(token, "nosocket")) 6436 cgroup_memory_nosocket = true; 6437 if (!strcmp(token, "nokmem")) 6438 cgroup_memory_nokmem = true; 6439 } 6440 return 0; 6441 } 6442 __setup("cgroup.memory=", cgroup_memory); 6443 6444 /* 6445 * subsys_initcall() for memory controller. 6446 * 6447 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 6448 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 6449 * basically everything that doesn't depend on a specific mem_cgroup structure 6450 * should be initialized from here. 6451 */ 6452 static int __init mem_cgroup_init(void) 6453 { 6454 int cpu, node; 6455 6456 #ifdef CONFIG_MEMCG_KMEM 6457 /* 6458 * Kmem cache creation is mostly done with the slab_mutex held, 6459 * so use a workqueue with limited concurrency to avoid stalling 6460 * all worker threads in case lots of cgroups are created and 6461 * destroyed simultaneously. 6462 */ 6463 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); 6464 BUG_ON(!memcg_kmem_cache_wq); 6465 #endif 6466 6467 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 6468 memcg_hotplug_cpu_dead); 6469 6470 for_each_possible_cpu(cpu) 6471 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 6472 drain_local_stock); 6473 6474 for_each_node(node) { 6475 struct mem_cgroup_tree_per_node *rtpn; 6476 6477 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 6478 node_online(node) ? node : NUMA_NO_NODE); 6479 6480 rtpn->rb_root = RB_ROOT; 6481 rtpn->rb_rightmost = NULL; 6482 spin_lock_init(&rtpn->lock); 6483 soft_limit_tree.rb_tree_per_node[node] = rtpn; 6484 } 6485 6486 return 0; 6487 } 6488 subsys_initcall(mem_cgroup_init); 6489 6490 #ifdef CONFIG_MEMCG_SWAP 6491 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 6492 { 6493 while (!refcount_inc_not_zero(&memcg->id.ref)) { 6494 /* 6495 * The root cgroup cannot be destroyed, so it's refcount must 6496 * always be >= 1. 
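 *
 * If the id reference of @memcg has already dropped to zero (the group
 * has been offlined and its id freed), walk up the tree and pin the
 * nearest ancestor that is still online instead.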
6497 */ 6498 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 6499 VM_BUG_ON(1); 6500 break; 6501 } 6502 memcg = parent_mem_cgroup(memcg); 6503 if (!memcg) 6504 memcg = root_mem_cgroup; 6505 } 6506 return memcg; 6507 } 6508 6509 /** 6510 * mem_cgroup_swapout - transfer a memsw charge to swap 6511 * @page: page whose memsw charge to transfer 6512 * @entry: swap entry to move the charge to 6513 * 6514 * Transfer the memsw charge of @page to @entry. 6515 */ 6516 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 6517 { 6518 struct mem_cgroup *memcg, *swap_memcg; 6519 unsigned int nr_entries; 6520 unsigned short oldid; 6521 6522 VM_BUG_ON_PAGE(PageLRU(page), page); 6523 VM_BUG_ON_PAGE(page_count(page), page); 6524 6525 if (!do_memsw_account()) 6526 return; 6527 6528 memcg = page->mem_cgroup; 6529 6530 /* Readahead page, never charged */ 6531 if (!memcg) 6532 return; 6533 6534 /* 6535 * In case the memcg owning these pages has been offlined and doesn't 6536 * have an ID allocated to it anymore, charge the closest online 6537 * ancestor for the swap instead and transfer the memory+swap charge. 6538 */ 6539 swap_memcg = mem_cgroup_id_get_online(memcg); 6540 nr_entries = hpage_nr_pages(page); 6541 /* Get references for the tail pages, too */ 6542 if (nr_entries > 1) 6543 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 6544 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 6545 nr_entries); 6546 VM_BUG_ON_PAGE(oldid, page); 6547 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 6548 6549 page->mem_cgroup = NULL; 6550 6551 if (!mem_cgroup_is_root(memcg)) 6552 page_counter_uncharge(&memcg->memory, nr_entries); 6553 6554 if (memcg != swap_memcg) { 6555 if (!mem_cgroup_is_root(swap_memcg)) 6556 page_counter_charge(&swap_memcg->memsw, nr_entries); 6557 page_counter_uncharge(&memcg->memsw, nr_entries); 6558 } 6559 6560 /* 6561 * Interrupts should be disabled here because the caller holds the 6562 * i_pages lock which is taken with interrupts-off. It is 6563 * important here to have the interrupts disabled because it is the 6564 * only synchronisation we have for updating the per-CPU variables. 6565 */ 6566 VM_BUG_ON(!irqs_disabled()); 6567 mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), 6568 -nr_entries); 6569 memcg_check_events(memcg, page); 6570 6571 if (!mem_cgroup_is_root(memcg)) 6572 css_put_many(&memcg->css, nr_entries); 6573 } 6574 6575 /** 6576 * mem_cgroup_try_charge_swap - try charging swap space for a page 6577 * @page: page being added to swap 6578 * @entry: swap entry to charge 6579 * 6580 * Try to charge @page's memcg for the swap space at @entry. 6581 * 6582 * Returns 0 on success, -ENOMEM on failure. 
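 *
 * (On the default hierarchy this charges the per-memcg swap counter and
 * records the owning memcg id for @entry in the swap cgroup map, so that
 * mem_cgroup_uncharge_swap() can later find the right group.)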
6583 */ 6584 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 6585 { 6586 unsigned int nr_pages = hpage_nr_pages(page); 6587 struct page_counter *counter; 6588 struct mem_cgroup *memcg; 6589 unsigned short oldid; 6590 6591 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) 6592 return 0; 6593 6594 memcg = page->mem_cgroup; 6595 6596 /* Readahead page, never charged */ 6597 if (!memcg) 6598 return 0; 6599 6600 if (!entry.val) { 6601 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 6602 return 0; 6603 } 6604 6605 memcg = mem_cgroup_id_get_online(memcg); 6606 6607 if (!mem_cgroup_is_root(memcg) && 6608 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 6609 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 6610 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 6611 mem_cgroup_id_put(memcg); 6612 return -ENOMEM; 6613 } 6614 6615 /* Get references for the tail pages, too */ 6616 if (nr_pages > 1) 6617 mem_cgroup_id_get_many(memcg, nr_pages - 1); 6618 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 6619 VM_BUG_ON_PAGE(oldid, page); 6620 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 6621 6622 return 0; 6623 } 6624 6625 /** 6626 * mem_cgroup_uncharge_swap - uncharge swap space 6627 * @entry: swap entry to uncharge 6628 * @nr_pages: the amount of swap space to uncharge 6629 */ 6630 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 6631 { 6632 struct mem_cgroup *memcg; 6633 unsigned short id; 6634 6635 if (!do_swap_account) 6636 return; 6637 6638 id = swap_cgroup_record(entry, 0, nr_pages); 6639 rcu_read_lock(); 6640 memcg = mem_cgroup_from_id(id); 6641 if (memcg) { 6642 if (!mem_cgroup_is_root(memcg)) { 6643 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6644 page_counter_uncharge(&memcg->swap, nr_pages); 6645 else 6646 page_counter_uncharge(&memcg->memsw, nr_pages); 6647 } 6648 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 6649 mem_cgroup_id_put_many(memcg, nr_pages); 6650 } 6651 rcu_read_unlock(); 6652 } 6653 6654 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 6655 { 6656 long nr_swap_pages = get_nr_swap_pages(); 6657 6658 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6659 return nr_swap_pages; 6660 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 6661 nr_swap_pages = min_t(long, nr_swap_pages, 6662 READ_ONCE(memcg->swap.max) - 6663 page_counter_read(&memcg->swap)); 6664 return nr_swap_pages; 6665 } 6666 6667 bool mem_cgroup_swap_full(struct page *page) 6668 { 6669 struct mem_cgroup *memcg; 6670 6671 VM_BUG_ON_PAGE(!PageLocked(page), page); 6672 6673 if (vm_swap_full()) 6674 return true; 6675 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6676 return false; 6677 6678 memcg = page->mem_cgroup; 6679 if (!memcg) 6680 return false; 6681 6682 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 6683 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max) 6684 return true; 6685 6686 return false; 6687 } 6688 6689 /* for remember boot option*/ 6690 #ifdef CONFIG_MEMCG_SWAP_ENABLED 6691 static int really_do_swap_account __initdata = 1; 6692 #else 6693 static int really_do_swap_account __initdata; 6694 #endif 6695 6696 static int __init enable_swap_account(char *s) 6697 { 6698 if (!strcmp(s, "1")) 6699 really_do_swap_account = 1; 6700 else if (!strcmp(s, "0")) 6701 really_do_swap_account = 0; 6702 return 1; 6703 } 6704 __setup("swapaccount=", enable_swap_account); 6705 6706 static u64 swap_current_read(struct cgroup_subsys_state *css, 6707 struct 
cftype *cft) 6708 { 6709 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6710 6711 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 6712 } 6713 6714 static int swap_max_show(struct seq_file *m, void *v) 6715 { 6716 return seq_puts_memcg_tunable(m, 6717 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 6718 } 6719 6720 static ssize_t swap_max_write(struct kernfs_open_file *of, 6721 char *buf, size_t nbytes, loff_t off) 6722 { 6723 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6724 unsigned long max; 6725 int err; 6726 6727 buf = strstrip(buf); 6728 err = page_counter_memparse(buf, "max", &max); 6729 if (err) 6730 return err; 6731 6732 xchg(&memcg->swap.max, max); 6733 6734 return nbytes; 6735 } 6736 6737 static int swap_events_show(struct seq_file *m, void *v) 6738 { 6739 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6740 6741 seq_printf(m, "max %lu\n", 6742 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 6743 seq_printf(m, "fail %lu\n", 6744 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 6745 6746 return 0; 6747 } 6748 6749 static struct cftype swap_files[] = { 6750 { 6751 .name = "swap.current", 6752 .flags = CFTYPE_NOT_ON_ROOT, 6753 .read_u64 = swap_current_read, 6754 }, 6755 { 6756 .name = "swap.max", 6757 .flags = CFTYPE_NOT_ON_ROOT, 6758 .seq_show = swap_max_show, 6759 .write = swap_max_write, 6760 }, 6761 { 6762 .name = "swap.events", 6763 .flags = CFTYPE_NOT_ON_ROOT, 6764 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 6765 .seq_show = swap_events_show, 6766 }, 6767 { } /* terminate */ 6768 }; 6769 6770 static struct cftype memsw_cgroup_files[] = { 6771 { 6772 .name = "memsw.usage_in_bytes", 6773 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6774 .read_u64 = mem_cgroup_read_u64, 6775 }, 6776 { 6777 .name = "memsw.max_usage_in_bytes", 6778 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6779 .write = mem_cgroup_reset, 6780 .read_u64 = mem_cgroup_read_u64, 6781 }, 6782 { 6783 .name = "memsw.limit_in_bytes", 6784 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6785 .write = mem_cgroup_write, 6786 .read_u64 = mem_cgroup_read_u64, 6787 }, 6788 { 6789 .name = "memsw.failcnt", 6790 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6791 .write = mem_cgroup_reset, 6792 .read_u64 = mem_cgroup_read_u64, 6793 }, 6794 { }, /* terminate */ 6795 }; 6796 6797 static int __init mem_cgroup_swap_init(void) 6798 { 6799 if (!mem_cgroup_disabled() && really_do_swap_account) { 6800 do_swap_account = 1; 6801 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, 6802 swap_files)); 6803 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 6804 memsw_cgroup_files)); 6805 } 6806 return 0; 6807 } 6808 subsys_initcall(mem_cgroup_swap_init); 6809 6810 #endif /* CONFIG_MEMCG_SWAP */ 6811
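/*
 * Illustrative kernel command line usage of the boot parameters defined
 * in this file (documentation only):
 *
 *	cgroup.memory=nosocket,nokmem	disable socket and kernel memory
 *					accounting
 *	swapaccount=0			disable swap accounting for memory
 *					cgroups
 */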