1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 */ 24 25 #include <linux/page_counter.h> 26 #include <linux/memcontrol.h> 27 #include <linux/cgroup.h> 28 #include <linux/pagewalk.h> 29 #include <linux/sched/mm.h> 30 #include <linux/shmem_fs.h> 31 #include <linux/hugetlb.h> 32 #include <linux/pagemap.h> 33 #include <linux/vm_event_item.h> 34 #include <linux/smp.h> 35 #include <linux/page-flags.h> 36 #include <linux/backing-dev.h> 37 #include <linux/bit_spinlock.h> 38 #include <linux/rcupdate.h> 39 #include <linux/limits.h> 40 #include <linux/export.h> 41 #include <linux/mutex.h> 42 #include <linux/rbtree.h> 43 #include <linux/slab.h> 44 #include <linux/swap.h> 45 #include <linux/swapops.h> 46 #include <linux/spinlock.h> 47 #include <linux/eventfd.h> 48 #include <linux/poll.h> 49 #include <linux/sort.h> 50 #include <linux/fs.h> 51 #include <linux/seq_file.h> 52 #include <linux/vmpressure.h> 53 #include <linux/mm_inline.h> 54 #include <linux/swap_cgroup.h> 55 #include <linux/cpu.h> 56 #include <linux/oom.h> 57 #include <linux/lockdep.h> 58 #include <linux/file.h> 59 #include <linux/tracehook.h> 60 #include <linux/psi.h> 61 #include <linux/seq_buf.h> 62 #include "internal.h" 63 #include <net/sock.h> 64 #include <net/ip.h> 65 #include "slab.h" 66 67 #include <linux/uaccess.h> 68 69 #include <trace/events/vmscan.h> 70 71 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 72 EXPORT_SYMBOL(memory_cgrp_subsys); 73 74 struct mem_cgroup *root_mem_cgroup __read_mostly; 75 76 #define MEM_CGROUP_RECLAIM_RETRIES 5 77 78 /* Socket memory accounting disabled? */ 79 static bool cgroup_memory_nosocket; 80 81 /* Kernel memory accounting disabled? 
*/ 82 static bool cgroup_memory_nokmem; 83 84 /* Whether the swap controller is active */ 85 #ifdef CONFIG_MEMCG_SWAP 86 bool cgroup_memory_noswap __read_mostly; 87 #else 88 #define cgroup_memory_noswap 1 89 #endif 90 91 #ifdef CONFIG_CGROUP_WRITEBACK 92 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 93 #endif 94 95 /* Whether legacy memory+swap accounting is active */ 96 static bool do_memsw_account(void) 97 { 98 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap; 99 } 100 101 #define THRESHOLDS_EVENTS_TARGET 128 102 #define SOFTLIMIT_EVENTS_TARGET 1024 103 104 /* 105 * Cgroups above their limits are maintained in a RB-Tree, independent of 106 * their hierarchy representation 107 */ 108 109 struct mem_cgroup_tree_per_node { 110 struct rb_root rb_root; 111 struct rb_node *rb_rightmost; 112 spinlock_t lock; 113 }; 114 115 struct mem_cgroup_tree { 116 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 117 }; 118 119 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 120 121 /* for OOM */ 122 struct mem_cgroup_eventfd_list { 123 struct list_head list; 124 struct eventfd_ctx *eventfd; 125 }; 126 127 /* 128 * cgroup_event represents events which userspace want to receive. 129 */ 130 struct mem_cgroup_event { 131 /* 132 * memcg which the event belongs to. 133 */ 134 struct mem_cgroup *memcg; 135 /* 136 * eventfd to signal userspace about the event. 137 */ 138 struct eventfd_ctx *eventfd; 139 /* 140 * Each of these stored in a list by the cgroup. 141 */ 142 struct list_head list; 143 /* 144 * register_event() callback will be used to add new userspace 145 * waiter for changes related to this event. Use eventfd_signal() 146 * on eventfd to send notification to userspace. 147 */ 148 int (*register_event)(struct mem_cgroup *memcg, 149 struct eventfd_ctx *eventfd, const char *args); 150 /* 151 * unregister_event() callback will be called when userspace closes 152 * the eventfd or on cgroup removing. This callback must be set, 153 * if you want provide notification functionality. 154 */ 155 void (*unregister_event)(struct mem_cgroup *memcg, 156 struct eventfd_ctx *eventfd); 157 /* 158 * All fields below needed to unregister event when 159 * userspace closes eventfd. 160 */ 161 poll_table pt; 162 wait_queue_head_t *wqh; 163 wait_queue_entry_t wait; 164 struct work_struct remove; 165 }; 166 167 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 168 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 169 170 /* Stuffs for move charges at task migration. */ 171 /* 172 * Types of charges to be moved. 173 */ 174 #define MOVE_ANON 0x1U 175 #define MOVE_FILE 0x2U 176 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 177 178 /* "mc" and its members are protected by cgroup_mutex */ 179 static struct move_charge_struct { 180 spinlock_t lock; /* for from, to */ 181 struct mm_struct *mm; 182 struct mem_cgroup *from; 183 struct mem_cgroup *to; 184 unsigned long flags; 185 unsigned long precharge; 186 unsigned long moved_charge; 187 unsigned long moved_swap; 188 struct task_struct *moving_task; /* a task moving charges */ 189 wait_queue_head_t waitq; /* a waitq for other context */ 190 } mc = { 191 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 192 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 193 }; 194 195 /* 196 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 197 * limit reclaim to prevent infinite loops, if they ever occur. 
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few kmem-limited. Or also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE	4
#define MEMCG_CACHES_MAX_SIZE	MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
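		/*
		 * Mark that this memcg has freeable objects on this node for
		 * the given shrinker, so memcg-aware reclaim in shrink_slab()
		 * will invoke that shrinker for this memcg.
		 */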
struct memcg_shrinker_map *map; 428 429 rcu_read_lock(); 430 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); 431 /* Pairs with smp mb in shrink_slab() */ 432 smp_mb__before_atomic(); 433 set_bit(shrinker_id, map->map); 434 rcu_read_unlock(); 435 } 436 } 437 438 /** 439 * mem_cgroup_css_from_page - css of the memcg associated with a page 440 * @page: page of interest 441 * 442 * If memcg is bound to the default hierarchy, css of the memcg associated 443 * with @page is returned. The returned css remains associated with @page 444 * until it is released. 445 * 446 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 447 * is returned. 448 */ 449 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 450 { 451 struct mem_cgroup *memcg; 452 453 memcg = page->mem_cgroup; 454 455 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 456 memcg = root_mem_cgroup; 457 458 return &memcg->css; 459 } 460 461 /** 462 * page_cgroup_ino - return inode number of the memcg a page is charged to 463 * @page: the page 464 * 465 * Look up the closest online ancestor of the memory cgroup @page is charged to 466 * and return its inode number or 0 if @page is not charged to any cgroup. It 467 * is safe to call this function without holding a reference to @page. 468 * 469 * Note, this function is inherently racy, because there is nothing to prevent 470 * the cgroup inode from getting torn down and potentially reallocated a moment 471 * after page_cgroup_ino() returns, so it only should be used by callers that 472 * do not care (such as procfs interfaces). 473 */ 474 ino_t page_cgroup_ino(struct page *page) 475 { 476 struct mem_cgroup *memcg; 477 unsigned long ino = 0; 478 479 rcu_read_lock(); 480 if (PageSlab(page) && !PageTail(page)) 481 memcg = memcg_from_slab_page(page); 482 else 483 memcg = READ_ONCE(page->mem_cgroup); 484 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 485 memcg = parent_mem_cgroup(memcg); 486 if (memcg) 487 ino = cgroup_ino(memcg->css.cgroup); 488 rcu_read_unlock(); 489 return ino; 490 } 491 492 static struct mem_cgroup_per_node * 493 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 494 { 495 int nid = page_to_nid(page); 496 497 return memcg->nodeinfo[nid]; 498 } 499 500 static struct mem_cgroup_tree_per_node * 501 soft_limit_tree_node(int nid) 502 { 503 return soft_limit_tree.rb_tree_per_node[nid]; 504 } 505 506 static struct mem_cgroup_tree_per_node * 507 soft_limit_tree_from_page(struct page *page) 508 { 509 int nid = page_to_nid(page); 510 511 return soft_limit_tree.rb_tree_per_node[nid]; 512 } 513 514 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 515 struct mem_cgroup_tree_per_node *mctz, 516 unsigned long new_usage_in_excess) 517 { 518 struct rb_node **p = &mctz->rb_root.rb_node; 519 struct rb_node *parent = NULL; 520 struct mem_cgroup_per_node *mz_node; 521 bool rightmost = true; 522 523 if (mz->on_tree) 524 return; 525 526 mz->usage_in_excess = new_usage_in_excess; 527 if (!mz->usage_in_excess) 528 return; 529 while (*p) { 530 parent = *p; 531 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 532 tree_node); 533 if (mz->usage_in_excess < mz_node->usage_in_excess) { 534 p = &(*p)->rb_left; 535 rightmost = false; 536 } 537 538 /* 539 * We can't avoid mem cgroups that are over their soft 540 * limit by the same amount 541 */ 542 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 543 p = &(*p)->rb_right; 544 } 545 546 if (rightmost) 547 mctz->rb_rightmost = &mz->tree_node; 

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
656 */ 657 __mem_cgroup_remove_exceeded(mz, mctz); 658 if (!soft_limit_excess(mz->memcg) || 659 !css_tryget(&mz->memcg->css)) 660 goto retry; 661 done: 662 return mz; 663 } 664 665 static struct mem_cgroup_per_node * 666 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 667 { 668 struct mem_cgroup_per_node *mz; 669 670 spin_lock_irq(&mctz->lock); 671 mz = __mem_cgroup_largest_soft_limit_node(mctz); 672 spin_unlock_irq(&mctz->lock); 673 return mz; 674 } 675 676 /** 677 * __mod_memcg_state - update cgroup memory statistics 678 * @memcg: the memory cgroup 679 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 680 * @val: delta to add to the counter, can be negative 681 */ 682 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 683 { 684 long x; 685 686 if (mem_cgroup_disabled()) 687 return; 688 689 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); 690 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 691 struct mem_cgroup *mi; 692 693 /* 694 * Batch local counters to keep them in sync with 695 * the hierarchical ones. 696 */ 697 __this_cpu_add(memcg->vmstats_local->stat[idx], x); 698 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 699 atomic_long_add(x, &mi->vmstats[idx]); 700 x = 0; 701 } 702 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); 703 } 704 705 static struct mem_cgroup_per_node * 706 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 707 { 708 struct mem_cgroup *parent; 709 710 parent = parent_mem_cgroup(pn->memcg); 711 if (!parent) 712 return NULL; 713 return mem_cgroup_nodeinfo(parent, nid); 714 } 715 716 /** 717 * __mod_lruvec_state - update lruvec memory statistics 718 * @lruvec: the lruvec 719 * @idx: the stat item 720 * @val: delta to add to the counter, can be negative 721 * 722 * The lruvec is the intersection of the NUMA node and a cgroup. This 723 * function updates the all three counters that are affected by a 724 * change of state at this level: per-node, per-cgroup, per-lruvec. 725 */ 726 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 727 int val) 728 { 729 pg_data_t *pgdat = lruvec_pgdat(lruvec); 730 struct mem_cgroup_per_node *pn; 731 struct mem_cgroup *memcg; 732 long x; 733 734 /* Update node */ 735 __mod_node_page_state(pgdat, idx, val); 736 737 if (mem_cgroup_disabled()) 738 return; 739 740 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 741 memcg = pn->memcg; 742 743 /* Update memcg */ 744 __mod_memcg_state(memcg, idx, val); 745 746 /* Update lruvec */ 747 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); 748 749 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 750 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 751 struct mem_cgroup_per_node *pi; 752 753 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 754 atomic_long_add(x, &pi->lruvec_stat[idx]); 755 x = 0; 756 } 757 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 758 } 759 760 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) 761 { 762 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 763 struct mem_cgroup *memcg; 764 struct lruvec *lruvec; 765 766 rcu_read_lock(); 767 memcg = mem_cgroup_from_obj(p); 768 769 /* Untracked pages have no memcg, no lruvec. 
	   Update only the node */
	if (!memcg || memcg == root_mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

/**
 * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (unlikely(current->active_memcg)) {
		struct mem_cgroup *memcg;

		rcu_read_lock();
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
		rcu_read_unlock();
		return memcg;
	}
	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
994 * 995 * Caller must pass the return value in @prev on subsequent 996 * invocations for reference counting, or use mem_cgroup_iter_break() 997 * to cancel a hierarchy walk before the round-trip is complete. 998 * 999 * Reclaimers can specify a node and a priority level in @reclaim to 1000 * divide up the memcgs in the hierarchy among all concurrent 1001 * reclaimers operating on the same node and priority. 1002 */ 1003 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1004 struct mem_cgroup *prev, 1005 struct mem_cgroup_reclaim_cookie *reclaim) 1006 { 1007 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 1008 struct cgroup_subsys_state *css = NULL; 1009 struct mem_cgroup *memcg = NULL; 1010 struct mem_cgroup *pos = NULL; 1011 1012 if (mem_cgroup_disabled()) 1013 return NULL; 1014 1015 if (!root) 1016 root = root_mem_cgroup; 1017 1018 if (prev && !reclaim) 1019 pos = prev; 1020 1021 if (!root->use_hierarchy && root != root_mem_cgroup) { 1022 if (prev) 1023 goto out; 1024 return root; 1025 } 1026 1027 rcu_read_lock(); 1028 1029 if (reclaim) { 1030 struct mem_cgroup_per_node *mz; 1031 1032 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); 1033 iter = &mz->iter; 1034 1035 if (prev && reclaim->generation != iter->generation) 1036 goto out_unlock; 1037 1038 while (1) { 1039 pos = READ_ONCE(iter->position); 1040 if (!pos || css_tryget(&pos->css)) 1041 break; 1042 /* 1043 * css reference reached zero, so iter->position will 1044 * be cleared by ->css_released. However, we should not 1045 * rely on this happening soon, because ->css_released 1046 * is called from a work queue, and by busy-waiting we 1047 * might block it. So we clear iter->position right 1048 * away. 1049 */ 1050 (void)cmpxchg(&iter->position, pos, NULL); 1051 } 1052 } 1053 1054 if (pos) 1055 css = &pos->css; 1056 1057 for (;;) { 1058 css = css_next_descendant_pre(css, &root->css); 1059 if (!css) { 1060 /* 1061 * Reclaimers share the hierarchy walk, and a 1062 * new one might jump in right at the end of 1063 * the hierarchy - make sure they see at least 1064 * one group and restart from the beginning. 1065 */ 1066 if (!prev) 1067 continue; 1068 break; 1069 } 1070 1071 /* 1072 * Verify the css and acquire a reference. The root 1073 * is provided by the caller, so we know it's alive 1074 * and kicking, and don't take an extra reference. 1075 */ 1076 memcg = mem_cgroup_from_css(css); 1077 1078 if (css == &root->css) 1079 break; 1080 1081 if (css_tryget(css)) 1082 break; 1083 1084 memcg = NULL; 1085 } 1086 1087 if (reclaim) { 1088 /* 1089 * The position could have already been updated by a competing 1090 * thread, so check that the value hasn't changed since we read 1091 * it to avoid reclaiming from the same cgroup twice. 
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable - see the
 * access rules in commit_charge().
1206 */ 1207 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat) 1208 { 1209 struct mem_cgroup_per_node *mz; 1210 struct mem_cgroup *memcg; 1211 struct lruvec *lruvec; 1212 1213 if (mem_cgroup_disabled()) { 1214 lruvec = &pgdat->__lruvec; 1215 goto out; 1216 } 1217 1218 memcg = page->mem_cgroup; 1219 /* 1220 * Swapcache readahead pages are added to the LRU - and 1221 * possibly migrated - before they are charged. 1222 */ 1223 if (!memcg) 1224 memcg = root_mem_cgroup; 1225 1226 mz = mem_cgroup_page_nodeinfo(memcg, page); 1227 lruvec = &mz->lruvec; 1228 out: 1229 /* 1230 * Since a node can be onlined after the mem_cgroup was created, 1231 * we have to be prepared to initialize lruvec->zone here; 1232 * and if offlined then reonlined, we need to reinitialize it. 1233 */ 1234 if (unlikely(lruvec->pgdat != pgdat)) 1235 lruvec->pgdat = pgdat; 1236 return lruvec; 1237 } 1238 1239 /** 1240 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1241 * @lruvec: mem_cgroup per zone lru vector 1242 * @lru: index of lru list the page is sitting on 1243 * @zid: zone id of the accounted pages 1244 * @nr_pages: positive when adding or negative when removing 1245 * 1246 * This function must be called under lru_lock, just before a page is added 1247 * to or just after a page is removed from an lru list (that ordering being 1248 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1249 */ 1250 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1251 int zid, int nr_pages) 1252 { 1253 struct mem_cgroup_per_node *mz; 1254 unsigned long *lru_size; 1255 long size; 1256 1257 if (mem_cgroup_disabled()) 1258 return; 1259 1260 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1261 lru_size = &mz->lru_zone_size[zid][lru]; 1262 1263 if (nr_pages < 0) 1264 *lru_size += nr_pages; 1265 1266 size = *lru_size; 1267 if (WARN_ONCE(size < 0, 1268 "%s(%p, %d, %d): lru_size %ld\n", 1269 __func__, lruvec, lru, nr_pages, size)) { 1270 VM_BUG_ON(1); 1271 *lru_size = 0; 1272 } 1273 1274 if (nr_pages > 0) 1275 *lru_size += nr_pages; 1276 } 1277 1278 /** 1279 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1280 * @memcg: the memory cgroup 1281 * 1282 * Returns the maximum amount of memory @mem can be charged with, in 1283 * pages. 1284 */ 1285 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1286 { 1287 unsigned long margin = 0; 1288 unsigned long count; 1289 unsigned long limit; 1290 1291 count = page_counter_read(&memcg->memory); 1292 limit = READ_ONCE(memcg->memory.max); 1293 if (count < limit) 1294 margin = limit - count; 1295 1296 if (do_memsw_account()) { 1297 count = page_counter_read(&memcg->memsw); 1298 limit = READ_ONCE(memcg->memsw.max); 1299 if (count < limit) 1300 margin = min(margin, limit - count); 1301 else 1302 margin = 0; 1303 } 1304 1305 return margin; 1306 } 1307 1308 /* 1309 * A routine for checking "mem" is under move_account() or not. 1310 * 1311 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1312 * moving cgroups. This is for waiting at high-memory pressure 1313 * caused by "move". 1314 */ 1315 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1316 { 1317 struct mem_cgroup *from; 1318 struct mem_cgroup *to; 1319 bool ret = false; 1320 /* 1321 * Unlike task_move routines, we access mc.to, mc.from not under 1322 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 
1323 */ 1324 spin_lock(&mc.lock); 1325 from = mc.from; 1326 to = mc.to; 1327 if (!from) 1328 goto unlock; 1329 1330 ret = mem_cgroup_is_descendant(from, memcg) || 1331 mem_cgroup_is_descendant(to, memcg); 1332 unlock: 1333 spin_unlock(&mc.lock); 1334 return ret; 1335 } 1336 1337 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1338 { 1339 if (mc.moving_task && current != mc.moving_task) { 1340 if (mem_cgroup_under_move(memcg)) { 1341 DEFINE_WAIT(wait); 1342 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1343 /* moving charge context might have finished. */ 1344 if (mc.moving_task) 1345 schedule(); 1346 finish_wait(&mc.waitq, &wait); 1347 return true; 1348 } 1349 } 1350 return false; 1351 } 1352 1353 static char *memory_stat_format(struct mem_cgroup *memcg) 1354 { 1355 struct seq_buf s; 1356 int i; 1357 1358 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 1359 if (!s.buffer) 1360 return NULL; 1361 1362 /* 1363 * Provide statistics on the state of the memory subsystem as 1364 * well as cumulative event counters that show past behavior. 1365 * 1366 * This list is ordered following a combination of these gradients: 1367 * 1) generic big picture -> specifics and details 1368 * 2) reflecting userspace activity -> reflecting kernel heuristics 1369 * 1370 * Current memory state: 1371 */ 1372 1373 seq_buf_printf(&s, "anon %llu\n", 1374 (u64)memcg_page_state(memcg, NR_ANON_MAPPED) * 1375 PAGE_SIZE); 1376 seq_buf_printf(&s, "file %llu\n", 1377 (u64)memcg_page_state(memcg, NR_FILE_PAGES) * 1378 PAGE_SIZE); 1379 seq_buf_printf(&s, "kernel_stack %llu\n", 1380 (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1381 1024); 1382 seq_buf_printf(&s, "slab %llu\n", 1383 (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) + 1384 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) * 1385 PAGE_SIZE); 1386 seq_buf_printf(&s, "sock %llu\n", 1387 (u64)memcg_page_state(memcg, MEMCG_SOCK) * 1388 PAGE_SIZE); 1389 1390 seq_buf_printf(&s, "shmem %llu\n", 1391 (u64)memcg_page_state(memcg, NR_SHMEM) * 1392 PAGE_SIZE); 1393 seq_buf_printf(&s, "file_mapped %llu\n", 1394 (u64)memcg_page_state(memcg, NR_FILE_MAPPED) * 1395 PAGE_SIZE); 1396 seq_buf_printf(&s, "file_dirty %llu\n", 1397 (u64)memcg_page_state(memcg, NR_FILE_DIRTY) * 1398 PAGE_SIZE); 1399 seq_buf_printf(&s, "file_writeback %llu\n", 1400 (u64)memcg_page_state(memcg, NR_WRITEBACK) * 1401 PAGE_SIZE); 1402 1403 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1404 seq_buf_printf(&s, "anon_thp %llu\n", 1405 (u64)memcg_page_state(memcg, NR_ANON_THPS) * 1406 HPAGE_PMD_SIZE); 1407 #endif 1408 1409 for (i = 0; i < NR_LRU_LISTS; i++) 1410 seq_buf_printf(&s, "%s %llu\n", lru_list_name(i), 1411 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 1412 PAGE_SIZE); 1413 1414 seq_buf_printf(&s, "slab_reclaimable %llu\n", 1415 (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) * 1416 PAGE_SIZE); 1417 seq_buf_printf(&s, "slab_unreclaimable %llu\n", 1418 (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) * 1419 PAGE_SIZE); 1420 1421 /* Accumulated memory events */ 1422 1423 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT), 1424 memcg_events(memcg, PGFAULT)); 1425 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), 1426 memcg_events(memcg, PGMAJFAULT)); 1427 1428 seq_buf_printf(&s, "workingset_refault %lu\n", 1429 memcg_page_state(memcg, WORKINGSET_REFAULT)); 1430 seq_buf_printf(&s, "workingset_activate %lu\n", 1431 memcg_page_state(memcg, WORKINGSET_ACTIVATE)); 1432 seq_buf_printf(&s, "workingset_restore %lu\n", 1433 memcg_page_state(memcg, 
WORKINGSET_RESTORE)); 1434 seq_buf_printf(&s, "workingset_nodereclaim %lu\n", 1435 memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); 1436 1437 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL), 1438 memcg_events(memcg, PGREFILL)); 1439 seq_buf_printf(&s, "pgscan %lu\n", 1440 memcg_events(memcg, PGSCAN_KSWAPD) + 1441 memcg_events(memcg, PGSCAN_DIRECT)); 1442 seq_buf_printf(&s, "pgsteal %lu\n", 1443 memcg_events(memcg, PGSTEAL_KSWAPD) + 1444 memcg_events(memcg, PGSTEAL_DIRECT)); 1445 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE), 1446 memcg_events(memcg, PGACTIVATE)); 1447 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE), 1448 memcg_events(memcg, PGDEACTIVATE)); 1449 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE), 1450 memcg_events(memcg, PGLAZYFREE)); 1451 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED), 1452 memcg_events(memcg, PGLAZYFREED)); 1453 1454 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1455 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC), 1456 memcg_events(memcg, THP_FAULT_ALLOC)); 1457 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC), 1458 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 1459 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1460 1461 /* The above should easily fit into one page */ 1462 WARN_ON_ONCE(seq_buf_has_overflowed(&s)); 1463 1464 return s.buffer; 1465 } 1466 1467 #define K(x) ((x) << (PAGE_SHIFT-10)) 1468 /** 1469 * mem_cgroup_print_oom_context: Print OOM information relevant to 1470 * memory controller. 1471 * @memcg: The memory cgroup that went over limit 1472 * @p: Task that is going to be killed 1473 * 1474 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1475 * enabled 1476 */ 1477 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1478 { 1479 rcu_read_lock(); 1480 1481 if (memcg) { 1482 pr_cont(",oom_memcg="); 1483 pr_cont_cgroup_path(memcg->css.cgroup); 1484 } else 1485 pr_cont(",global_oom"); 1486 if (p) { 1487 pr_cont(",task_memcg="); 1488 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1489 } 1490 rcu_read_unlock(); 1491 } 1492 1493 /** 1494 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1495 * memory controller. 1496 * @memcg: The memory cgroup that went over limit 1497 */ 1498 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1499 { 1500 char *buf; 1501 1502 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1503 K((u64)page_counter_read(&memcg->memory)), 1504 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1505 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1506 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1507 K((u64)page_counter_read(&memcg->swap)), 1508 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1509 else { 1510 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1511 K((u64)page_counter_read(&memcg->memsw)), 1512 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1513 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1514 K((u64)page_counter_read(&memcg->kmem)), 1515 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1516 } 1517 1518 pr_info("Memory cgroup stats for "); 1519 pr_cont_cgroup_path(memcg->css.cgroup); 1520 pr_cont(":"); 1521 buf = memory_stat_format(memcg); 1522 if (!buf) 1523 return; 1524 pr_info("%s", buf); 1525 kfree(buf); 1526 } 1527 1528 /* 1529 * Return the memory (and swap, if configured) limit for a memcg. 
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max;

	max = READ_ONCE(memcg->memory.max);
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_max;
		unsigned long swap_max;

		memsw_max = memcg->memsw.max;
		swap_max = READ_ONCE(memcg->swap.max);
		swap_max = min(swap_max, (unsigned long)total_swap_pages);
		max = min(max + swap_max, memsw_max);
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	if (mutex_lock_killable(&oom_lock))
		return true;
	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not so excessive as to
				 * reclaim too much, nor so little that we
				 * keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
1649 */ 1650 failed = iter; 1651 mem_cgroup_iter_break(memcg, iter); 1652 break; 1653 } else 1654 iter->oom_lock = true; 1655 } 1656 1657 if (failed) { 1658 /* 1659 * OK, we failed to lock the whole subtree so we have 1660 * to clean up what we set up to the failing subtree 1661 */ 1662 for_each_mem_cgroup_tree(iter, memcg) { 1663 if (iter == failed) { 1664 mem_cgroup_iter_break(memcg, iter); 1665 break; 1666 } 1667 iter->oom_lock = false; 1668 } 1669 } else 1670 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1671 1672 spin_unlock(&memcg_oom_lock); 1673 1674 return !failed; 1675 } 1676 1677 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1678 { 1679 struct mem_cgroup *iter; 1680 1681 spin_lock(&memcg_oom_lock); 1682 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1683 for_each_mem_cgroup_tree(iter, memcg) 1684 iter->oom_lock = false; 1685 spin_unlock(&memcg_oom_lock); 1686 } 1687 1688 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1689 { 1690 struct mem_cgroup *iter; 1691 1692 spin_lock(&memcg_oom_lock); 1693 for_each_mem_cgroup_tree(iter, memcg) 1694 iter->under_oom++; 1695 spin_unlock(&memcg_oom_lock); 1696 } 1697 1698 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1699 { 1700 struct mem_cgroup *iter; 1701 1702 /* 1703 * When a new child is created while the hierarchy is under oom, 1704 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1705 */ 1706 spin_lock(&memcg_oom_lock); 1707 for_each_mem_cgroup_tree(iter, memcg) 1708 if (iter->under_oom > 0) 1709 iter->under_oom--; 1710 spin_unlock(&memcg_oom_lock); 1711 } 1712 1713 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1714 1715 struct oom_wait_info { 1716 struct mem_cgroup *memcg; 1717 wait_queue_entry_t wait; 1718 }; 1719 1720 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1721 unsigned mode, int sync, void *arg) 1722 { 1723 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1724 struct mem_cgroup *oom_wait_memcg; 1725 struct oom_wait_info *oom_wait_info; 1726 1727 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1728 oom_wait_memcg = oom_wait_info->memcg; 1729 1730 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1731 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1732 return 0; 1733 return autoremove_wake_function(wait, mode, sync, arg); 1734 } 1735 1736 static void memcg_oom_recover(struct mem_cgroup *memcg) 1737 { 1738 /* 1739 * For the following lockless ->under_oom test, the only required 1740 * guarantee is that it must see the state asserted by an OOM when 1741 * this function is called as a result of userland actions 1742 * triggered by the notification of the OOM. This is trivially 1743 * achieved by invoking mem_cgroup_mark_under_oom() before 1744 * triggering notification. 1745 */ 1746 if (memcg && memcg->under_oom) 1747 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1748 } 1749 1750 enum oom_status { 1751 OOM_SUCCESS, 1752 OOM_FAILED, 1753 OOM_ASYNC, 1754 OOM_SKIPPED 1755 }; 1756 1757 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1758 { 1759 enum oom_status ret; 1760 bool locked; 1761 1762 if (order > PAGE_ALLOC_COSTLY_ORDER) 1763 return OOM_SKIPPED; 1764 1765 memcg_memory_event(memcg, MEMCG_OOM); 1766 1767 /* 1768 * We are in the middle of the charge context here, so we 1769 * don't want to block when potentially sitting on a callstack 1770 * that holds all kinds of filesystem and mm locks. 
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, the in-kernel OOM killer allows for an async
	 * victim memory reclaim (oom_reaper) and that means that we are not
	 * solely relying on the oom victim to make forward progress and we
	 * can invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation. Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
1876 */ 1877 memcg_oom_recover(memcg); 1878 } 1879 cleanup: 1880 current->memcg_in_oom = NULL; 1881 css_put(&memcg->css); 1882 return true; 1883 } 1884 1885 /** 1886 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1887 * @victim: task to be killed by the OOM killer 1888 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1889 * 1890 * Returns a pointer to a memory cgroup, which has to be cleaned up 1891 * by killing all belonging OOM-killable tasks. 1892 * 1893 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 1894 */ 1895 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1896 struct mem_cgroup *oom_domain) 1897 { 1898 struct mem_cgroup *oom_group = NULL; 1899 struct mem_cgroup *memcg; 1900 1901 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1902 return NULL; 1903 1904 if (!oom_domain) 1905 oom_domain = root_mem_cgroup; 1906 1907 rcu_read_lock(); 1908 1909 memcg = mem_cgroup_from_task(victim); 1910 if (memcg == root_mem_cgroup) 1911 goto out; 1912 1913 /* 1914 * If the victim task has been asynchronously moved to a different 1915 * memory cgroup, we might end up killing tasks outside oom_domain. 1916 * In this case it's better to ignore memory.group.oom. 1917 */ 1918 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1919 goto out; 1920 1921 /* 1922 * Traverse the memory cgroup hierarchy from the victim task's 1923 * cgroup up to the OOMing cgroup (or root) to find the 1924 * highest-level memory cgroup with oom.group set. 1925 */ 1926 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1927 if (memcg->oom_group) 1928 oom_group = memcg; 1929 1930 if (memcg == oom_domain) 1931 break; 1932 } 1933 1934 if (oom_group) 1935 css_get(&oom_group->css); 1936 out: 1937 rcu_read_unlock(); 1938 1939 return oom_group; 1940 } 1941 1942 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1943 { 1944 pr_info("Tasks in "); 1945 pr_cont_cgroup_path(memcg->css.cgroup); 1946 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1947 } 1948 1949 /** 1950 * lock_page_memcg - lock a page->mem_cgroup binding 1951 * @page: the page 1952 * 1953 * This function protects unlocked LRU pages from being moved to 1954 * another cgroup. 1955 * 1956 * It ensures lifetime of the returned memcg. Caller is responsible 1957 * for the lifetime of the page; __unlock_page_memcg() is available 1958 * when @page might get freed inside the locked section. 1959 */ 1960 struct mem_cgroup *lock_page_memcg(struct page *page) 1961 { 1962 struct page *head = compound_head(page); /* rmap on tail pages */ 1963 struct mem_cgroup *memcg; 1964 unsigned long flags; 1965 1966 /* 1967 * The RCU lock is held throughout the transaction. The fast 1968 * path can get away without acquiring the memcg->move_lock 1969 * because page moving starts with an RCU grace period. 1970 * 1971 * The RCU lock also protects the memcg from being freed when 1972 * the page state that is going to change is the only thing 1973 * preventing the page itself from being freed. E.g. writeback 1974 * doesn't hold a page reference and relies on PG_writeback to 1975 * keep off truncation, migration and so forth. 
1976 */ 1977 rcu_read_lock(); 1978 1979 if (mem_cgroup_disabled()) 1980 return NULL; 1981 again: 1982 memcg = head->mem_cgroup; 1983 if (unlikely(!memcg)) 1984 return NULL; 1985 1986 if (atomic_read(&memcg->moving_account) <= 0) 1987 return memcg; 1988 1989 spin_lock_irqsave(&memcg->move_lock, flags); 1990 if (memcg != head->mem_cgroup) { 1991 spin_unlock_irqrestore(&memcg->move_lock, flags); 1992 goto again; 1993 } 1994 1995 /* 1996 * When charge migration first begins, we can have locked and 1997 * unlocked page stat updates happening concurrently. Track 1998 * the task who has the lock for unlock_page_memcg(). 1999 */ 2000 memcg->move_lock_task = current; 2001 memcg->move_lock_flags = flags; 2002 2003 return memcg; 2004 } 2005 EXPORT_SYMBOL(lock_page_memcg); 2006 2007 /** 2008 * __unlock_page_memcg - unlock and unpin a memcg 2009 * @memcg: the memcg 2010 * 2011 * Unlock and unpin a memcg returned by lock_page_memcg(). 2012 */ 2013 void __unlock_page_memcg(struct mem_cgroup *memcg) 2014 { 2015 if (memcg && memcg->move_lock_task == current) { 2016 unsigned long flags = memcg->move_lock_flags; 2017 2018 memcg->move_lock_task = NULL; 2019 memcg->move_lock_flags = 0; 2020 2021 spin_unlock_irqrestore(&memcg->move_lock, flags); 2022 } 2023 2024 rcu_read_unlock(); 2025 } 2026 2027 /** 2028 * unlock_page_memcg - unlock a page->mem_cgroup binding 2029 * @page: the page 2030 */ 2031 void unlock_page_memcg(struct page *page) 2032 { 2033 struct page *head = compound_head(page); 2034 2035 __unlock_page_memcg(head->mem_cgroup); 2036 } 2037 EXPORT_SYMBOL(unlock_page_memcg); 2038 2039 struct memcg_stock_pcp { 2040 struct mem_cgroup *cached; /* this never be root cgroup */ 2041 unsigned int nr_pages; 2042 struct work_struct work; 2043 unsigned long flags; 2044 #define FLUSHING_CACHED_CHARGE 0 2045 }; 2046 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2047 static DEFINE_MUTEX(percpu_charge_mutex); 2048 2049 /** 2050 * consume_stock: Try to consume stocked charge on this cpu. 2051 * @memcg: memcg to consume from. 2052 * @nr_pages: how many pages to charge. 2053 * 2054 * The charges will only happen if @memcg matches the current cpu's memcg 2055 * stock, and at least @nr_pages are available in that stock. Failure to 2056 * service an allocation will refill the stock. 2057 * 2058 * returns true if successful, false otherwise. 2059 */ 2060 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2061 { 2062 struct memcg_stock_pcp *stock; 2063 unsigned long flags; 2064 bool ret = false; 2065 2066 if (nr_pages > MEMCG_CHARGE_BATCH) 2067 return ret; 2068 2069 local_irq_save(flags); 2070 2071 stock = this_cpu_ptr(&memcg_stock); 2072 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2073 stock->nr_pages -= nr_pages; 2074 ret = true; 2075 } 2076 2077 local_irq_restore(flags); 2078 2079 return ret; 2080 } 2081 2082 /* 2083 * Returns stocks cached in percpu and reset cached information. 
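 * The stock is filled by refill_stock() when try_charge() charges a full
 * MEMCG_CHARGE_BATCH but the caller needed fewer pages, and is consumed
 * again by consume_stock(); this helper hands whatever is left back to
 * the page counters and drops the corresponding css references.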
2084 */ 2085 static void drain_stock(struct memcg_stock_pcp *stock) 2086 { 2087 struct mem_cgroup *old = stock->cached; 2088 2089 if (stock->nr_pages) { 2090 page_counter_uncharge(&old->memory, stock->nr_pages); 2091 if (do_memsw_account()) 2092 page_counter_uncharge(&old->memsw, stock->nr_pages); 2093 css_put_many(&old->css, stock->nr_pages); 2094 stock->nr_pages = 0; 2095 } 2096 stock->cached = NULL; 2097 } 2098 2099 static void drain_local_stock(struct work_struct *dummy) 2100 { 2101 struct memcg_stock_pcp *stock; 2102 unsigned long flags; 2103 2104 /* 2105 * The only protection from memory hotplug vs. drain_stock races is 2106 * that we always operate on local CPU stock here with IRQ disabled 2107 */ 2108 local_irq_save(flags); 2109 2110 stock = this_cpu_ptr(&memcg_stock); 2111 drain_stock(stock); 2112 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2113 2114 local_irq_restore(flags); 2115 } 2116 2117 /* 2118 * Cache charges(val) to local per_cpu area. 2119 * This will be consumed by consume_stock() function, later. 2120 */ 2121 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2122 { 2123 struct memcg_stock_pcp *stock; 2124 unsigned long flags; 2125 2126 local_irq_save(flags); 2127 2128 stock = this_cpu_ptr(&memcg_stock); 2129 if (stock->cached != memcg) { /* reset if necessary */ 2130 drain_stock(stock); 2131 stock->cached = memcg; 2132 } 2133 stock->nr_pages += nr_pages; 2134 2135 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2136 drain_stock(stock); 2137 2138 local_irq_restore(flags); 2139 } 2140 2141 /* 2142 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2143 * of the hierarchy under it. 2144 */ 2145 static void drain_all_stock(struct mem_cgroup *root_memcg) 2146 { 2147 int cpu, curcpu; 2148 2149 /* If someone's already draining, avoid adding running more workers. */ 2150 if (!mutex_trylock(&percpu_charge_mutex)) 2151 return; 2152 /* 2153 * Notify other cpus that system-wide "drain" is running 2154 * We do not care about races with the cpu hotplug because cpu down 2155 * as well as workers from this path always operate on the local 2156 * per-cpu data. CPU up doesn't touch memcg_stock at all. 
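 * A CPU going offline has its remaining stock handed back separately by
 * memcg_hotplug_cpu_dead() below.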
2157 */ 2158 curcpu = get_cpu(); 2159 for_each_online_cpu(cpu) { 2160 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2161 struct mem_cgroup *memcg; 2162 bool flush = false; 2163 2164 rcu_read_lock(); 2165 memcg = stock->cached; 2166 if (memcg && stock->nr_pages && 2167 mem_cgroup_is_descendant(memcg, root_memcg)) 2168 flush = true; 2169 rcu_read_unlock(); 2170 2171 if (flush && 2172 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2173 if (cpu == curcpu) 2174 drain_local_stock(&stock->work); 2175 else 2176 schedule_work_on(cpu, &stock->work); 2177 } 2178 } 2179 put_cpu(); 2180 mutex_unlock(&percpu_charge_mutex); 2181 } 2182 2183 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2184 { 2185 struct memcg_stock_pcp *stock; 2186 struct mem_cgroup *memcg, *mi; 2187 2188 stock = &per_cpu(memcg_stock, cpu); 2189 drain_stock(stock); 2190 2191 for_each_mem_cgroup(memcg) { 2192 int i; 2193 2194 for (i = 0; i < MEMCG_NR_STAT; i++) { 2195 int nid; 2196 long x; 2197 2198 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2199 if (x) 2200 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2201 atomic_long_add(x, &memcg->vmstats[i]); 2202 2203 if (i >= NR_VM_NODE_STAT_ITEMS) 2204 continue; 2205 2206 for_each_node(nid) { 2207 struct mem_cgroup_per_node *pn; 2208 2209 pn = mem_cgroup_nodeinfo(memcg, nid); 2210 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2211 if (x) 2212 do { 2213 atomic_long_add(x, &pn->lruvec_stat[i]); 2214 } while ((pn = parent_nodeinfo(pn, nid))); 2215 } 2216 } 2217 2218 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2219 long x; 2220 2221 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2222 if (x) 2223 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2224 atomic_long_add(x, &memcg->vmevents[i]); 2225 } 2226 } 2227 2228 return 0; 2229 } 2230 2231 static void reclaim_high(struct mem_cgroup *memcg, 2232 unsigned int nr_pages, 2233 gfp_t gfp_mask) 2234 { 2235 do { 2236 if (page_counter_read(&memcg->memory) <= 2237 READ_ONCE(memcg->memory.high)) 2238 continue; 2239 memcg_memory_event(memcg, MEMCG_HIGH); 2240 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); 2241 } while ((memcg = parent_mem_cgroup(memcg)) && 2242 !mem_cgroup_is_root(memcg)); 2243 } 2244 2245 static void high_work_func(struct work_struct *work) 2246 { 2247 struct mem_cgroup *memcg; 2248 2249 memcg = container_of(work, struct mem_cgroup, high_work); 2250 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2251 } 2252 2253 /* 2254 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2255 * enough to still cause a significant slowdown in most cases, while still 2256 * allowing diagnostics and tracing to proceed without becoming stuck. 2257 */ 2258 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2259 2260 /* 2261 * When calculating the delay, we use these either side of the exponentiation to 2262 * maintain precision and scale to a reasonable number of jiffies (see the table 2263 * below. 2264 * 2265 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2266 * overage ratio to a delay. 2267 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down down the 2268 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2269 * to produce a reasonable delay curve. 
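 *
 * Putting the two shifts together with calculate_overage() and
 * calculate_high_delay() below, the sleep imposed per allocation batch
 * works out roughly as (a sketch in jiffies; the table below shows the
 * resulting sleep in milliseconds):
 *
 *	overage = ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high
 *	penalty = (overage * overage * HZ)
 *			>> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT)
 *	sleep   = min(penalty * nr_pages / MEMCG_CHARGE_BATCH,
 *			MEMCG_MAX_HIGH_DELAY_JIFFIES)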
2270 * 2271 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2272 * reasonable delay curve compared to precision-adjusted overage, not 2273 * penalising heavily at first, but still making sure that growth beyond the 2274 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2275 * example, with a high of 100 megabytes: 2276 * 2277 * +-------+------------------------+ 2278 * | usage | time to allocate in ms | 2279 * +-------+------------------------+ 2280 * | 100M | 0 | 2281 * | 101M | 6 | 2282 * | 102M | 25 | 2283 * | 103M | 57 | 2284 * | 104M | 102 | 2285 * | 105M | 159 | 2286 * | 106M | 230 | 2287 * | 107M | 313 | 2288 * | 108M | 409 | 2289 * | 109M | 518 | 2290 * | 110M | 639 | 2291 * | 111M | 774 | 2292 * | 112M | 921 | 2293 * | 113M | 1081 | 2294 * | 114M | 1254 | 2295 * | 115M | 1439 | 2296 * | 116M | 1638 | 2297 * | 117M | 1849 | 2298 * | 118M | 2000 | 2299 * | 119M | 2000 | 2300 * | 120M | 2000 | 2301 * +-------+------------------------+ 2302 */ 2303 #define MEMCG_DELAY_PRECISION_SHIFT 20 2304 #define MEMCG_DELAY_SCALING_SHIFT 14 2305 2306 static u64 calculate_overage(unsigned long usage, unsigned long high) 2307 { 2308 u64 overage; 2309 2310 if (usage <= high) 2311 return 0; 2312 2313 /* 2314 * Prevent division by 0 in overage calculation by acting as if 2315 * it was a threshold of 1 page 2316 */ 2317 high = max(high, 1UL); 2318 2319 overage = usage - high; 2320 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2321 return div64_u64(overage, high); 2322 } 2323 2324 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2325 { 2326 u64 overage, max_overage = 0; 2327 2328 do { 2329 overage = calculate_overage(page_counter_read(&memcg->memory), 2330 READ_ONCE(memcg->memory.high)); 2331 max_overage = max(overage, max_overage); 2332 } while ((memcg = parent_mem_cgroup(memcg)) && 2333 !mem_cgroup_is_root(memcg)); 2334 2335 return max_overage; 2336 } 2337 2338 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2339 { 2340 u64 overage, max_overage = 0; 2341 2342 do { 2343 overage = calculate_overage(page_counter_read(&memcg->swap), 2344 READ_ONCE(memcg->swap.high)); 2345 if (overage) 2346 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2347 max_overage = max(overage, max_overage); 2348 } while ((memcg = parent_mem_cgroup(memcg)) && 2349 !mem_cgroup_is_root(memcg)); 2350 2351 return max_overage; 2352 } 2353 2354 /* 2355 * Get the number of jiffies that we should penalise a mischievous cgroup which 2356 * is exceeding its memory.high by checking both it and its ancestors. 2357 */ 2358 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2359 unsigned int nr_pages, 2360 u64 max_overage) 2361 { 2362 unsigned long penalty_jiffies; 2363 2364 if (!max_overage) 2365 return 0; 2366 2367 /* 2368 * We use overage compared to memory.high to calculate the number of 2369 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2370 * fairly lenient on small overages, and increasingly harsh when the 2371 * memcg in question makes it clear that it has no intention of stopping 2372 * its crazy behaviour, so we exponentially increase the delay based on 2373 * overage amount. 2374 */ 2375 penalty_jiffies = max_overage * max_overage * HZ; 2376 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2377 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2378 2379 /* 2380 * Factor in the task's own contribution to the overage, such that four 2381 * N-sized allocations are throttled approximately the same as one 2382 * 4N-sized allocation. 
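 * For instance, assuming a charge batch of 32 pages, a 64-page charge
 * would pay roughly twice the nominal batch penalty while an 8-page
 * charge would pay roughly a quarter of it.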
2383 * 2384 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2385 * larger the current charge patch is than that. 2386 */ 2387 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2388 } 2389 2390 /* 2391 * Scheduled by try_charge() to be executed from the userland return path 2392 * and reclaims memory over the high limit. 2393 */ 2394 void mem_cgroup_handle_over_high(void) 2395 { 2396 unsigned long penalty_jiffies; 2397 unsigned long pflags; 2398 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2399 struct mem_cgroup *memcg; 2400 2401 if (likely(!nr_pages)) 2402 return; 2403 2404 memcg = get_mem_cgroup_from_mm(current->mm); 2405 reclaim_high(memcg, nr_pages, GFP_KERNEL); 2406 current->memcg_nr_pages_over_high = 0; 2407 2408 /* 2409 * memory.high is breached and reclaim is unable to keep up. Throttle 2410 * allocators proactively to slow down excessive growth. 2411 */ 2412 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2413 mem_find_max_overage(memcg)); 2414 2415 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2416 swap_find_max_overage(memcg)); 2417 2418 /* 2419 * Clamp the max delay per usermode return so as to still keep the 2420 * application moving forwards and also permit diagnostics, albeit 2421 * extremely slowly. 2422 */ 2423 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2424 2425 /* 2426 * Don't sleep if the amount of jiffies this memcg owes us is so low 2427 * that it's not even worth doing, in an attempt to be nice to those who 2428 * go only a small amount over their memory.high value and maybe haven't 2429 * been aggressively reclaimed enough yet. 2430 */ 2431 if (penalty_jiffies <= HZ / 100) 2432 goto out; 2433 2434 /* 2435 * If we exit early, we're guaranteed to die (since 2436 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2437 * need to account for any ill-begotten jiffies to pay them off later. 2438 */ 2439 psi_memstall_enter(&pflags); 2440 schedule_timeout_killable(penalty_jiffies); 2441 psi_memstall_leave(&pflags); 2442 2443 out: 2444 css_put(&memcg->css); 2445 } 2446 2447 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2448 unsigned int nr_pages) 2449 { 2450 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2451 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2452 struct mem_cgroup *mem_over_limit; 2453 struct page_counter *counter; 2454 unsigned long nr_reclaimed; 2455 bool may_swap = true; 2456 bool drained = false; 2457 enum oom_status oom_status; 2458 2459 if (mem_cgroup_is_root(memcg)) 2460 return 0; 2461 retry: 2462 if (consume_stock(memcg, nr_pages)) 2463 return 0; 2464 2465 if (!do_memsw_account() || 2466 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2467 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2468 goto done_restock; 2469 if (do_memsw_account()) 2470 page_counter_uncharge(&memcg->memsw, batch); 2471 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2472 } else { 2473 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2474 may_swap = false; 2475 } 2476 2477 if (batch > nr_pages) { 2478 batch = nr_pages; 2479 goto retry; 2480 } 2481 2482 /* 2483 * Memcg doesn't have a dedicated reserve for atomic 2484 * allocations. But like the global atomic pool, we need to 2485 * put the burden of reclaim on regular allocation requests 2486 * and let these go through as privileged allocations. 
2487 */ 2488 if (gfp_mask & __GFP_ATOMIC) 2489 goto force; 2490 2491 /* 2492 * Unlike in global OOM situations, memcg is not in a physical 2493 * memory shortage. Allow dying and OOM-killed tasks to 2494 * bypass the last charges so that they can exit quickly and 2495 * free their memory. 2496 */ 2497 if (unlikely(should_force_charge())) 2498 goto force; 2499 2500 /* 2501 * Prevent unbounded recursion when reclaim operations need to 2502 * allocate memory. This might exceed the limits temporarily, 2503 * but we prefer facilitating memory reclaim and getting back 2504 * under the limit over triggering OOM kills in these cases. 2505 */ 2506 if (unlikely(current->flags & PF_MEMALLOC)) 2507 goto force; 2508 2509 if (unlikely(task_in_memcg_oom(current))) 2510 goto nomem; 2511 2512 if (!gfpflags_allow_blocking(gfp_mask)) 2513 goto nomem; 2514 2515 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2516 2517 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2518 gfp_mask, may_swap); 2519 2520 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2521 goto retry; 2522 2523 if (!drained) { 2524 drain_all_stock(mem_over_limit); 2525 drained = true; 2526 goto retry; 2527 } 2528 2529 if (gfp_mask & __GFP_NORETRY) 2530 goto nomem; 2531 /* 2532 * Even though the limit is exceeded at this point, reclaim 2533 * may have been able to free some pages. Retry the charge 2534 * before killing the task. 2535 * 2536 * Only for regular pages, though: huge pages are rather 2537 * unlikely to succeed so close to the limit, and we fall back 2538 * to regular pages anyway in case of failure. 2539 */ 2540 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2541 goto retry; 2542 /* 2543 * At task move, charge accounts can be doubly counted. So, it's 2544 * better to wait until the end of task_move if something is going on. 2545 */ 2546 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2547 goto retry; 2548 2549 if (nr_retries--) 2550 goto retry; 2551 2552 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2553 goto nomem; 2554 2555 if (gfp_mask & __GFP_NOFAIL) 2556 goto force; 2557 2558 if (fatal_signal_pending(current)) 2559 goto force; 2560 2561 /* 2562 * keep retrying as long as the memcg oom killer is able to make 2563 * a forward progress or bypass the charge if the oom killer 2564 * couldn't make any progress. 2565 */ 2566 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2567 get_order(nr_pages * PAGE_SIZE)); 2568 switch (oom_status) { 2569 case OOM_SUCCESS: 2570 nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2571 goto retry; 2572 case OOM_FAILED: 2573 goto force; 2574 default: 2575 goto nomem; 2576 } 2577 nomem: 2578 if (!(gfp_mask & __GFP_NOFAIL)) 2579 return -ENOMEM; 2580 force: 2581 /* 2582 * The allocation either can't fail or will lead to more memory 2583 * being freed very soon. Allow memory usage go over the limit 2584 * temporarily by force charging it. 2585 */ 2586 page_counter_charge(&memcg->memory, nr_pages); 2587 if (do_memsw_account()) 2588 page_counter_charge(&memcg->memsw, nr_pages); 2589 css_get_many(&memcg->css, nr_pages); 2590 2591 return 0; 2592 2593 done_restock: 2594 css_get_many(&memcg->css, batch); 2595 if (batch > nr_pages) 2596 refill_stock(memcg, batch - nr_pages); 2597 2598 /* 2599 * If the hierarchy is above the normal consumption range, schedule 2600 * reclaim on returning to userland. We can perform reclaim here 2601 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2602 * GFP_KERNEL can consistently be used during reclaim. 
@memcg is 2603 * not recorded as it most likely matches current's and won't 2604 * change in the meantime. As high limit is checked again before 2605 * reclaim, the cost of mismatch is negligible. 2606 */ 2607 do { 2608 bool mem_high, swap_high; 2609 2610 mem_high = page_counter_read(&memcg->memory) > 2611 READ_ONCE(memcg->memory.high); 2612 swap_high = page_counter_read(&memcg->swap) > 2613 READ_ONCE(memcg->swap.high); 2614 2615 /* Don't bother a random interrupted task */ 2616 if (in_interrupt()) { 2617 if (mem_high) { 2618 schedule_work(&memcg->high_work); 2619 break; 2620 } 2621 continue; 2622 } 2623 2624 if (mem_high || swap_high) { 2625 /* 2626 * The allocating tasks in this cgroup will need to do 2627 * reclaim or be throttled to prevent further growth 2628 * of the memory or swap footprints. 2629 * 2630 * Target some best-effort fairness between the tasks, 2631 * and distribute reclaim work and delay penalties 2632 * based on how much each task is actually allocating. 2633 */ 2634 current->memcg_nr_pages_over_high += batch; 2635 set_notify_resume(current); 2636 break; 2637 } 2638 } while ((memcg = parent_mem_cgroup(memcg))); 2639 2640 return 0; 2641 } 2642 2643 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2644 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2645 { 2646 if (mem_cgroup_is_root(memcg)) 2647 return; 2648 2649 page_counter_uncharge(&memcg->memory, nr_pages); 2650 if (do_memsw_account()) 2651 page_counter_uncharge(&memcg->memsw, nr_pages); 2652 2653 css_put_many(&memcg->css, nr_pages); 2654 } 2655 #endif 2656 2657 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2658 { 2659 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2660 /* 2661 * Any of the following ensures page->mem_cgroup stability: 2662 * 2663 * - the page lock 2664 * - LRU isolation 2665 * - lock_page_memcg() 2666 * - exclusive reference 2667 */ 2668 page->mem_cgroup = memcg; 2669 } 2670 2671 #ifdef CONFIG_MEMCG_KMEM 2672 /* 2673 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2674 * 2675 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2676 * cgroup_mutex, etc. 2677 */ 2678 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2679 { 2680 struct page *page; 2681 2682 if (mem_cgroup_disabled()) 2683 return NULL; 2684 2685 page = virt_to_head_page(p); 2686 2687 /* 2688 * Slab pages don't have page->mem_cgroup set because corresponding 2689 * kmem caches can be reparented during the lifetime. That's why 2690 * memcg_from_slab_page() should be used instead. 2691 */ 2692 if (PageSlab(page)) 2693 return memcg_from_slab_page(page); 2694 2695 /* All other pages use page->mem_cgroup */ 2696 return page->mem_cgroup; 2697 } 2698 2699 static int memcg_alloc_cache_id(void) 2700 { 2701 int id, size; 2702 int err; 2703 2704 id = ida_simple_get(&memcg_cache_ida, 2705 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2706 if (id < 0) 2707 return id; 2708 2709 if (id < memcg_nr_cache_ids) 2710 return id; 2711 2712 /* 2713 * There's no space for the new id in memcg_caches arrays, 2714 * so we have to grow them. 
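 * The new size chosen below is 2 * (id + 1), clamped between
 * MEMCG_CACHES_MIN_SIZE and MEMCG_CACHES_MAX_SIZE, so the arrays roughly
 * double every time they fill up.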
2715 */ 2716 down_write(&memcg_cache_ids_sem); 2717 2718 size = 2 * (id + 1); 2719 if (size < MEMCG_CACHES_MIN_SIZE) 2720 size = MEMCG_CACHES_MIN_SIZE; 2721 else if (size > MEMCG_CACHES_MAX_SIZE) 2722 size = MEMCG_CACHES_MAX_SIZE; 2723 2724 err = memcg_update_all_caches(size); 2725 if (!err) 2726 err = memcg_update_all_list_lrus(size); 2727 if (!err) 2728 memcg_nr_cache_ids = size; 2729 2730 up_write(&memcg_cache_ids_sem); 2731 2732 if (err) { 2733 ida_simple_remove(&memcg_cache_ida, id); 2734 return err; 2735 } 2736 return id; 2737 } 2738 2739 static void memcg_free_cache_id(int id) 2740 { 2741 ida_simple_remove(&memcg_cache_ida, id); 2742 } 2743 2744 struct memcg_kmem_cache_create_work { 2745 struct mem_cgroup *memcg; 2746 struct kmem_cache *cachep; 2747 struct work_struct work; 2748 }; 2749 2750 static void memcg_kmem_cache_create_func(struct work_struct *w) 2751 { 2752 struct memcg_kmem_cache_create_work *cw = 2753 container_of(w, struct memcg_kmem_cache_create_work, work); 2754 struct mem_cgroup *memcg = cw->memcg; 2755 struct kmem_cache *cachep = cw->cachep; 2756 2757 memcg_create_kmem_cache(memcg, cachep); 2758 2759 css_put(&memcg->css); 2760 kfree(cw); 2761 } 2762 2763 /* 2764 * Enqueue the creation of a per-memcg kmem_cache. 2765 */ 2766 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2767 struct kmem_cache *cachep) 2768 { 2769 struct memcg_kmem_cache_create_work *cw; 2770 2771 if (!css_tryget_online(&memcg->css)) 2772 return; 2773 2774 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2775 if (!cw) 2776 return; 2777 2778 cw->memcg = memcg; 2779 cw->cachep = cachep; 2780 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2781 2782 queue_work(memcg_kmem_cache_wq, &cw->work); 2783 } 2784 2785 static inline bool memcg_kmem_bypass(void) 2786 { 2787 if (in_interrupt()) 2788 return true; 2789 2790 /* Allow remote memcg charging in kthread contexts. */ 2791 if ((!current->mm || (current->flags & PF_KTHREAD)) && 2792 !current->active_memcg) 2793 return true; 2794 return false; 2795 } 2796 2797 /** 2798 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2799 * @cachep: the original global kmem cache 2800 * 2801 * Return the kmem_cache we're supposed to use for a slab allocation. 2802 * We try to use the current memcg's version of the cache. 2803 * 2804 * If the cache does not exist yet, if we are the first user of it, we 2805 * create it asynchronously in a workqueue and let the current allocation 2806 * go through with the original cache. 2807 * 2808 * This function takes a reference to the cache it returns to assure it 2809 * won't get destroyed while we are working with it. Once the caller is 2810 * done with it, memcg_kmem_put_cache() must be called to release the 2811 * reference. 
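 *
 * A rough sketch of the expected calling pattern (the slab allocation
 * hooks do something along these lines):
 *
 *	cachep = memcg_kmem_get_cache(cachep);
 *	objp = kmem_cache_alloc(cachep, gfpflags);
 *	memcg_kmem_put_cache(cachep);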
2812 */ 2813 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2814 { 2815 struct mem_cgroup *memcg; 2816 struct kmem_cache *memcg_cachep; 2817 struct memcg_cache_array *arr; 2818 int kmemcg_id; 2819 2820 VM_BUG_ON(!is_root_cache(cachep)); 2821 2822 if (memcg_kmem_bypass()) 2823 return cachep; 2824 2825 rcu_read_lock(); 2826 2827 if (unlikely(current->active_memcg)) 2828 memcg = current->active_memcg; 2829 else 2830 memcg = mem_cgroup_from_task(current); 2831 2832 if (!memcg || memcg == root_mem_cgroup) 2833 goto out_unlock; 2834 2835 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2836 if (kmemcg_id < 0) 2837 goto out_unlock; 2838 2839 arr = rcu_dereference(cachep->memcg_params.memcg_caches); 2840 2841 /* 2842 * Make sure we will access the up-to-date value. The code updating 2843 * memcg_caches issues a write barrier to match the data dependency 2844 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()). 2845 */ 2846 memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]); 2847 2848 /* 2849 * If we are in a safe context (can wait, and not in interrupt 2850 * context), we could be be predictable and return right away. 2851 * This would guarantee that the allocation being performed 2852 * already belongs in the new cache. 2853 * 2854 * However, there are some clashes that can arrive from locking. 2855 * For instance, because we acquire the slab_mutex while doing 2856 * memcg_create_kmem_cache, this means no further allocation 2857 * could happen with the slab_mutex held. So it's better to 2858 * defer everything. 2859 * 2860 * If the memcg is dying or memcg_cache is about to be released, 2861 * don't bother creating new kmem_caches. Because memcg_cachep 2862 * is ZEROed as the fist step of kmem offlining, we don't need 2863 * percpu_ref_tryget_live() here. css_tryget_online() check in 2864 * memcg_schedule_kmem_cache_create() will prevent us from 2865 * creation of a new kmem_cache. 2866 */ 2867 if (unlikely(!memcg_cachep)) 2868 memcg_schedule_kmem_cache_create(memcg, cachep); 2869 else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) 2870 cachep = memcg_cachep; 2871 out_unlock: 2872 rcu_read_unlock(); 2873 return cachep; 2874 } 2875 2876 /** 2877 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2878 * @cachep: the cache returned by memcg_kmem_get_cache 2879 */ 2880 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2881 { 2882 if (!is_root_cache(cachep)) 2883 percpu_ref_put(&cachep->memcg_params.refcnt); 2884 } 2885 2886 /** 2887 * __memcg_kmem_charge: charge a number of kernel pages to a memcg 2888 * @memcg: memory cgroup to charge 2889 * @gfp: reclaim mode 2890 * @nr_pages: number of pages to charge 2891 * 2892 * Returns 0 on success, an error code on failure. 2893 */ 2894 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, 2895 unsigned int nr_pages) 2896 { 2897 struct page_counter *counter; 2898 int ret; 2899 2900 ret = try_charge(memcg, gfp, nr_pages); 2901 if (ret) 2902 return ret; 2903 2904 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2905 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2906 2907 /* 2908 * Enforce __GFP_NOFAIL allocation because callers are not 2909 * prepared to see failures and likely do not have any failure 2910 * handling code. 
2911 */ 2912 if (gfp & __GFP_NOFAIL) { 2913 page_counter_charge(&memcg->kmem, nr_pages); 2914 return 0; 2915 } 2916 cancel_charge(memcg, nr_pages); 2917 return -ENOMEM; 2918 } 2919 return 0; 2920 } 2921 2922 /** 2923 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg 2924 * @memcg: memcg to uncharge 2925 * @nr_pages: number of pages to uncharge 2926 */ 2927 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 2928 { 2929 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2930 page_counter_uncharge(&memcg->kmem, nr_pages); 2931 2932 page_counter_uncharge(&memcg->memory, nr_pages); 2933 if (do_memsw_account()) 2934 page_counter_uncharge(&memcg->memsw, nr_pages); 2935 } 2936 2937 /** 2938 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 2939 * @page: page to charge 2940 * @gfp: reclaim mode 2941 * @order: allocation order 2942 * 2943 * Returns 0 on success, an error code on failure. 2944 */ 2945 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 2946 { 2947 struct mem_cgroup *memcg; 2948 int ret = 0; 2949 2950 if (memcg_kmem_bypass()) 2951 return 0; 2952 2953 memcg = get_mem_cgroup_from_current(); 2954 if (!mem_cgroup_is_root(memcg)) { 2955 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); 2956 if (!ret) { 2957 page->mem_cgroup = memcg; 2958 __SetPageKmemcg(page); 2959 } 2960 } 2961 css_put(&memcg->css); 2962 return ret; 2963 } 2964 2965 /** 2966 * __memcg_kmem_uncharge_page: uncharge a kmem page 2967 * @page: page to uncharge 2968 * @order: allocation order 2969 */ 2970 void __memcg_kmem_uncharge_page(struct page *page, int order) 2971 { 2972 struct mem_cgroup *memcg = page->mem_cgroup; 2973 unsigned int nr_pages = 1 << order; 2974 2975 if (!memcg) 2976 return; 2977 2978 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2979 __memcg_kmem_uncharge(memcg, nr_pages); 2980 page->mem_cgroup = NULL; 2981 2982 /* slab pages do not have PageKmemcg flag set */ 2983 if (PageKmemcg(page)) 2984 __ClearPageKmemcg(page); 2985 2986 css_put_many(&memcg->css, nr_pages); 2987 } 2988 #endif /* CONFIG_MEMCG_KMEM */ 2989 2990 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2991 2992 /* 2993 * Because tail pages are not marked as "used", set it. We're under 2994 * pgdat->lru_lock and migration entries setup in all page mappings. 2995 */ 2996 void mem_cgroup_split_huge_fixup(struct page *head) 2997 { 2998 int i; 2999 3000 if (mem_cgroup_disabled()) 3001 return; 3002 3003 for (i = 1; i < HPAGE_PMD_NR; i++) 3004 head[i].mem_cgroup = head->mem_cgroup; 3005 } 3006 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3007 3008 #ifdef CONFIG_MEMCG_SWAP 3009 /** 3010 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3011 * @entry: swap entry to be moved 3012 * @from: mem_cgroup which the entry is moved from 3013 * @to: mem_cgroup which the entry is moved to 3014 * 3015 * It succeeds only when the swap_cgroup's record for this entry is the same 3016 * as the mem_cgroup's id of @from. 3017 * 3018 * Returns 0 on success, -EINVAL on failure. 3019 * 3020 * The caller must have charged to @to, IOW, called page_counter_charge() about 3021 * both res and memsw, and called css_get(). 
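 *
 * The move itself is a single swap_cgroup_cmpxchg() of the entry's owner
 * id, so a racing mover of the same entry simply fails with -EINVAL
 * instead of double-accounting it.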
3022 */ 3023 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3024 struct mem_cgroup *from, struct mem_cgroup *to) 3025 { 3026 unsigned short old_id, new_id; 3027 3028 old_id = mem_cgroup_id(from); 3029 new_id = mem_cgroup_id(to); 3030 3031 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3032 mod_memcg_state(from, MEMCG_SWAP, -1); 3033 mod_memcg_state(to, MEMCG_SWAP, 1); 3034 return 0; 3035 } 3036 return -EINVAL; 3037 } 3038 #else 3039 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3040 struct mem_cgroup *from, struct mem_cgroup *to) 3041 { 3042 return -EINVAL; 3043 } 3044 #endif 3045 3046 static DEFINE_MUTEX(memcg_max_mutex); 3047 3048 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3049 unsigned long max, bool memsw) 3050 { 3051 bool enlarge = false; 3052 bool drained = false; 3053 int ret; 3054 bool limits_invariant; 3055 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3056 3057 do { 3058 if (signal_pending(current)) { 3059 ret = -EINTR; 3060 break; 3061 } 3062 3063 mutex_lock(&memcg_max_mutex); 3064 /* 3065 * Make sure that the new limit (memsw or memory limit) doesn't 3066 * break our basic invariant rule memory.max <= memsw.max. 3067 */ 3068 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3069 max <= memcg->memsw.max; 3070 if (!limits_invariant) { 3071 mutex_unlock(&memcg_max_mutex); 3072 ret = -EINVAL; 3073 break; 3074 } 3075 if (max > counter->max) 3076 enlarge = true; 3077 ret = page_counter_set_max(counter, max); 3078 mutex_unlock(&memcg_max_mutex); 3079 3080 if (!ret) 3081 break; 3082 3083 if (!drained) { 3084 drain_all_stock(memcg); 3085 drained = true; 3086 continue; 3087 } 3088 3089 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3090 GFP_KERNEL, !memsw)) { 3091 ret = -EBUSY; 3092 break; 3093 } 3094 } while (true); 3095 3096 if (!ret && enlarge) 3097 memcg_oom_recover(memcg); 3098 3099 return ret; 3100 } 3101 3102 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3103 gfp_t gfp_mask, 3104 unsigned long *total_scanned) 3105 { 3106 unsigned long nr_reclaimed = 0; 3107 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3108 unsigned long reclaimed; 3109 int loop = 0; 3110 struct mem_cgroup_tree_per_node *mctz; 3111 unsigned long excess; 3112 unsigned long nr_scanned; 3113 3114 if (order > 0) 3115 return 0; 3116 3117 mctz = soft_limit_tree_node(pgdat->node_id); 3118 3119 /* 3120 * Do not even bother to check the largest node if the root 3121 * is empty. Do it lockless to prevent lock bouncing. Races 3122 * are acceptable as soft limit is best effort anyway. 
3123 */ 3124 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3125 return 0; 3126 3127 /* 3128 * This loop can run a while, specially if mem_cgroup's continuously 3129 * keep exceeding their soft limit and putting the system under 3130 * pressure 3131 */ 3132 do { 3133 if (next_mz) 3134 mz = next_mz; 3135 else 3136 mz = mem_cgroup_largest_soft_limit_node(mctz); 3137 if (!mz) 3138 break; 3139 3140 nr_scanned = 0; 3141 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3142 gfp_mask, &nr_scanned); 3143 nr_reclaimed += reclaimed; 3144 *total_scanned += nr_scanned; 3145 spin_lock_irq(&mctz->lock); 3146 __mem_cgroup_remove_exceeded(mz, mctz); 3147 3148 /* 3149 * If we failed to reclaim anything from this memory cgroup 3150 * it is time to move on to the next cgroup 3151 */ 3152 next_mz = NULL; 3153 if (!reclaimed) 3154 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3155 3156 excess = soft_limit_excess(mz->memcg); 3157 /* 3158 * One school of thought says that we should not add 3159 * back the node to the tree if reclaim returns 0. 3160 * But our reclaim could return 0, simply because due 3161 * to priority we are exposing a smaller subset of 3162 * memory to reclaim from. Consider this as a longer 3163 * term TODO. 3164 */ 3165 /* If excess == 0, no tree ops */ 3166 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3167 spin_unlock_irq(&mctz->lock); 3168 css_put(&mz->memcg->css); 3169 loop++; 3170 /* 3171 * Could not reclaim anything and there are no more 3172 * mem cgroups to try or we seem to be looping without 3173 * reclaiming anything. 3174 */ 3175 if (!nr_reclaimed && 3176 (next_mz == NULL || 3177 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3178 break; 3179 } while (!nr_reclaimed); 3180 if (next_mz) 3181 css_put(&next_mz->memcg->css); 3182 return nr_reclaimed; 3183 } 3184 3185 /* 3186 * Test whether @memcg has children, dead or alive. Note that this 3187 * function doesn't care whether @memcg has use_hierarchy enabled and 3188 * returns %true if there are child csses according to the cgroup 3189 * hierarchy. Testing use_hierarchy is the caller's responsibility. 3190 */ 3191 static inline bool memcg_has_children(struct mem_cgroup *memcg) 3192 { 3193 bool ret; 3194 3195 rcu_read_lock(); 3196 ret = css_next_child(NULL, &memcg->css); 3197 rcu_read_unlock(); 3198 return ret; 3199 } 3200 3201 /* 3202 * Reclaims as many pages from the given memcg as possible. 3203 * 3204 * Caller is responsible for holding css reference for memcg. 
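 *
 * cgroup1 exposes this through memory.force_empty; writing any value to
 * that file (e.g. "echo 0 > memory.force_empty") runs this reclaim loop.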
3205 */ 3206 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3207 { 3208 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3209 3210 /* we call try-to-free pages for make this cgroup empty */ 3211 lru_add_drain_all(); 3212 3213 drain_all_stock(memcg); 3214 3215 /* try to free all pages in this cgroup */ 3216 while (nr_retries && page_counter_read(&memcg->memory)) { 3217 int progress; 3218 3219 if (signal_pending(current)) 3220 return -EINTR; 3221 3222 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3223 GFP_KERNEL, true); 3224 if (!progress) { 3225 nr_retries--; 3226 /* maybe some writeback is necessary */ 3227 congestion_wait(BLK_RW_ASYNC, HZ/10); 3228 } 3229 3230 } 3231 3232 return 0; 3233 } 3234 3235 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3236 char *buf, size_t nbytes, 3237 loff_t off) 3238 { 3239 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3240 3241 if (mem_cgroup_is_root(memcg)) 3242 return -EINVAL; 3243 return mem_cgroup_force_empty(memcg) ?: nbytes; 3244 } 3245 3246 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3247 struct cftype *cft) 3248 { 3249 return mem_cgroup_from_css(css)->use_hierarchy; 3250 } 3251 3252 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3253 struct cftype *cft, u64 val) 3254 { 3255 int retval = 0; 3256 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3257 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 3258 3259 if (memcg->use_hierarchy == val) 3260 return 0; 3261 3262 /* 3263 * If parent's use_hierarchy is set, we can't make any modifications 3264 * in the child subtrees. If it is unset, then the change can 3265 * occur, provided the current cgroup has no children. 3266 * 3267 * For the root cgroup, parent_mem is NULL, we allow value to be 3268 * set if there are no children. 
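 *
 * This is normally driven from userspace through the cgroup1 file, e.g.
 * "echo 1 > memory.use_hierarchy", and only succeeds while the cgroup
 * still has no children.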
3269 */ 3270 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3271 (val == 1 || val == 0)) { 3272 if (!memcg_has_children(memcg)) 3273 memcg->use_hierarchy = val; 3274 else 3275 retval = -EBUSY; 3276 } else 3277 retval = -EINVAL; 3278 3279 return retval; 3280 } 3281 3282 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3283 { 3284 unsigned long val; 3285 3286 if (mem_cgroup_is_root(memcg)) { 3287 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3288 memcg_page_state(memcg, NR_ANON_MAPPED); 3289 if (swap) 3290 val += memcg_page_state(memcg, MEMCG_SWAP); 3291 } else { 3292 if (!swap) 3293 val = page_counter_read(&memcg->memory); 3294 else 3295 val = page_counter_read(&memcg->memsw); 3296 } 3297 return val; 3298 } 3299 3300 enum { 3301 RES_USAGE, 3302 RES_LIMIT, 3303 RES_MAX_USAGE, 3304 RES_FAILCNT, 3305 RES_SOFT_LIMIT, 3306 }; 3307 3308 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3309 struct cftype *cft) 3310 { 3311 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3312 struct page_counter *counter; 3313 3314 switch (MEMFILE_TYPE(cft->private)) { 3315 case _MEM: 3316 counter = &memcg->memory; 3317 break; 3318 case _MEMSWAP: 3319 counter = &memcg->memsw; 3320 break; 3321 case _KMEM: 3322 counter = &memcg->kmem; 3323 break; 3324 case _TCP: 3325 counter = &memcg->tcpmem; 3326 break; 3327 default: 3328 BUG(); 3329 } 3330 3331 switch (MEMFILE_ATTR(cft->private)) { 3332 case RES_USAGE: 3333 if (counter == &memcg->memory) 3334 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3335 if (counter == &memcg->memsw) 3336 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3337 return (u64)page_counter_read(counter) * PAGE_SIZE; 3338 case RES_LIMIT: 3339 return (u64)counter->max * PAGE_SIZE; 3340 case RES_MAX_USAGE: 3341 return (u64)counter->watermark * PAGE_SIZE; 3342 case RES_FAILCNT: 3343 return counter->failcnt; 3344 case RES_SOFT_LIMIT: 3345 return (u64)memcg->soft_limit * PAGE_SIZE; 3346 default: 3347 BUG(); 3348 } 3349 } 3350 3351 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) 3352 { 3353 unsigned long stat[MEMCG_NR_STAT] = {0}; 3354 struct mem_cgroup *mi; 3355 int node, cpu, i; 3356 3357 for_each_online_cpu(cpu) 3358 for (i = 0; i < MEMCG_NR_STAT; i++) 3359 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); 3360 3361 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3362 for (i = 0; i < MEMCG_NR_STAT; i++) 3363 atomic_long_add(stat[i], &mi->vmstats[i]); 3364 3365 for_each_node(node) { 3366 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3367 struct mem_cgroup_per_node *pi; 3368 3369 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3370 stat[i] = 0; 3371 3372 for_each_online_cpu(cpu) 3373 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3374 stat[i] += per_cpu( 3375 pn->lruvec_stat_cpu->count[i], cpu); 3376 3377 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) 3378 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3379 atomic_long_add(stat[i], &pi->lruvec_stat[i]); 3380 } 3381 } 3382 3383 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) 3384 { 3385 unsigned long events[NR_VM_EVENT_ITEMS]; 3386 struct mem_cgroup *mi; 3387 int cpu, i; 3388 3389 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3390 events[i] = 0; 3391 3392 for_each_online_cpu(cpu) 3393 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3394 events[i] += per_cpu(memcg->vmstats_percpu->events[i], 3395 cpu); 3396 3397 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3398 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3399 atomic_long_add(events[i], &mi->vmevents[i]); 
3400 } 3401 3402 #ifdef CONFIG_MEMCG_KMEM 3403 static int memcg_online_kmem(struct mem_cgroup *memcg) 3404 { 3405 int memcg_id; 3406 3407 if (cgroup_memory_nokmem) 3408 return 0; 3409 3410 BUG_ON(memcg->kmemcg_id >= 0); 3411 BUG_ON(memcg->kmem_state); 3412 3413 memcg_id = memcg_alloc_cache_id(); 3414 if (memcg_id < 0) 3415 return memcg_id; 3416 3417 static_branch_inc(&memcg_kmem_enabled_key); 3418 /* 3419 * A memory cgroup is considered kmem-online as soon as it gets 3420 * kmemcg_id. Setting the id after enabling static branching will 3421 * guarantee no one starts accounting before all call sites are 3422 * patched. 3423 */ 3424 memcg->kmemcg_id = memcg_id; 3425 memcg->kmem_state = KMEM_ONLINE; 3426 INIT_LIST_HEAD(&memcg->kmem_caches); 3427 3428 return 0; 3429 } 3430 3431 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3432 { 3433 struct cgroup_subsys_state *css; 3434 struct mem_cgroup *parent, *child; 3435 int kmemcg_id; 3436 3437 if (memcg->kmem_state != KMEM_ONLINE) 3438 return; 3439 /* 3440 * Clear the online state before clearing memcg_caches array 3441 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 3442 * guarantees that no cache will be created for this cgroup 3443 * after we are done (see memcg_create_kmem_cache()). 3444 */ 3445 memcg->kmem_state = KMEM_ALLOCATED; 3446 3447 parent = parent_mem_cgroup(memcg); 3448 if (!parent) 3449 parent = root_mem_cgroup; 3450 3451 /* 3452 * Deactivate and reparent kmem_caches. 3453 */ 3454 memcg_deactivate_kmem_caches(memcg, parent); 3455 3456 kmemcg_id = memcg->kmemcg_id; 3457 BUG_ON(kmemcg_id < 0); 3458 3459 /* 3460 * Change kmemcg_id of this cgroup and all its descendants to the 3461 * parent's id, and then move all entries from this cgroup's list_lrus 3462 * to ones of the parent. After we have finished, all list_lrus 3463 * corresponding to this cgroup are guaranteed to remain empty. The 3464 * ordering is imposed by list_lru_node->lock taken by 3465 * memcg_drain_all_list_lrus(). 
3466 */ 3467 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3468 css_for_each_descendant_pre(css, &memcg->css) { 3469 child = mem_cgroup_from_css(css); 3470 BUG_ON(child->kmemcg_id != kmemcg_id); 3471 child->kmemcg_id = parent->kmemcg_id; 3472 if (!memcg->use_hierarchy) 3473 break; 3474 } 3475 rcu_read_unlock(); 3476 3477 memcg_drain_all_list_lrus(kmemcg_id, parent); 3478 3479 memcg_free_cache_id(kmemcg_id); 3480 } 3481 3482 static void memcg_free_kmem(struct mem_cgroup *memcg) 3483 { 3484 /* css_alloc() failed, offlining didn't happen */ 3485 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3486 memcg_offline_kmem(memcg); 3487 3488 if (memcg->kmem_state == KMEM_ALLOCATED) { 3489 WARN_ON(!list_empty(&memcg->kmem_caches)); 3490 static_branch_dec(&memcg_kmem_enabled_key); 3491 } 3492 } 3493 #else 3494 static int memcg_online_kmem(struct mem_cgroup *memcg) 3495 { 3496 return 0; 3497 } 3498 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3499 { 3500 } 3501 static void memcg_free_kmem(struct mem_cgroup *memcg) 3502 { 3503 } 3504 #endif /* CONFIG_MEMCG_KMEM */ 3505 3506 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3507 unsigned long max) 3508 { 3509 int ret; 3510 3511 mutex_lock(&memcg_max_mutex); 3512 ret = page_counter_set_max(&memcg->kmem, max); 3513 mutex_unlock(&memcg_max_mutex); 3514 return ret; 3515 } 3516 3517 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3518 { 3519 int ret; 3520 3521 mutex_lock(&memcg_max_mutex); 3522 3523 ret = page_counter_set_max(&memcg->tcpmem, max); 3524 if (ret) 3525 goto out; 3526 3527 if (!memcg->tcpmem_active) { 3528 /* 3529 * The active flag needs to be written after the static_key 3530 * update. This is what guarantees that the socket activation 3531 * function is the last one to run. See mem_cgroup_sk_alloc() 3532 * for details, and note that we don't mark any socket as 3533 * belonging to this memcg until that flag is up. 3534 * 3535 * We need to do this, because static_keys will span multiple 3536 * sites, but we can't control their order. If we mark a socket 3537 * as accounted, but the accounting functions are not patched in 3538 * yet, we'll lose accounting. 3539 * 3540 * We never race with the readers in mem_cgroup_sk_alloc(), 3541 * because when this value change, the code to process it is not 3542 * patched in yet. 3543 */ 3544 static_branch_inc(&memcg_sockets_enabled_key); 3545 memcg->tcpmem_active = true; 3546 } 3547 out: 3548 mutex_unlock(&memcg_max_mutex); 3549 return ret; 3550 } 3551 3552 /* 3553 * The user of this function is... 3554 * RES_LIMIT. 3555 */ 3556 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3557 char *buf, size_t nbytes, loff_t off) 3558 { 3559 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3560 unsigned long nr_pages; 3561 int ret; 3562 3563 buf = strstrip(buf); 3564 ret = page_counter_memparse(buf, "-1", &nr_pages); 3565 if (ret) 3566 return ret; 3567 3568 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3569 case RES_LIMIT: 3570 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3571 ret = -EINVAL; 3572 break; 3573 } 3574 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3575 case _MEM: 3576 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3577 break; 3578 case _MEMSWAP: 3579 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3580 break; 3581 case _KMEM: 3582 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. 
" 3583 "Please report your usecase to linux-mm@kvack.org if you " 3584 "depend on this functionality.\n"); 3585 ret = memcg_update_kmem_max(memcg, nr_pages); 3586 break; 3587 case _TCP: 3588 ret = memcg_update_tcp_max(memcg, nr_pages); 3589 break; 3590 } 3591 break; 3592 case RES_SOFT_LIMIT: 3593 memcg->soft_limit = nr_pages; 3594 ret = 0; 3595 break; 3596 } 3597 return ret ?: nbytes; 3598 } 3599 3600 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3601 size_t nbytes, loff_t off) 3602 { 3603 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3604 struct page_counter *counter; 3605 3606 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3607 case _MEM: 3608 counter = &memcg->memory; 3609 break; 3610 case _MEMSWAP: 3611 counter = &memcg->memsw; 3612 break; 3613 case _KMEM: 3614 counter = &memcg->kmem; 3615 break; 3616 case _TCP: 3617 counter = &memcg->tcpmem; 3618 break; 3619 default: 3620 BUG(); 3621 } 3622 3623 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3624 case RES_MAX_USAGE: 3625 page_counter_reset_watermark(counter); 3626 break; 3627 case RES_FAILCNT: 3628 counter->failcnt = 0; 3629 break; 3630 default: 3631 BUG(); 3632 } 3633 3634 return nbytes; 3635 } 3636 3637 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3638 struct cftype *cft) 3639 { 3640 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3641 } 3642 3643 #ifdef CONFIG_MMU 3644 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3645 struct cftype *cft, u64 val) 3646 { 3647 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3648 3649 if (val & ~MOVE_MASK) 3650 return -EINVAL; 3651 3652 /* 3653 * No kind of locking is needed in here, because ->can_attach() will 3654 * check this value once in the beginning of the process, and then carry 3655 * on with stale data. This means that changes to this value will only 3656 * affect task migrations starting after the change. 
3657 */ 3658 memcg->move_charge_at_immigrate = val; 3659 return 0; 3660 } 3661 #else 3662 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3663 struct cftype *cft, u64 val) 3664 { 3665 return -ENOSYS; 3666 } 3667 #endif 3668 3669 #ifdef CONFIG_NUMA 3670 3671 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3672 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3673 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3674 3675 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3676 int nid, unsigned int lru_mask, bool tree) 3677 { 3678 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3679 unsigned long nr = 0; 3680 enum lru_list lru; 3681 3682 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3683 3684 for_each_lru(lru) { 3685 if (!(BIT(lru) & lru_mask)) 3686 continue; 3687 if (tree) 3688 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3689 else 3690 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3691 } 3692 return nr; 3693 } 3694 3695 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3696 unsigned int lru_mask, 3697 bool tree) 3698 { 3699 unsigned long nr = 0; 3700 enum lru_list lru; 3701 3702 for_each_lru(lru) { 3703 if (!(BIT(lru) & lru_mask)) 3704 continue; 3705 if (tree) 3706 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3707 else 3708 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3709 } 3710 return nr; 3711 } 3712 3713 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3714 { 3715 struct numa_stat { 3716 const char *name; 3717 unsigned int lru_mask; 3718 }; 3719 3720 static const struct numa_stat stats[] = { 3721 { "total", LRU_ALL }, 3722 { "file", LRU_ALL_FILE }, 3723 { "anon", LRU_ALL_ANON }, 3724 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3725 }; 3726 const struct numa_stat *stat; 3727 int nid; 3728 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3729 3730 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3731 seq_printf(m, "%s=%lu", stat->name, 3732 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3733 false)); 3734 for_each_node_state(nid, N_MEMORY) 3735 seq_printf(m, " N%d=%lu", nid, 3736 mem_cgroup_node_nr_lru_pages(memcg, nid, 3737 stat->lru_mask, false)); 3738 seq_putc(m, '\n'); 3739 } 3740 3741 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3742 3743 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3744 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3745 true)); 3746 for_each_node_state(nid, N_MEMORY) 3747 seq_printf(m, " N%d=%lu", nid, 3748 mem_cgroup_node_nr_lru_pages(memcg, nid, 3749 stat->lru_mask, true)); 3750 seq_putc(m, '\n'); 3751 } 3752 3753 return 0; 3754 } 3755 #endif /* CONFIG_NUMA */ 3756 3757 static const unsigned int memcg1_stats[] = { 3758 NR_FILE_PAGES, 3759 NR_ANON_MAPPED, 3760 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3761 NR_ANON_THPS, 3762 #endif 3763 NR_SHMEM, 3764 NR_FILE_MAPPED, 3765 NR_FILE_DIRTY, 3766 NR_WRITEBACK, 3767 MEMCG_SWAP, 3768 }; 3769 3770 static const char *const memcg1_stat_names[] = { 3771 "cache", 3772 "rss", 3773 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3774 "rss_huge", 3775 #endif 3776 "shmem", 3777 "mapped_file", 3778 "dirty", 3779 "writeback", 3780 "swap", 3781 }; 3782 3783 /* Universal VM events cgroup1 shows, original sort order */ 3784 static const unsigned int memcg1_events[] = { 3785 PGPGIN, 3786 PGPGOUT, 3787 PGFAULT, 3788 PGMAJFAULT, 3789 }; 3790 3791 static int memcg_stat_show(struct seq_file *m, void *v) 3792 { 3793 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 3794 unsigned long memory, memsw; 3795 struct mem_cgroup *mi; 3796 unsigned int i; 3797 3798 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3799 3800 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3801 unsigned long nr; 3802 3803 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3804 continue; 3805 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 3806 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3807 if (memcg1_stats[i] == NR_ANON_THPS) 3808 nr *= HPAGE_PMD_NR; 3809 #endif 3810 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 3811 } 3812 3813 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3814 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 3815 memcg_events_local(memcg, memcg1_events[i])); 3816 3817 for (i = 0; i < NR_LRU_LISTS; i++) 3818 seq_printf(m, "%s %lu\n", lru_list_name(i), 3819 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 3820 PAGE_SIZE); 3821 3822 /* Hierarchical information */ 3823 memory = memsw = PAGE_COUNTER_MAX; 3824 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3825 memory = min(memory, READ_ONCE(mi->memory.max)); 3826 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 3827 } 3828 seq_printf(m, "hierarchical_memory_limit %llu\n", 3829 (u64)memory * PAGE_SIZE); 3830 if (do_memsw_account()) 3831 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3832 (u64)memsw * PAGE_SIZE); 3833 3834 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3835 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3836 continue; 3837 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 3838 (u64)memcg_page_state(memcg, memcg1_stats[i]) * 3839 PAGE_SIZE); 3840 } 3841 3842 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3843 seq_printf(m, "total_%s %llu\n", 3844 vm_event_name(memcg1_events[i]), 3845 (u64)memcg_events(memcg, memcg1_events[i])); 3846 3847 for (i = 0; i < NR_LRU_LISTS; i++) 3848 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 3849 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 3850 PAGE_SIZE); 3851 3852 #ifdef CONFIG_DEBUG_VM 3853 { 3854 pg_data_t *pgdat; 3855 struct mem_cgroup_per_node *mz; 3856 unsigned long anon_cost = 0; 3857 unsigned long file_cost = 0; 3858 3859 for_each_online_pgdat(pgdat) { 3860 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3861 3862 anon_cost += mz->lruvec.anon_cost; 3863 file_cost += mz->lruvec.file_cost; 3864 } 3865 seq_printf(m, "anon_cost %lu\n", anon_cost); 3866 seq_printf(m, "file_cost %lu\n", file_cost); 3867 } 3868 #endif 3869 3870 return 0; 3871 } 3872 3873 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3874 struct cftype *cft) 3875 { 3876 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3877 3878 return mem_cgroup_swappiness(memcg); 3879 } 3880 3881 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3882 struct cftype *cft, u64 val) 3883 { 3884 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3885 3886 if (val > 100) 3887 return -EINVAL; 3888 3889 if (css->parent) 3890 memcg->swappiness = val; 3891 else 3892 vm_swappiness = val; 3893 3894 return 0; 3895 } 3896 3897 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3898 { 3899 struct mem_cgroup_threshold_ary *t; 3900 unsigned long usage; 3901 int i; 3902 3903 rcu_read_lock(); 3904 if (!swap) 3905 t = rcu_dereference(memcg->thresholds.primary); 3906 else 3907 t = rcu_dereference(memcg->memsw_thresholds.primary); 3908 3909 if (!t) 3910 goto unlock; 3911 3912 usage = mem_cgroup_usage(memcg, swap); 3913 3914 /* 3915 * current_threshold 
points to threshold just below or equal to usage. 3916 * If it's not true, a threshold was crossed after last 3917 * call of __mem_cgroup_threshold(). 3918 */ 3919 i = t->current_threshold; 3920 3921 /* 3922 * Iterate backward over array of thresholds starting from 3923 * current_threshold and check if a threshold is crossed. 3924 * If none of thresholds below usage is crossed, we read 3925 * only one element of the array here. 3926 */ 3927 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3928 eventfd_signal(t->entries[i].eventfd, 1); 3929 3930 /* i = current_threshold + 1 */ 3931 i++; 3932 3933 /* 3934 * Iterate forward over array of thresholds starting from 3935 * current_threshold+1 and check if a threshold is crossed. 3936 * If none of thresholds above usage is crossed, we read 3937 * only one element of the array here. 3938 */ 3939 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3940 eventfd_signal(t->entries[i].eventfd, 1); 3941 3942 /* Update current_threshold */ 3943 t->current_threshold = i - 1; 3944 unlock: 3945 rcu_read_unlock(); 3946 } 3947 3948 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3949 { 3950 while (memcg) { 3951 __mem_cgroup_threshold(memcg, false); 3952 if (do_memsw_account()) 3953 __mem_cgroup_threshold(memcg, true); 3954 3955 memcg = parent_mem_cgroup(memcg); 3956 } 3957 } 3958 3959 static int compare_thresholds(const void *a, const void *b) 3960 { 3961 const struct mem_cgroup_threshold *_a = a; 3962 const struct mem_cgroup_threshold *_b = b; 3963 3964 if (_a->threshold > _b->threshold) 3965 return 1; 3966 3967 if (_a->threshold < _b->threshold) 3968 return -1; 3969 3970 return 0; 3971 } 3972 3973 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3974 { 3975 struct mem_cgroup_eventfd_list *ev; 3976 3977 spin_lock(&memcg_oom_lock); 3978 3979 list_for_each_entry(ev, &memcg->oom_notify, list) 3980 eventfd_signal(ev->eventfd, 1); 3981 3982 spin_unlock(&memcg_oom_lock); 3983 return 0; 3984 } 3985 3986 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3987 { 3988 struct mem_cgroup *iter; 3989 3990 for_each_mem_cgroup_tree(iter, memcg) 3991 mem_cgroup_oom_notify_cb(iter); 3992 } 3993 3994 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3995 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3996 { 3997 struct mem_cgroup_thresholds *thresholds; 3998 struct mem_cgroup_threshold_ary *new; 3999 unsigned long threshold; 4000 unsigned long usage; 4001 int i, size, ret; 4002 4003 ret = page_counter_memparse(args, "-1", &threshold); 4004 if (ret) 4005 return ret; 4006 4007 mutex_lock(&memcg->thresholds_lock); 4008 4009 if (type == _MEM) { 4010 thresholds = &memcg->thresholds; 4011 usage = mem_cgroup_usage(memcg, false); 4012 } else if (type == _MEMSWAP) { 4013 thresholds = &memcg->memsw_thresholds; 4014 usage = mem_cgroup_usage(memcg, true); 4015 } else 4016 BUG(); 4017 4018 /* Check if a threshold crossed before adding a new one */ 4019 if (thresholds->primary) 4020 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4021 4022 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4023 4024 /* Allocate memory for new array of thresholds */ 4025 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4026 if (!new) { 4027 ret = -ENOMEM; 4028 goto unlock; 4029 } 4030 new->size = size; 4031 4032 /* Copy thresholds (if any) to new array */ 4033 if (thresholds->primary) { 4034 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 4035 sizeof(struct mem_cgroup_threshold)); 4036 } 4037 4038 /* Add new threshold */ 4039 new->entries[size - 1].eventfd = eventfd; 4040 new->entries[size - 1].threshold = threshold; 4041 4042 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4043 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 4044 compare_thresholds, NULL); 4045 4046 /* Find current threshold */ 4047 new->current_threshold = -1; 4048 for (i = 0; i < size; i++) { 4049 if (new->entries[i].threshold <= usage) { 4050 /* 4051 * new->current_threshold will not be used until 4052 * rcu_assign_pointer(), so it's safe to increment 4053 * it here. 4054 */ 4055 ++new->current_threshold; 4056 } else 4057 break; 4058 } 4059 4060 /* Free old spare buffer and save old primary buffer as spare */ 4061 kfree(thresholds->spare); 4062 thresholds->spare = thresholds->primary; 4063 4064 rcu_assign_pointer(thresholds->primary, new); 4065 4066 /* To be sure that nobody uses thresholds */ 4067 synchronize_rcu(); 4068 4069 unlock: 4070 mutex_unlock(&memcg->thresholds_lock); 4071 4072 return ret; 4073 } 4074 4075 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4076 struct eventfd_ctx *eventfd, const char *args) 4077 { 4078 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4079 } 4080 4081 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4082 struct eventfd_ctx *eventfd, const char *args) 4083 { 4084 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4085 } 4086 4087 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4088 struct eventfd_ctx *eventfd, enum res_type type) 4089 { 4090 struct mem_cgroup_thresholds *thresholds; 4091 struct mem_cgroup_threshold_ary *new; 4092 unsigned long usage; 4093 int i, j, size, entries; 4094 4095 mutex_lock(&memcg->thresholds_lock); 4096 4097 if (type == _MEM) { 4098 thresholds = &memcg->thresholds; 4099 usage = mem_cgroup_usage(memcg, false); 4100 } else if (type == _MEMSWAP) { 4101 thresholds = &memcg->memsw_thresholds; 4102 usage = mem_cgroup_usage(memcg, true); 4103 } else 4104 BUG(); 4105 4106 if (!thresholds->primary) 4107 goto unlock; 4108 4109 /* Check if a threshold crossed before removing */ 4110 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4111 4112 /* Calculate new number of threshold */ 4113 size = entries = 0; 4114 for (i = 0; i < thresholds->primary->size; i++) { 4115 if (thresholds->primary->entries[i].eventfd != eventfd) 4116 size++; 4117 else 4118 entries++; 4119 } 4120 4121 new = thresholds->spare; 4122 4123 /* If no items related to eventfd have been cleared, nothing to do */ 4124 if (!entries) 4125 goto unlock; 4126 4127 /* Set thresholds array to NULL if we don't have thresholds */ 4128 if (!size) { 4129 kfree(new); 4130 new = NULL; 4131 goto swap_buffers; 4132 } 4133 4134 new->size = size; 4135 4136 /* Copy thresholds and find current threshold */ 4137 new->current_threshold = -1; 4138 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4139 if (thresholds->primary->entries[i].eventfd == eventfd) 4140 continue; 4141 4142 new->entries[j] = 
thresholds->primary->entries[i]; 4143 if (new->entries[j].threshold <= usage) { 4144 /* 4145 * new->current_threshold will not be used 4146 * until rcu_assign_pointer(), so it's safe to increment 4147 * it here. 4148 */ 4149 ++new->current_threshold; 4150 } 4151 j++; 4152 } 4153 4154 swap_buffers: 4155 /* Swap primary and spare array */ 4156 thresholds->spare = thresholds->primary; 4157 4158 rcu_assign_pointer(thresholds->primary, new); 4159 4160 /* To be sure that nobody uses thresholds */ 4161 synchronize_rcu(); 4162 4163 /* If all events are unregistered, free the spare array */ 4164 if (!new) { 4165 kfree(thresholds->spare); 4166 thresholds->spare = NULL; 4167 } 4168 unlock: 4169 mutex_unlock(&memcg->thresholds_lock); 4170 } 4171 4172 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4173 struct eventfd_ctx *eventfd) 4174 { 4175 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4176 } 4177 4178 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4179 struct eventfd_ctx *eventfd) 4180 { 4181 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4182 } 4183 4184 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4185 struct eventfd_ctx *eventfd, const char *args) 4186 { 4187 struct mem_cgroup_eventfd_list *event; 4188 4189 event = kmalloc(sizeof(*event), GFP_KERNEL); 4190 if (!event) 4191 return -ENOMEM; 4192 4193 spin_lock(&memcg_oom_lock); 4194 4195 event->eventfd = eventfd; 4196 list_add(&event->list, &memcg->oom_notify); 4197 4198 /* already in OOM ? */ 4199 if (memcg->under_oom) 4200 eventfd_signal(eventfd, 1); 4201 spin_unlock(&memcg_oom_lock); 4202 4203 return 0; 4204 } 4205 4206 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4207 struct eventfd_ctx *eventfd) 4208 { 4209 struct mem_cgroup_eventfd_list *ev, *tmp; 4210 4211 spin_lock(&memcg_oom_lock); 4212 4213 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4214 if (ev->eventfd == eventfd) { 4215 list_del(&ev->list); 4216 kfree(ev); 4217 } 4218 } 4219 4220 spin_unlock(&memcg_oom_lock); 4221 } 4222 4223 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4224 { 4225 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4226 4227 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4228 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4229 seq_printf(sf, "oom_kill %lu\n", 4230 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4231 return 0; 4232 } 4233 4234 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4235 struct cftype *cft, u64 val) 4236 { 4237 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4238 4239 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4240 if (!css->parent || !((val == 0) || (val == 1))) 4241 return -EINVAL; 4242 4243 memcg->oom_kill_disable = val; 4244 if (!val) 4245 memcg_oom_recover(memcg); 4246 4247 return 0; 4248 } 4249 4250 #ifdef CONFIG_CGROUP_WRITEBACK 4251 4252 #include <trace/events/writeback.h> 4253 4254 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4255 { 4256 return wb_domain_init(&memcg->cgwb_domain, gfp); 4257 } 4258 4259 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4260 { 4261 wb_domain_exit(&memcg->cgwb_domain); 4262 } 4263 4264 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4265 { 4266 wb_domain_size_changed(&memcg->cgwb_domain); 4267 } 4268 4269 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4270 { 4271 struct 
mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4272 4273 if (!memcg->css.parent) 4274 return NULL; 4275 4276 return &memcg->cgwb_domain; 4277 } 4278 4279 /* 4280 * idx can be of type enum memcg_stat_item or node_stat_item. 4281 * Keep in sync with memcg_exact_page(). 4282 */ 4283 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 4284 { 4285 long x = atomic_long_read(&memcg->vmstats[idx]); 4286 int cpu; 4287 4288 for_each_online_cpu(cpu) 4289 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 4290 if (x < 0) 4291 x = 0; 4292 return x; 4293 } 4294 4295 /** 4296 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4297 * @wb: bdi_writeback in question 4298 * @pfilepages: out parameter for number of file pages 4299 * @pheadroom: out parameter for number of allocatable pages according to memcg 4300 * @pdirty: out parameter for number of dirty pages 4301 * @pwriteback: out parameter for number of pages under writeback 4302 * 4303 * Determine the numbers of file, headroom, dirty, and writeback pages in 4304 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4305 * is a bit more involved. 4306 * 4307 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4308 * headroom is calculated as the lowest headroom of itself and the 4309 * ancestors. Note that this doesn't consider the actual amount of 4310 * available memory in the system. The caller should further cap 4311 * *@pheadroom accordingly. 4312 */ 4313 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4314 unsigned long *pheadroom, unsigned long *pdirty, 4315 unsigned long *pwriteback) 4316 { 4317 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4318 struct mem_cgroup *parent; 4319 4320 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4321 4322 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4323 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4324 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4325 *pheadroom = PAGE_COUNTER_MAX; 4326 4327 while ((parent = parent_mem_cgroup(memcg))) { 4328 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4329 READ_ONCE(memcg->memory.high)); 4330 unsigned long used = page_counter_read(&memcg->memory); 4331 4332 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4333 memcg = parent; 4334 } 4335 } 4336 4337 /* 4338 * Foreign dirty flushing 4339 * 4340 * There's an inherent mismatch between memcg and writeback. The former 4341 * tracks ownership per-page while the latter per-inode. This was a 4342 * deliberate design decision because honoring per-page ownership in the 4343 * writeback path is complicated, may lead to higher CPU and IO overheads 4344 * and is deemed unnecessary given that write-sharing an inode across 4345 * different cgroups isn't a common use-case. 4346 * 4347 * Combined with inode majority-writer ownership switching, this works well 4348 * enough in most cases but there are some pathological cases. For 4349 * example, let's say there are two cgroups A and B which keep writing to 4350 * different but confined parts of the same inode. B owns the inode and 4351 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4352 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4353 * triggering background writeback. A will be slowed down without a way to 4354 * make writeback of the dirty pages happen.
4355 * 4356 * Conditions like the above can lead to a cgroup getting repeatedly and 4357 * severely throttled after making some progress after each 4358 * dirty_expire_interval while the underlying IO device is almost 4359 * completely idle. 4360 * 4361 * Solving this problem completely requires matching the ownership tracking 4362 * granularities between memcg and writeback in either direction. However, 4363 * the more egregious behaviors can be avoided by simply remembering the 4364 * most recent foreign dirtying events and initiating remote flushes on 4365 * them when local writeback isn't enough to keep the memory clean enough. 4366 * 4367 * The following two functions implement such a mechanism. When a foreign 4368 * page - a page whose memcg and writeback ownerships don't match - is 4369 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4370 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4371 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4372 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4373 * foreign bdi_writebacks which haven't expired. Both the numbers of 4374 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4375 * limited to MEMCG_CGWB_FRN_CNT. 4376 * 4377 * The mechanism only remembers IDs and doesn't hold any object references. 4378 * As being wrong occasionally doesn't matter, updates and accesses to the 4379 * records are lockless and racy. 4380 */ 4381 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, 4382 struct bdi_writeback *wb) 4383 { 4384 struct mem_cgroup *memcg = page->mem_cgroup; 4385 struct memcg_cgwb_frn *frn; 4386 u64 now = get_jiffies_64(); 4387 u64 oldest_at = now; 4388 int oldest = -1; 4389 int i; 4390 4391 trace_track_foreign_dirty(page, wb); 4392 4393 /* 4394 * Pick the slot to use. If there is already a slot for @wb, keep 4395 * using it. If not, replace the oldest one which isn't being 4396 * written out. 4397 */ 4398 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4399 frn = &memcg->cgwb_frn[i]; 4400 if (frn->bdi_id == wb->bdi->id && 4401 frn->memcg_id == wb->memcg_css->id) 4402 break; 4403 if (time_before64(frn->at, oldest_at) && 4404 atomic_read(&frn->done.cnt) == 1) { 4405 oldest = i; 4406 oldest_at = frn->at; 4407 } 4408 } 4409 4410 if (i < MEMCG_CGWB_FRN_CNT) { 4411 /* 4412 * Re-using an existing one. Update timestamp lazily to 4413 * avoid making the cacheline hot. We want them to be 4414 * reasonably up-to-date and significantly shorter than 4415 * dirty_expire_interval as that's what expires the record. 4416 * Use the shorter of 1s and dirty_expire_interval / 8.
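 *
 * Worked example (added note; assumes the default dirty_expire_interval
 * of 3000 centisecs, i.e. 30s): dirty_expire_interval is kept in
 * centiseconds, so the "* 10" below converts it to milliseconds;
 * msecs_to_jiffies(3000 * 10) / 8 is ~3.75s worth of jiffies, and the
 * min_t() against HZ caps that at one second. Only when
 * dirty_expire_interval is tuned below 8s (800 centisecs) does the
 * update interval drop under a second.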
4417 */ 4418 unsigned long update_intv = 4419 min_t(unsigned long, HZ, 4420 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4421 4422 if (time_before64(frn->at, now - update_intv)) 4423 frn->at = now; 4424 } else if (oldest >= 0) { 4425 /* replace the oldest free one */ 4426 frn = &memcg->cgwb_frn[oldest]; 4427 frn->bdi_id = wb->bdi->id; 4428 frn->memcg_id = wb->memcg_css->id; 4429 frn->at = now; 4430 } 4431 } 4432 4433 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4434 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4435 { 4436 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4437 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4438 u64 now = jiffies_64; 4439 int i; 4440 4441 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4442 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4443 4444 /* 4445 * If the record is older than dirty_expire_interval, 4446 * writeback on it has already started. No need to kick it 4447 * off again. Also, don't start a new one if there's 4448 * already one in flight. 4449 */ 4450 if (time_after64(frn->at, now - intv) && 4451 atomic_read(&frn->done.cnt) == 1) { 4452 frn->at = 0; 4453 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4454 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4455 WB_REASON_FOREIGN_FLUSH, 4456 &frn->done); 4457 } 4458 } 4459 } 4460 4461 #else /* CONFIG_CGROUP_WRITEBACK */ 4462 4463 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4464 { 4465 return 0; 4466 } 4467 4468 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4469 { 4470 } 4471 4472 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4473 { 4474 } 4475 4476 #endif /* CONFIG_CGROUP_WRITEBACK */ 4477 4478 /* 4479 * DO NOT USE IN NEW FILES. 4480 * 4481 * "cgroup.event_control" implementation. 4482 * 4483 * This is way over-engineered. It tries to support fully configurable 4484 * events for each user. Such level of flexibility is completely 4485 * unnecessary especially in the light of the planned unified hierarchy. 4486 * 4487 * Please deprecate this and replace with something simpler if at all 4488 * possible. 4489 */ 4490 4491 /* 4492 * Unregister event and free resources. 4493 * 4494 * Gets called from workqueue. 4495 */ 4496 static void memcg_event_remove(struct work_struct *work) 4497 { 4498 struct mem_cgroup_event *event = 4499 container_of(work, struct mem_cgroup_event, remove); 4500 struct mem_cgroup *memcg = event->memcg; 4501 4502 remove_wait_queue(event->wqh, &event->wait); 4503 4504 event->unregister_event(memcg, event->eventfd); 4505 4506 /* Notify userspace the event is going away. */ 4507 eventfd_signal(event->eventfd, 1); 4508 4509 eventfd_ctx_put(event->eventfd); 4510 kfree(event); 4511 css_put(&memcg->css); 4512 } 4513 4514 /* 4515 * Gets called on EPOLLHUP on eventfd when user closes it. 4516 * 4517 * Called with wqh->lock held and interrupts disabled. 4518 */ 4519 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4520 int sync, void *key) 4521 { 4522 struct mem_cgroup_event *event = 4523 container_of(wait, struct mem_cgroup_event, wait); 4524 struct mem_cgroup *memcg = event->memcg; 4525 __poll_t flags = key_to_poll(key); 4526 4527 if (flags & EPOLLHUP) { 4528 /* 4529 * If the event has been detached at cgroup removal, we 4530 * can simply return knowing the other side will cleanup 4531 * for us. 
4532 * 4533 * We can't race against event freeing since the other 4534 * side will require wqh->lock via remove_wait_queue(), 4535 * which we hold. 4536 */ 4537 spin_lock(&memcg->event_list_lock); 4538 if (!list_empty(&event->list)) { 4539 list_del_init(&event->list); 4540 /* 4541 * We are in atomic context, but cgroup_event_remove() 4542 * may sleep, so we have to call it in workqueue. 4543 */ 4544 schedule_work(&event->remove); 4545 } 4546 spin_unlock(&memcg->event_list_lock); 4547 } 4548 4549 return 0; 4550 } 4551 4552 static void memcg_event_ptable_queue_proc(struct file *file, 4553 wait_queue_head_t *wqh, poll_table *pt) 4554 { 4555 struct mem_cgroup_event *event = 4556 container_of(pt, struct mem_cgroup_event, pt); 4557 4558 event->wqh = wqh; 4559 add_wait_queue(wqh, &event->wait); 4560 } 4561 4562 /* 4563 * DO NOT USE IN NEW FILES. 4564 * 4565 * Parse input and register new cgroup event handler. 4566 * 4567 * Input must be in format '<event_fd> <control_fd> <args>'. 4568 * Interpretation of args is defined by control file implementation. 4569 */ 4570 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4571 char *buf, size_t nbytes, loff_t off) 4572 { 4573 struct cgroup_subsys_state *css = of_css(of); 4574 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4575 struct mem_cgroup_event *event; 4576 struct cgroup_subsys_state *cfile_css; 4577 unsigned int efd, cfd; 4578 struct fd efile; 4579 struct fd cfile; 4580 const char *name; 4581 char *endp; 4582 int ret; 4583 4584 buf = strstrip(buf); 4585 4586 efd = simple_strtoul(buf, &endp, 10); 4587 if (*endp != ' ') 4588 return -EINVAL; 4589 buf = endp + 1; 4590 4591 cfd = simple_strtoul(buf, &endp, 10); 4592 if ((*endp != ' ') && (*endp != '\0')) 4593 return -EINVAL; 4594 buf = endp + 1; 4595 4596 event = kzalloc(sizeof(*event), GFP_KERNEL); 4597 if (!event) 4598 return -ENOMEM; 4599 4600 event->memcg = memcg; 4601 INIT_LIST_HEAD(&event->list); 4602 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4603 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4604 INIT_WORK(&event->remove, memcg_event_remove); 4605 4606 efile = fdget(efd); 4607 if (!efile.file) { 4608 ret = -EBADF; 4609 goto out_kfree; 4610 } 4611 4612 event->eventfd = eventfd_ctx_fileget(efile.file); 4613 if (IS_ERR(event->eventfd)) { 4614 ret = PTR_ERR(event->eventfd); 4615 goto out_put_efile; 4616 } 4617 4618 cfile = fdget(cfd); 4619 if (!cfile.file) { 4620 ret = -EBADF; 4621 goto out_put_eventfd; 4622 } 4623 4624 /* the process need read permission on control file */ 4625 /* AV: shouldn't we check that it's been opened for read instead? */ 4626 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4627 if (ret < 0) 4628 goto out_put_cfile; 4629 4630 /* 4631 * Determine the event callbacks and set them in @event. This used 4632 * to be done via struct cftype but cgroup core no longer knows 4633 * about these events. The following is crude but the whole thing 4634 * is for compatibility anyway. 4635 * 4636 * DO NOT ADD NEW FILES. 
4637 */ 4638 name = cfile.file->f_path.dentry->d_name.name; 4639 4640 if (!strcmp(name, "memory.usage_in_bytes")) { 4641 event->register_event = mem_cgroup_usage_register_event; 4642 event->unregister_event = mem_cgroup_usage_unregister_event; 4643 } else if (!strcmp(name, "memory.oom_control")) { 4644 event->register_event = mem_cgroup_oom_register_event; 4645 event->unregister_event = mem_cgroup_oom_unregister_event; 4646 } else if (!strcmp(name, "memory.pressure_level")) { 4647 event->register_event = vmpressure_register_event; 4648 event->unregister_event = vmpressure_unregister_event; 4649 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4650 event->register_event = memsw_cgroup_usage_register_event; 4651 event->unregister_event = memsw_cgroup_usage_unregister_event; 4652 } else { 4653 ret = -EINVAL; 4654 goto out_put_cfile; 4655 } 4656 4657 /* 4658 * Verify @cfile should belong to @css. Also, remaining events are 4659 * automatically removed on cgroup destruction but the removal is 4660 * asynchronous, so take an extra ref on @css. 4661 */ 4662 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4663 &memory_cgrp_subsys); 4664 ret = -EINVAL; 4665 if (IS_ERR(cfile_css)) 4666 goto out_put_cfile; 4667 if (cfile_css != css) { 4668 css_put(cfile_css); 4669 goto out_put_cfile; 4670 } 4671 4672 ret = event->register_event(memcg, event->eventfd, buf); 4673 if (ret) 4674 goto out_put_css; 4675 4676 vfs_poll(efile.file, &event->pt); 4677 4678 spin_lock(&memcg->event_list_lock); 4679 list_add(&event->list, &memcg->event_list); 4680 spin_unlock(&memcg->event_list_lock); 4681 4682 fdput(cfile); 4683 fdput(efile); 4684 4685 return nbytes; 4686 4687 out_put_css: 4688 css_put(css); 4689 out_put_cfile: 4690 fdput(cfile); 4691 out_put_eventfd: 4692 eventfd_ctx_put(event->eventfd); 4693 out_put_efile: 4694 fdput(efile); 4695 out_kfree: 4696 kfree(event); 4697 4698 return ret; 4699 } 4700 4701 static struct cftype mem_cgroup_legacy_files[] = { 4702 { 4703 .name = "usage_in_bytes", 4704 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4705 .read_u64 = mem_cgroup_read_u64, 4706 }, 4707 { 4708 .name = "max_usage_in_bytes", 4709 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4710 .write = mem_cgroup_reset, 4711 .read_u64 = mem_cgroup_read_u64, 4712 }, 4713 { 4714 .name = "limit_in_bytes", 4715 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4716 .write = mem_cgroup_write, 4717 .read_u64 = mem_cgroup_read_u64, 4718 }, 4719 { 4720 .name = "soft_limit_in_bytes", 4721 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4722 .write = mem_cgroup_write, 4723 .read_u64 = mem_cgroup_read_u64, 4724 }, 4725 { 4726 .name = "failcnt", 4727 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4728 .write = mem_cgroup_reset, 4729 .read_u64 = mem_cgroup_read_u64, 4730 }, 4731 { 4732 .name = "stat", 4733 .seq_show = memcg_stat_show, 4734 }, 4735 { 4736 .name = "force_empty", 4737 .write = mem_cgroup_force_empty_write, 4738 }, 4739 { 4740 .name = "use_hierarchy", 4741 .write_u64 = mem_cgroup_hierarchy_write, 4742 .read_u64 = mem_cgroup_hierarchy_read, 4743 }, 4744 { 4745 .name = "cgroup.event_control", /* XXX: for compat */ 4746 .write = memcg_write_event_control, 4747 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4748 }, 4749 { 4750 .name = "swappiness", 4751 .read_u64 = mem_cgroup_swappiness_read, 4752 .write_u64 = mem_cgroup_swappiness_write, 4753 }, 4754 { 4755 .name = "move_charge_at_immigrate", 4756 .read_u64 = mem_cgroup_move_charge_read, 4757 .write_u64 = mem_cgroup_move_charge_write, 4758 
}, 4759 { 4760 .name = "oom_control", 4761 .seq_show = mem_cgroup_oom_control_read, 4762 .write_u64 = mem_cgroup_oom_control_write, 4763 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4764 }, 4765 { 4766 .name = "pressure_level", 4767 }, 4768 #ifdef CONFIG_NUMA 4769 { 4770 .name = "numa_stat", 4771 .seq_show = memcg_numa_stat_show, 4772 }, 4773 #endif 4774 { 4775 .name = "kmem.limit_in_bytes", 4776 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4777 .write = mem_cgroup_write, 4778 .read_u64 = mem_cgroup_read_u64, 4779 }, 4780 { 4781 .name = "kmem.usage_in_bytes", 4782 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4783 .read_u64 = mem_cgroup_read_u64, 4784 }, 4785 { 4786 .name = "kmem.failcnt", 4787 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4788 .write = mem_cgroup_reset, 4789 .read_u64 = mem_cgroup_read_u64, 4790 }, 4791 { 4792 .name = "kmem.max_usage_in_bytes", 4793 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4794 .write = mem_cgroup_reset, 4795 .read_u64 = mem_cgroup_read_u64, 4796 }, 4797 #if defined(CONFIG_MEMCG_KMEM) && \ 4798 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 4799 { 4800 .name = "kmem.slabinfo", 4801 .seq_start = memcg_slab_start, 4802 .seq_next = memcg_slab_next, 4803 .seq_stop = memcg_slab_stop, 4804 .seq_show = memcg_slab_show, 4805 }, 4806 #endif 4807 { 4808 .name = "kmem.tcp.limit_in_bytes", 4809 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4810 .write = mem_cgroup_write, 4811 .read_u64 = mem_cgroup_read_u64, 4812 }, 4813 { 4814 .name = "kmem.tcp.usage_in_bytes", 4815 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4816 .read_u64 = mem_cgroup_read_u64, 4817 }, 4818 { 4819 .name = "kmem.tcp.failcnt", 4820 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4821 .write = mem_cgroup_reset, 4822 .read_u64 = mem_cgroup_read_u64, 4823 }, 4824 { 4825 .name = "kmem.tcp.max_usage_in_bytes", 4826 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4827 .write = mem_cgroup_reset, 4828 .read_u64 = mem_cgroup_read_u64, 4829 }, 4830 { }, /* terminate */ 4831 }; 4832 4833 /* 4834 * Private memory cgroup IDR 4835 * 4836 * Swap-out records and page cache shadow entries need to store memcg 4837 * references in constrained space, so we maintain an ID space that is 4838 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4839 * memory-controlled cgroups to 64k. 4840 * 4841 * However, there usually are many references to the offline CSS after 4842 * the cgroup has been destroyed, such as page cache or reclaimable 4843 * slab objects, that don't need to hang on to the ID. We want to keep 4844 * those dead CSS from occupying IDs, or we might quickly exhaust the 4845 * relatively small ID space and prevent the creation of new cgroups 4846 * even when there are much fewer than 64k cgroups - possibly none. 4847 * 4848 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4849 * be freed and recycled when it's no longer needed, which is usually 4850 * when the CSS is offlined. 4851 * 4852 * The only exception to that are records of swapped out tmpfs/shmem 4853 * pages that need to be attributed to live ancestors on swapin. But 4854 * those references are manageable from userspace. 
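 *
 * Rough lifetime sketch (added for orientation; the swap-out/swap-in
 * steps live elsewhere in this file):
 *
 *	css_alloc:	 idr_alloc() reserves the ID
 *	css_online:	 refcount_set(&memcg->id.ref, 1); the ID pins the CSS
 *	swap-out:	 mem_cgroup_id_get_many() keeps it pinned per record
 *	swap-in/uncharge: mem_cgroup_id_put_many()
 *	last put:	 idr_remove() recycles the ID and drops the CSS ref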
4855 */ 4856 4857 static DEFINE_IDR(mem_cgroup_idr); 4858 4859 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 4860 { 4861 if (memcg->id.id > 0) { 4862 idr_remove(&mem_cgroup_idr, memcg->id.id); 4863 memcg->id.id = 0; 4864 } 4865 } 4866 4867 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 4868 unsigned int n) 4869 { 4870 refcount_add(n, &memcg->id.ref); 4871 } 4872 4873 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4874 { 4875 if (refcount_sub_and_test(n, &memcg->id.ref)) { 4876 mem_cgroup_id_remove(memcg); 4877 4878 /* Memcg ID pins CSS */ 4879 css_put(&memcg->css); 4880 } 4881 } 4882 4883 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4884 { 4885 mem_cgroup_id_put_many(memcg, 1); 4886 } 4887 4888 /** 4889 * mem_cgroup_from_id - look up a memcg from a memcg id 4890 * @id: the memcg id to look up 4891 * 4892 * Caller must hold rcu_read_lock(). 4893 */ 4894 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4895 { 4896 WARN_ON_ONCE(!rcu_read_lock_held()); 4897 return idr_find(&mem_cgroup_idr, id); 4898 } 4899 4900 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4901 { 4902 struct mem_cgroup_per_node *pn; 4903 int tmp = node; 4904 /* 4905 * This routine is called against possible nodes. 4906 * But it's BUG to call kmalloc() against offline node. 4907 * 4908 * TODO: this routine can waste much memory for nodes which will 4909 * never be onlined. It's better to use memory hotplug callback 4910 * function. 4911 */ 4912 if (!node_state(node, N_NORMAL_MEMORY)) 4913 tmp = -1; 4914 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4915 if (!pn) 4916 return 1; 4917 4918 pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat); 4919 if (!pn->lruvec_stat_local) { 4920 kfree(pn); 4921 return 1; 4922 } 4923 4924 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); 4925 if (!pn->lruvec_stat_cpu) { 4926 free_percpu(pn->lruvec_stat_local); 4927 kfree(pn); 4928 return 1; 4929 } 4930 4931 lruvec_init(&pn->lruvec); 4932 pn->usage_in_excess = 0; 4933 pn->on_tree = false; 4934 pn->memcg = memcg; 4935 4936 memcg->nodeinfo[node] = pn; 4937 return 0; 4938 } 4939 4940 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4941 { 4942 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 4943 4944 if (!pn) 4945 return; 4946 4947 free_percpu(pn->lruvec_stat_cpu); 4948 free_percpu(pn->lruvec_stat_local); 4949 kfree(pn); 4950 } 4951 4952 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4953 { 4954 int node; 4955 4956 for_each_node(node) 4957 free_mem_cgroup_per_node_info(memcg, node); 4958 free_percpu(memcg->vmstats_percpu); 4959 free_percpu(memcg->vmstats_local); 4960 kfree(memcg); 4961 } 4962 4963 static void mem_cgroup_free(struct mem_cgroup *memcg) 4964 { 4965 memcg_wb_domain_exit(memcg); 4966 /* 4967 * Flush percpu vmstats and vmevents to guarantee the value correctness 4968 * on parent's and all ancestor levels. 
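 * (The percpu counters are only folded into the atomic counters in
 *  batches, so any deltas still cached on the CPUs at this point would
 *  otherwise be lost and the hierarchical totals would drift.)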
4969 */ 4970 memcg_flush_percpu_vmstats(memcg); 4971 memcg_flush_percpu_vmevents(memcg); 4972 __mem_cgroup_free(memcg); 4973 } 4974 4975 static struct mem_cgroup *mem_cgroup_alloc(void) 4976 { 4977 struct mem_cgroup *memcg; 4978 unsigned int size; 4979 int node; 4980 int __maybe_unused i; 4981 long error = -ENOMEM; 4982 4983 size = sizeof(struct mem_cgroup); 4984 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4985 4986 memcg = kzalloc(size, GFP_KERNEL); 4987 if (!memcg) 4988 return ERR_PTR(error); 4989 4990 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4991 1, MEM_CGROUP_ID_MAX, 4992 GFP_KERNEL); 4993 if (memcg->id.id < 0) { 4994 error = memcg->id.id; 4995 goto fail; 4996 } 4997 4998 memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); 4999 if (!memcg->vmstats_local) 5000 goto fail; 5001 5002 memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); 5003 if (!memcg->vmstats_percpu) 5004 goto fail; 5005 5006 for_each_node(node) 5007 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5008 goto fail; 5009 5010 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5011 goto fail; 5012 5013 INIT_WORK(&memcg->high_work, high_work_func); 5014 INIT_LIST_HEAD(&memcg->oom_notify); 5015 mutex_init(&memcg->thresholds_lock); 5016 spin_lock_init(&memcg->move_lock); 5017 vmpressure_init(&memcg->vmpressure); 5018 INIT_LIST_HEAD(&memcg->event_list); 5019 spin_lock_init(&memcg->event_list_lock); 5020 memcg->socket_pressure = jiffies; 5021 #ifdef CONFIG_MEMCG_KMEM 5022 memcg->kmemcg_id = -1; 5023 #endif 5024 #ifdef CONFIG_CGROUP_WRITEBACK 5025 INIT_LIST_HEAD(&memcg->cgwb_list); 5026 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5027 memcg->cgwb_frn[i].done = 5028 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5029 #endif 5030 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5031 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5032 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5033 memcg->deferred_split_queue.split_queue_len = 0; 5034 #endif 5035 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5036 return memcg; 5037 fail: 5038 mem_cgroup_id_remove(memcg); 5039 __mem_cgroup_free(memcg); 5040 return ERR_PTR(error); 5041 } 5042 5043 static struct cgroup_subsys_state * __ref 5044 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5045 { 5046 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5047 struct mem_cgroup *memcg; 5048 long error = -ENOMEM; 5049 5050 memcg = mem_cgroup_alloc(); 5051 if (IS_ERR(memcg)) 5052 return ERR_CAST(memcg); 5053 5054 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5055 memcg->soft_limit = PAGE_COUNTER_MAX; 5056 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5057 if (parent) { 5058 memcg->swappiness = mem_cgroup_swappiness(parent); 5059 memcg->oom_kill_disable = parent->oom_kill_disable; 5060 } 5061 if (parent && parent->use_hierarchy) { 5062 memcg->use_hierarchy = true; 5063 page_counter_init(&memcg->memory, &parent->memory); 5064 page_counter_init(&memcg->swap, &parent->swap); 5065 page_counter_init(&memcg->memsw, &parent->memsw); 5066 page_counter_init(&memcg->kmem, &parent->kmem); 5067 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5068 } else { 5069 page_counter_init(&memcg->memory, NULL); 5070 page_counter_init(&memcg->swap, NULL); 5071 page_counter_init(&memcg->memsw, NULL); 5072 page_counter_init(&memcg->kmem, NULL); 5073 page_counter_init(&memcg->tcpmem, NULL); 5074 /* 5075 * Deeper hierarchy with use_hierarchy == false doesn't make 5076 * much sense so let the cgroup subsystem know about this 5077 *
unfortunate state in our controller. 5078 */ 5079 if (parent != root_mem_cgroup) 5080 memory_cgrp_subsys.broken_hierarchy = true; 5081 } 5082 5083 /* The following stuff does not apply to the root */ 5084 if (!parent) { 5085 #ifdef CONFIG_MEMCG_KMEM 5086 INIT_LIST_HEAD(&memcg->kmem_caches); 5087 #endif 5088 root_mem_cgroup = memcg; 5089 return &memcg->css; 5090 } 5091 5092 error = memcg_online_kmem(memcg); 5093 if (error) 5094 goto fail; 5095 5096 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5097 static_branch_inc(&memcg_sockets_enabled_key); 5098 5099 return &memcg->css; 5100 fail: 5101 mem_cgroup_id_remove(memcg); 5102 mem_cgroup_free(memcg); 5103 return ERR_PTR(error); 5104 } 5105 5106 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5107 { 5108 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5109 5110 /* 5111 * A memcg must be visible for memcg_expand_shrinker_maps() 5112 * by the time the maps are allocated. So, we allocate maps 5113 * here, when for_each_mem_cgroup() can't skip it. 5114 */ 5115 if (memcg_alloc_shrinker_maps(memcg)) { 5116 mem_cgroup_id_remove(memcg); 5117 return -ENOMEM; 5118 } 5119 5120 /* Online state pins memcg ID, memcg ID pins CSS */ 5121 refcount_set(&memcg->id.ref, 1); 5122 css_get(css); 5123 return 0; 5124 } 5125 5126 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5127 { 5128 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5129 struct mem_cgroup_event *event, *tmp; 5130 5131 /* 5132 * Unregister events and notify userspace. 5133 * Notify userspace about cgroup removing only after rmdir of cgroup 5134 * directory to avoid race between userspace and kernelspace. 5135 */ 5136 spin_lock(&memcg->event_list_lock); 5137 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5138 list_del_init(&event->list); 5139 schedule_work(&event->remove); 5140 } 5141 spin_unlock(&memcg->event_list_lock); 5142 5143 page_counter_set_min(&memcg->memory, 0); 5144 page_counter_set_low(&memcg->memory, 0); 5145 5146 memcg_offline_kmem(memcg); 5147 wb_memcg_offline(memcg); 5148 5149 drain_all_stock(memcg); 5150 5151 mem_cgroup_id_put(memcg); 5152 } 5153 5154 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5155 { 5156 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5157 5158 invalidate_reclaim_iterators(memcg); 5159 } 5160 5161 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5162 { 5163 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5164 int __maybe_unused i; 5165 5166 #ifdef CONFIG_CGROUP_WRITEBACK 5167 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5168 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5169 #endif 5170 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5171 static_branch_dec(&memcg_sockets_enabled_key); 5172 5173 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5174 static_branch_dec(&memcg_sockets_enabled_key); 5175 5176 vmpressure_cleanup(&memcg->vmpressure); 5177 cancel_work_sync(&memcg->high_work); 5178 mem_cgroup_remove_from_trees(memcg); 5179 memcg_free_shrinker_maps(memcg); 5180 memcg_free_kmem(memcg); 5181 mem_cgroup_free(memcg); 5182 } 5183 5184 /** 5185 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5186 * @css: the target css 5187 * 5188 * Reset the states of the mem_cgroup associated with @css. This is 5189 * invoked when the userland requests disabling on the default hierarchy 5190 * but the memcg is pinned through dependency. 
The memcg should stop 5191 * applying policies and should revert to the vanilla state as it may be 5192 * made visible again. 5193 * 5194 * The current implementation only resets the essential configurations. 5195 * This needs to be expanded to cover all the visible parts. 5196 */ 5197 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5198 { 5199 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5200 5201 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5202 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5203 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); 5204 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5205 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5206 page_counter_set_min(&memcg->memory, 0); 5207 page_counter_set_low(&memcg->memory, 0); 5208 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5209 memcg->soft_limit = PAGE_COUNTER_MAX; 5210 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5211 memcg_wb_domain_size_changed(memcg); 5212 } 5213 5214 #ifdef CONFIG_MMU 5215 /* Handlers for move charge at task migration. */ 5216 static int mem_cgroup_do_precharge(unsigned long count) 5217 { 5218 int ret; 5219 5220 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5221 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5222 if (!ret) { 5223 mc.precharge += count; 5224 return ret; 5225 } 5226 5227 /* Try charges one by one with reclaim, but do not retry */ 5228 while (count--) { 5229 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5230 if (ret) 5231 return ret; 5232 mc.precharge++; 5233 cond_resched(); 5234 } 5235 return 0; 5236 } 5237 5238 union mc_target { 5239 struct page *page; 5240 swp_entry_t ent; 5241 }; 5242 5243 enum mc_target_type { 5244 MC_TARGET_NONE = 0, 5245 MC_TARGET_PAGE, 5246 MC_TARGET_SWAP, 5247 MC_TARGET_DEVICE, 5248 }; 5249 5250 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5251 unsigned long addr, pte_t ptent) 5252 { 5253 struct page *page = vm_normal_page(vma, addr, ptent); 5254 5255 if (!page || !page_mapped(page)) 5256 return NULL; 5257 if (PageAnon(page)) { 5258 if (!(mc.flags & MOVE_ANON)) 5259 return NULL; 5260 } else { 5261 if (!(mc.flags & MOVE_FILE)) 5262 return NULL; 5263 } 5264 if (!get_page_unless_zero(page)) 5265 return NULL; 5266 5267 return page; 5268 } 5269 5270 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5271 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5272 pte_t ptent, swp_entry_t *entry) 5273 { 5274 struct page *page = NULL; 5275 swp_entry_t ent = pte_to_swp_entry(ptent); 5276 5277 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 5278 return NULL; 5279 5280 /* 5281 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to 5282 * a device; because they are not accessible by the CPU they are stored 5283 * as special swap entries in the CPU page table. 5284 */ 5285 if (is_device_private_entry(ent)) { 5286 page = device_private_entry_to_page(ent); 5287 /* 5288 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has 5289 * a refcount of 1 when free (unlike a normal page). 5290 */ 5291 if (!page_ref_add_unless(page, 1, 1)) 5292 return NULL; 5293 return page; 5294 } 5295 5296 /* 5297 * Because lookup_swap_cache() updates some statistics counters, 5298 * we call find_get_page() with swapper_space directly.
5299 */ 5300 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5301 entry->val = ent.val; 5302 5303 return page; 5304 } 5305 #else 5306 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5307 pte_t ptent, swp_entry_t *entry) 5308 { 5309 return NULL; 5310 } 5311 #endif 5312 5313 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5314 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5315 { 5316 struct page *page = NULL; 5317 struct address_space *mapping; 5318 pgoff_t pgoff; 5319 5320 if (!vma->vm_file) /* anonymous vma */ 5321 return NULL; 5322 if (!(mc.flags & MOVE_FILE)) 5323 return NULL; 5324 5325 mapping = vma->vm_file->f_mapping; 5326 pgoff = linear_page_index(vma, addr); 5327 5328 /* page is moved even if it's not RSS of this task(page-faulted). */ 5329 #ifdef CONFIG_SWAP 5330 /* shmem/tmpfs may report page out on swap: account for that too. */ 5331 if (shmem_mapping(mapping)) { 5332 page = find_get_entry(mapping, pgoff); 5333 if (xa_is_value(page)) { 5334 swp_entry_t swp = radix_to_swp_entry(page); 5335 *entry = swp; 5336 page = find_get_page(swap_address_space(swp), 5337 swp_offset(swp)); 5338 } 5339 } else 5340 page = find_get_page(mapping, pgoff); 5341 #else 5342 page = find_get_page(mapping, pgoff); 5343 #endif 5344 return page; 5345 } 5346 5347 /** 5348 * mem_cgroup_move_account - move account of the page 5349 * @page: the page 5350 * @compound: charge the page as compound or small page 5351 * @from: mem_cgroup which the page is moved from. 5352 * @to: mem_cgroup which the page is moved to. @from != @to. 5353 * 5354 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5355 * 5356 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5357 * from old cgroup. 5358 */ 5359 static int mem_cgroup_move_account(struct page *page, 5360 bool compound, 5361 struct mem_cgroup *from, 5362 struct mem_cgroup *to) 5363 { 5364 struct lruvec *from_vec, *to_vec; 5365 struct pglist_data *pgdat; 5366 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5367 int ret; 5368 5369 VM_BUG_ON(from == to); 5370 VM_BUG_ON_PAGE(PageLRU(page), page); 5371 VM_BUG_ON(compound && !PageTransHuge(page)); 5372 5373 /* 5374 * Prevent mem_cgroup_migrate() from looking at 5375 * page->mem_cgroup of its source page while we change it. 
5376 */ 5377 ret = -EBUSY; 5378 if (!trylock_page(page)) 5379 goto out; 5380 5381 ret = -EINVAL; 5382 if (page->mem_cgroup != from) 5383 goto out_unlock; 5384 5385 pgdat = page_pgdat(page); 5386 from_vec = mem_cgroup_lruvec(from, pgdat); 5387 to_vec = mem_cgroup_lruvec(to, pgdat); 5388 5389 lock_page_memcg(page); 5390 5391 if (PageAnon(page)) { 5392 if (page_mapped(page)) { 5393 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5394 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5395 if (PageTransHuge(page)) { 5396 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5397 -nr_pages); 5398 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5399 nr_pages); 5400 } 5401 5402 } 5403 } else { 5404 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5405 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5406 5407 if (PageSwapBacked(page)) { 5408 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5409 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5410 } 5411 5412 if (page_mapped(page)) { 5413 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5414 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5415 } 5416 5417 if (PageDirty(page)) { 5418 struct address_space *mapping = page_mapping(page); 5419 5420 if (mapping_cap_account_dirty(mapping)) { 5421 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5422 -nr_pages); 5423 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5424 nr_pages); 5425 } 5426 } 5427 } 5428 5429 if (PageWriteback(page)) { 5430 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5431 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5432 } 5433 5434 /* 5435 * All state has been migrated, let's switch to the new memcg. 5436 * 5437 * It is safe to change page->mem_cgroup here because the page 5438 * is referenced, charged, isolated, and locked: we can't race 5439 * with (un)charging, migration, LRU putback, or anything else 5440 * that would rely on a stable page->mem_cgroup. 5441 * 5442 * Note that lock_page_memcg is a memcg lock, not a page lock, 5443 * to save space. As soon as we switch page->mem_cgroup to a 5444 * new memcg that isn't locked, the above state can change 5445 * concurrently again. Make sure we're truly done with it. 5446 */ 5447 smp_mb(); 5448 5449 page->mem_cgroup = to; /* caller should have done css_get */ 5450 5451 __unlock_page_memcg(from); 5452 5453 ret = 0; 5454 5455 local_irq_disable(); 5456 mem_cgroup_charge_statistics(to, page, nr_pages); 5457 memcg_check_events(to, page); 5458 mem_cgroup_charge_statistics(from, page, -nr_pages); 5459 memcg_check_events(from, page); 5460 local_irq_enable(); 5461 out_unlock: 5462 unlock_page(page); 5463 out: 5464 return ret; 5465 } 5466 5467 /** 5468 * get_mctgt_type - get target type of moving charge 5469 * @vma: the vma the pte to be checked belongs 5470 * @addr: the address corresponding to the pte to be checked 5471 * @ptent: the pte to be checked 5472 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5473 * 5474 * Returns 5475 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5476 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5477 * move charge. if @target is not NULL, the page is stored in target->page 5478 * with extra refcnt got(Callers should handle it). 5479 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5480 * target for charge migration. if @target is not NULL, the entry is stored 5481 * in target->ent. 
5482 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE 5483 * (so ZONE_DEVICE page and thus not on the lru). 5484 * For now such a page is charged like a regular page would be, as for all 5485 * intents and purposes it is just special memory taking the place of a 5486 * regular page. 5487 * 5488 * See Documentation/vm/hmm.rst and include/linux/hmm.h 5489 * 5490 * Called with pte lock held. 5491 */ 5492 5493 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5494 unsigned long addr, pte_t ptent, union mc_target *target) 5495 { 5496 struct page *page = NULL; 5497 enum mc_target_type ret = MC_TARGET_NONE; 5498 swp_entry_t ent = { .val = 0 }; 5499 5500 if (pte_present(ptent)) 5501 page = mc_handle_present_pte(vma, addr, ptent); 5502 else if (is_swap_pte(ptent)) 5503 page = mc_handle_swap_pte(vma, ptent, &ent); 5504 else if (pte_none(ptent)) 5505 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5506 5507 if (!page && !ent.val) 5508 return ret; 5509 if (page) { 5510 /* 5511 * Do only loose check w/o serialization. 5512 * mem_cgroup_move_account() checks the page is valid or 5513 * not under LRU exclusion. 5514 */ 5515 if (page->mem_cgroup == mc.from) { 5516 ret = MC_TARGET_PAGE; 5517 if (is_device_private_page(page)) 5518 ret = MC_TARGET_DEVICE; 5519 if (target) 5520 target->page = page; 5521 } 5522 if (!ret || !target) 5523 put_page(page); 5524 } 5525 /* 5526 * There is a swap entry and a page doesn't exist or isn't charged. 5527 * But we cannot move a tail-page in a THP. 5528 */ 5529 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5530 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5531 ret = MC_TARGET_SWAP; 5532 if (target) 5533 target->ent = ent; 5534 } 5535 return ret; 5536 } 5537 5538 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5539 /* 5540 * We don't consider PMD mapped swapping or file mapped pages because THP does 5541 * not support them for now. 5542 * Caller should make sure that pmd_trans_huge(pmd) is true. 5543 */ 5544 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5545 unsigned long addr, pmd_t pmd, union mc_target *target) 5546 { 5547 struct page *page = NULL; 5548 enum mc_target_type ret = MC_TARGET_NONE; 5549 5550 if (unlikely(is_swap_pmd(pmd))) { 5551 VM_BUG_ON(thp_migration_supported() && 5552 !is_pmd_migration_entry(pmd)); 5553 return ret; 5554 } 5555 page = pmd_page(pmd); 5556 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5557 if (!(mc.flags & MOVE_ANON)) 5558 return ret; 5559 if (page->mem_cgroup == mc.from) { 5560 ret = MC_TARGET_PAGE; 5561 if (target) { 5562 get_page(page); 5563 target->page = page; 5564 } 5565 } 5566 return ret; 5567 } 5568 #else 5569 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5570 unsigned long addr, pmd_t pmd, union mc_target *target) 5571 { 5572 return MC_TARGET_NONE; 5573 } 5574 #endif 5575 5576 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5577 unsigned long addr, unsigned long end, 5578 struct mm_walk *walk) 5579 { 5580 struct vm_area_struct *vma = walk->vma; 5581 pte_t *pte; 5582 spinlock_t *ptl; 5583 5584 ptl = pmd_trans_huge_lock(pmd, vma); 5585 if (ptl) { 5586 /* 5587 * Note there can not be MC_TARGET_DEVICE for now as we do not 5588 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but 5589 * this might change.
5590 */ 5591 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5592 mc.precharge += HPAGE_PMD_NR; 5593 spin_unlock(ptl); 5594 return 0; 5595 } 5596 5597 if (pmd_trans_unstable(pmd)) 5598 return 0; 5599 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5600 for (; addr != end; pte++, addr += PAGE_SIZE) 5601 if (get_mctgt_type(vma, addr, *pte, NULL)) 5602 mc.precharge++; /* increment precharge temporarily */ 5603 pte_unmap_unlock(pte - 1, ptl); 5604 cond_resched(); 5605 5606 return 0; 5607 } 5608 5609 static const struct mm_walk_ops precharge_walk_ops = { 5610 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5611 }; 5612 5613 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5614 { 5615 unsigned long precharge; 5616 5617 mmap_read_lock(mm); 5618 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5619 mmap_read_unlock(mm); 5620 5621 precharge = mc.precharge; 5622 mc.precharge = 0; 5623 5624 return precharge; 5625 } 5626 5627 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5628 { 5629 unsigned long precharge = mem_cgroup_count_precharge(mm); 5630 5631 VM_BUG_ON(mc.moving_task); 5632 mc.moving_task = current; 5633 return mem_cgroup_do_precharge(precharge); 5634 } 5635 5636 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5637 static void __mem_cgroup_clear_mc(void) 5638 { 5639 struct mem_cgroup *from = mc.from; 5640 struct mem_cgroup *to = mc.to; 5641 5642 /* we must uncharge all the leftover precharges from mc.to */ 5643 if (mc.precharge) { 5644 cancel_charge(mc.to, mc.precharge); 5645 mc.precharge = 0; 5646 } 5647 /* 5648 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5649 * we must uncharge here. 5650 */ 5651 if (mc.moved_charge) { 5652 cancel_charge(mc.from, mc.moved_charge); 5653 mc.moved_charge = 0; 5654 } 5655 /* we must fixup refcnts and charges */ 5656 if (mc.moved_swap) { 5657 /* uncharge swap account from the old cgroup */ 5658 if (!mem_cgroup_is_root(mc.from)) 5659 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5660 5661 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5662 5663 /* 5664 * we charged both to->memory and to->memsw, so we 5665 * should uncharge to->memory. 5666 */ 5667 if (!mem_cgroup_is_root(mc.to)) 5668 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5669 5670 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 5671 css_put_many(&mc.to->css, mc.moved_swap); 5672 5673 mc.moved_swap = 0; 5674 } 5675 memcg_oom_recover(from); 5676 memcg_oom_recover(to); 5677 wake_up_all(&mc.waitq); 5678 } 5679 5680 static void mem_cgroup_clear_mc(void) 5681 { 5682 struct mm_struct *mm = mc.mm; 5683 5684 /* 5685 * we must clear moving_task before waking up waiters at the end of 5686 * task migration. 
5687 */ 5688 mc.moving_task = NULL; 5689 __mem_cgroup_clear_mc(); 5690 spin_lock(&mc.lock); 5691 mc.from = NULL; 5692 mc.to = NULL; 5693 mc.mm = NULL; 5694 spin_unlock(&mc.lock); 5695 5696 mmput(mm); 5697 } 5698 5699 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5700 { 5701 struct cgroup_subsys_state *css; 5702 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5703 struct mem_cgroup *from; 5704 struct task_struct *leader, *p; 5705 struct mm_struct *mm; 5706 unsigned long move_flags; 5707 int ret = 0; 5708 5709 /* charge immigration isn't supported on the default hierarchy */ 5710 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5711 return 0; 5712 5713 /* 5714 * Multi-process migrations only happen on the default hierarchy 5715 * where charge immigration is not used. Perform charge 5716 * immigration if @tset contains a leader and whine if there are 5717 * multiple. 5718 */ 5719 p = NULL; 5720 cgroup_taskset_for_each_leader(leader, css, tset) { 5721 WARN_ON_ONCE(p); 5722 p = leader; 5723 memcg = mem_cgroup_from_css(css); 5724 } 5725 if (!p) 5726 return 0; 5727 5728 /* 5729 * We are now committed to this value whatever it is. Changes in this 5730 * tunable will only affect upcoming migrations, not the current one. 5731 * So we need to save it, and keep it going. 5732 */ 5733 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5734 if (!move_flags) 5735 return 0; 5736 5737 from = mem_cgroup_from_task(p); 5738 5739 VM_BUG_ON(from == memcg); 5740 5741 mm = get_task_mm(p); 5742 if (!mm) 5743 return 0; 5744 /* We move charges only when we move an owner of the mm */ 5745 if (mm->owner == p) { 5746 VM_BUG_ON(mc.from); 5747 VM_BUG_ON(mc.to); 5748 VM_BUG_ON(mc.precharge); 5749 VM_BUG_ON(mc.moved_charge); 5750 VM_BUG_ON(mc.moved_swap); 5751 5752 spin_lock(&mc.lock); 5753 mc.mm = mm; 5754 mc.from = from; 5755 mc.to = memcg; 5756 mc.flags = move_flags; 5757 spin_unlock(&mc.lock); 5758 /* We set mc.moving_task later */ 5759 5760 ret = mem_cgroup_precharge_mc(mm); 5761 if (ret) 5762 mem_cgroup_clear_mc(); 5763 } else { 5764 mmput(mm); 5765 } 5766 return ret; 5767 } 5768 5769 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5770 { 5771 if (mc.to) 5772 mem_cgroup_clear_mc(); 5773 } 5774 5775 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5776 unsigned long addr, unsigned long end, 5777 struct mm_walk *walk) 5778 { 5779 int ret = 0; 5780 struct vm_area_struct *vma = walk->vma; 5781 pte_t *pte; 5782 spinlock_t *ptl; 5783 enum mc_target_type target_type; 5784 union mc_target target; 5785 struct page *page; 5786 5787 ptl = pmd_trans_huge_lock(pmd, vma); 5788 if (ptl) { 5789 if (mc.precharge < HPAGE_PMD_NR) { 5790 spin_unlock(ptl); 5791 return 0; 5792 } 5793 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5794 if (target_type == MC_TARGET_PAGE) { 5795 page = target.page; 5796 if (!isolate_lru_page(page)) { 5797 if (!mem_cgroup_move_account(page, true, 5798 mc.from, mc.to)) { 5799 mc.precharge -= HPAGE_PMD_NR; 5800 mc.moved_charge += HPAGE_PMD_NR; 5801 } 5802 putback_lru_page(page); 5803 } 5804 put_page(page); 5805 } else if (target_type == MC_TARGET_DEVICE) { 5806 page = target.page; 5807 if (!mem_cgroup_move_account(page, true, 5808 mc.from, mc.to)) { 5809 mc.precharge -= HPAGE_PMD_NR; 5810 mc.moved_charge += HPAGE_PMD_NR; 5811 } 5812 put_page(page); 5813 } 5814 spin_unlock(ptl); 5815 return 0; 5816 } 5817 5818 if (pmd_trans_unstable(pmd)) 5819 return 0; 5820 retry: 5821 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5822 for
(; addr != end; addr += PAGE_SIZE) { 5823 pte_t ptent = *(pte++); 5824 bool device = false; 5825 swp_entry_t ent; 5826 5827 if (!mc.precharge) 5828 break; 5829 5830 switch (get_mctgt_type(vma, addr, ptent, &target)) { 5831 case MC_TARGET_DEVICE: 5832 device = true; 5833 fallthrough; 5834 case MC_TARGET_PAGE: 5835 page = target.page; 5836 /* 5837 * We can have a part of the split pmd here. Moving it 5838 * can be done but it would be too convoluted so simply 5839 * ignore such a partial THP and keep it in original 5840 * memcg. There should be somebody mapping the head. 5841 */ 5842 if (PageTransCompound(page)) 5843 goto put; 5844 if (!device && isolate_lru_page(page)) 5845 goto put; 5846 if (!mem_cgroup_move_account(page, false, 5847 mc.from, mc.to)) { 5848 mc.precharge--; 5849 /* we uncharge from mc.from later. */ 5850 mc.moved_charge++; 5851 } 5852 if (!device) 5853 putback_lru_page(page); 5854 put: /* get_mctgt_type() gets the page */ 5855 put_page(page); 5856 break; 5857 case MC_TARGET_SWAP: 5858 ent = target.ent; 5859 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5860 mc.precharge--; 5861 /* we fixup refcnts and charges later. */ 5862 mc.moved_swap++; 5863 } 5864 break; 5865 default: 5866 break; 5867 } 5868 } 5869 pte_unmap_unlock(pte - 1, ptl); 5870 cond_resched(); 5871 5872 if (addr != end) { 5873 /* 5874 * We have consumed all precharges we got in can_attach(). 5875 * We try charge one by one, but don't do any additional 5876 * charges to mc.to if we have failed in charge once in attach() 5877 * phase. 5878 */ 5879 ret = mem_cgroup_do_precharge(1); 5880 if (!ret) 5881 goto retry; 5882 } 5883 5884 return ret; 5885 } 5886 5887 static const struct mm_walk_ops charge_walk_ops = { 5888 .pmd_entry = mem_cgroup_move_charge_pte_range, 5889 }; 5890 5891 static void mem_cgroup_move_charge(void) 5892 { 5893 lru_add_drain_all(); 5894 /* 5895 * Signal lock_page_memcg() to take the memcg's move_lock 5896 * while we're moving its pages to another memcg. Then wait 5897 * for already started RCU-only updates to finish. 5898 */ 5899 atomic_inc(&mc.from->moving_account); 5900 synchronize_rcu(); 5901 retry: 5902 if (unlikely(!mmap_read_trylock(mc.mm))) { 5903 /* 5904 * Someone who are holding the mmap_lock might be waiting in 5905 * waitq. So we cancel all extra charges, wake up all waiters, 5906 * and retry. Because we cancel precharges, we might not be able 5907 * to move enough charges, but moving charge is a best-effort 5908 * feature anyway, so it wouldn't be a big problem. 5909 */ 5910 __mem_cgroup_clear_mc(); 5911 cond_resched(); 5912 goto retry; 5913 } 5914 /* 5915 * When we have consumed all precharges and failed in doing 5916 * additional charge, the page walk just aborts. 5917 */ 5918 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 5919 NULL); 5920 5921 mmap_read_unlock(mc.mm); 5922 atomic_dec(&mc.from->moving_account); 5923 } 5924 5925 static void mem_cgroup_move_task(void) 5926 { 5927 if (mc.to) { 5928 mem_cgroup_move_charge(); 5929 mem_cgroup_clear_mc(); 5930 } 5931 } 5932 #else /* !CONFIG_MMU */ 5933 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5934 { 5935 return 0; 5936 } 5937 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5938 { 5939 } 5940 static void mem_cgroup_move_task(void) 5941 { 5942 } 5943 #endif 5944 5945 /* 5946 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5947 * to verify whether we're attached to the default hierarchy on each mount 5948 * attempt. 
5949 */ 5950 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5951 { 5952 /* 5953 * use_hierarchy is forced on the default hierarchy. cgroup core 5954 * guarantees that @root doesn't have any children, so turning it 5955 * on for the root memcg is enough. 5956 */ 5957 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5958 root_mem_cgroup->use_hierarchy = true; 5959 else 5960 root_mem_cgroup->use_hierarchy = false; 5961 } 5962 5963 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 5964 { 5965 if (value == PAGE_COUNTER_MAX) 5966 seq_puts(m, "max\n"); 5967 else 5968 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 5969 5970 return 0; 5971 } 5972 5973 static u64 memory_current_read(struct cgroup_subsys_state *css, 5974 struct cftype *cft) 5975 { 5976 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5977 5978 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5979 } 5980 5981 static int memory_min_show(struct seq_file *m, void *v) 5982 { 5983 return seq_puts_memcg_tunable(m, 5984 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 5985 } 5986 5987 static ssize_t memory_min_write(struct kernfs_open_file *of, 5988 char *buf, size_t nbytes, loff_t off) 5989 { 5990 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5991 unsigned long min; 5992 int err; 5993 5994 buf = strstrip(buf); 5995 err = page_counter_memparse(buf, "max", &min); 5996 if (err) 5997 return err; 5998 5999 page_counter_set_min(&memcg->memory, min); 6000 6001 return nbytes; 6002 } 6003 6004 static int memory_low_show(struct seq_file *m, void *v) 6005 { 6006 return seq_puts_memcg_tunable(m, 6007 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6008 } 6009 6010 static ssize_t memory_low_write(struct kernfs_open_file *of, 6011 char *buf, size_t nbytes, loff_t off) 6012 { 6013 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6014 unsigned long low; 6015 int err; 6016 6017 buf = strstrip(buf); 6018 err = page_counter_memparse(buf, "max", &low); 6019 if (err) 6020 return err; 6021 6022 page_counter_set_low(&memcg->memory, low); 6023 6024 return nbytes; 6025 } 6026 6027 static int memory_high_show(struct seq_file *m, void *v) 6028 { 6029 return seq_puts_memcg_tunable(m, 6030 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6031 } 6032 6033 static ssize_t memory_high_write(struct kernfs_open_file *of, 6034 char *buf, size_t nbytes, loff_t off) 6035 { 6036 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6037 unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 6038 bool drained = false; 6039 unsigned long high; 6040 int err; 6041 6042 buf = strstrip(buf); 6043 err = page_counter_memparse(buf, "max", &high); 6044 if (err) 6045 return err; 6046 6047 page_counter_set_high(&memcg->memory, high); 6048 6049 for (;;) { 6050 unsigned long nr_pages = page_counter_read(&memcg->memory); 6051 unsigned long reclaimed; 6052 6053 if (nr_pages <= high) 6054 break; 6055 6056 if (signal_pending(current)) 6057 break; 6058 6059 if (!drained) { 6060 drain_all_stock(memcg); 6061 drained = true; 6062 continue; 6063 } 6064 6065 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6066 GFP_KERNEL, true); 6067 6068 if (!reclaimed && !nr_retries--) 6069 break; 6070 } 6071 6072 return nbytes; 6073 } 6074 6075 static int memory_max_show(struct seq_file *m, void *v) 6076 { 6077 return seq_puts_memcg_tunable(m, 6078 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6079 } 6080 6081 static ssize_t memory_max_write(struct kernfs_open_file *of, 6082 char *buf, size_t nbytes, loff_t off) 
6083 { 6084 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6085 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 6086 bool drained = false; 6087 unsigned long max; 6088 int err; 6089 6090 buf = strstrip(buf); 6091 err = page_counter_memparse(buf, "max", &max); 6092 if (err) 6093 return err; 6094 6095 xchg(&memcg->memory.max, max); 6096 6097 for (;;) { 6098 unsigned long nr_pages = page_counter_read(&memcg->memory); 6099 6100 if (nr_pages <= max) 6101 break; 6102 6103 if (signal_pending(current)) 6104 break; 6105 6106 if (!drained) { 6107 drain_all_stock(memcg); 6108 drained = true; 6109 continue; 6110 } 6111 6112 if (nr_reclaims) { 6113 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6114 GFP_KERNEL, true)) 6115 nr_reclaims--; 6116 continue; 6117 } 6118 6119 memcg_memory_event(memcg, MEMCG_OOM); 6120 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6121 break; 6122 } 6123 6124 memcg_wb_domain_size_changed(memcg); 6125 return nbytes; 6126 } 6127 6128 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6129 { 6130 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6131 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6132 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6133 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6134 seq_printf(m, "oom_kill %lu\n", 6135 atomic_long_read(&events[MEMCG_OOM_KILL])); 6136 } 6137 6138 static int memory_events_show(struct seq_file *m, void *v) 6139 { 6140 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6141 6142 __memory_events_show(m, memcg->memory_events); 6143 return 0; 6144 } 6145 6146 static int memory_events_local_show(struct seq_file *m, void *v) 6147 { 6148 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6149 6150 __memory_events_show(m, memcg->memory_events_local); 6151 return 0; 6152 } 6153 6154 static int memory_stat_show(struct seq_file *m, void *v) 6155 { 6156 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6157 char *buf; 6158 6159 buf = memory_stat_format(memcg); 6160 if (!buf) 6161 return -ENOMEM; 6162 seq_puts(m, buf); 6163 kfree(buf); 6164 return 0; 6165 } 6166 6167 static int memory_oom_group_show(struct seq_file *m, void *v) 6168 { 6169 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6170 6171 seq_printf(m, "%d\n", memcg->oom_group); 6172 6173 return 0; 6174 } 6175 6176 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6177 char *buf, size_t nbytes, loff_t off) 6178 { 6179 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6180 int ret, oom_group; 6181 6182 buf = strstrip(buf); 6183 if (!buf) 6184 return -EINVAL; 6185 6186 ret = kstrtoint(buf, 0, &oom_group); 6187 if (ret) 6188 return ret; 6189 6190 if (oom_group != 0 && oom_group != 1) 6191 return -EINVAL; 6192 6193 memcg->oom_group = oom_group; 6194 6195 return nbytes; 6196 } 6197 6198 static struct cftype memory_files[] = { 6199 { 6200 .name = "current", 6201 .flags = CFTYPE_NOT_ON_ROOT, 6202 .read_u64 = memory_current_read, 6203 }, 6204 { 6205 .name = "min", 6206 .flags = CFTYPE_NOT_ON_ROOT, 6207 .seq_show = memory_min_show, 6208 .write = memory_min_write, 6209 }, 6210 { 6211 .name = "low", 6212 .flags = CFTYPE_NOT_ON_ROOT, 6213 .seq_show = memory_low_show, 6214 .write = memory_low_write, 6215 }, 6216 { 6217 .name = "high", 6218 .flags = CFTYPE_NOT_ON_ROOT, 6219 .seq_show = memory_high_show, 6220 .write = memory_high_write, 6221 }, 6222 { 6223 .name = "max", 6224 .flags = CFTYPE_NOT_ON_ROOT, 6225 .seq_show = 
memory_max_show, 6226 .write = memory_max_write, 6227 }, 6228 { 6229 .name = "events", 6230 .flags = CFTYPE_NOT_ON_ROOT, 6231 .file_offset = offsetof(struct mem_cgroup, events_file), 6232 .seq_show = memory_events_show, 6233 }, 6234 { 6235 .name = "events.local", 6236 .flags = CFTYPE_NOT_ON_ROOT, 6237 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6238 .seq_show = memory_events_local_show, 6239 }, 6240 { 6241 .name = "stat", 6242 .seq_show = memory_stat_show, 6243 }, 6244 { 6245 .name = "oom.group", 6246 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6247 .seq_show = memory_oom_group_show, 6248 .write = memory_oom_group_write, 6249 }, 6250 { } /* terminate */ 6251 }; 6252 6253 struct cgroup_subsys memory_cgrp_subsys = { 6254 .css_alloc = mem_cgroup_css_alloc, 6255 .css_online = mem_cgroup_css_online, 6256 .css_offline = mem_cgroup_css_offline, 6257 .css_released = mem_cgroup_css_released, 6258 .css_free = mem_cgroup_css_free, 6259 .css_reset = mem_cgroup_css_reset, 6260 .can_attach = mem_cgroup_can_attach, 6261 .cancel_attach = mem_cgroup_cancel_attach, 6262 .post_attach = mem_cgroup_move_task, 6263 .bind = mem_cgroup_bind, 6264 .dfl_cftypes = memory_files, 6265 .legacy_cftypes = mem_cgroup_legacy_files, 6266 .early_init = 0, 6267 }; 6268 6269 /* 6270 * This function calculates an individual cgroup's effective 6271 * protection which is derived from its own memory.min/low, its 6272 * parent's and siblings' settings, as well as the actual memory 6273 * distribution in the tree. 6274 * 6275 * The following rules apply to the effective protection values: 6276 * 6277 * 1. At the first level of reclaim, effective protection is equal to 6278 * the declared protection in memory.min and memory.low. 6279 * 6280 * 2. To enable safe delegation of the protection configuration, at 6281 * subsequent levels the effective protection is capped to the 6282 * parent's effective protection. 6283 * 6284 * 3. To make complex and dynamic subtrees easier to configure, the 6285 * user is allowed to overcommit the declared protection at a given 6286 * level. If that is the case, the parent's effective protection is 6287 * distributed to the children in proportion to how much protection 6288 * they have declared and how much of it they are utilizing. 6289 * 6290 * This makes distribution proportional, but also work-conserving: 6291 * if one cgroup claims much more protection than it uses memory, 6292 * the unused remainder is available to its siblings. 6293 * 6294 * 4. Conversely, when the declared protection is undercommitted at a 6295 * given level, the distribution of the larger parental protection 6296 * budget is NOT proportional. A cgroup's protection from a sibling 6297 * is capped to its own memory.min/low setting. 6298 * 6299 * 5. However, to allow protecting recursive subtrees from each other 6300 * without having to declare each individual cgroup's fixed share 6301 * of the ancestor's claim to protection, any unutilized - 6302 * "floating" - protection from up the tree is distributed in 6303 * proportion to each cgroup's *usage*. This makes the protection 6304 * neutral wrt sibling cgroups and lets them compete freely over 6305 * the shared parental protection budget, but it protects the 6306 * subtree as a whole from neighboring subtrees. 6307 * 6308 * Note that 4. and 5. are not in conflict: 4. is about protecting 6309 * against immediate siblings whereas 5. is about protecting against 6310 * neighboring subtrees. 
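 *
 * A worked illustration of rule 3 (editor's addition; the figures are
 * arbitrary and only meant to make the arithmetic concrete): assume a
 * parent whose effective protection is 8G and two children, A and B.
 * A declares memory.low=8G but uses only 6G; B declares memory.low=4G
 * and uses all 4G. Utilized protection is min(usage, setting), so A
 * contributes 6G and B contributes 4G, 10G in total, which is more
 * than the parent affords. The overcommit rule therefore scales each
 * share by 8G/10G:
 *
 *	A: 6G * 8G / 10G = 4.8G effective low
 *	B: 4G * 8G / 10G = 3.2G effective low
 *
 * which is exactly the protected * parent_effective / siblings_protected
 * computation in effective_protection() below.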
6311 */ 6312 static unsigned long effective_protection(unsigned long usage, 6313 unsigned long parent_usage, 6314 unsigned long setting, 6315 unsigned long parent_effective, 6316 unsigned long siblings_protected) 6317 { 6318 unsigned long protected; 6319 unsigned long ep; 6320 6321 protected = min(usage, setting); 6322 /* 6323 * If all cgroups at this level combined claim and use more 6324 * protection than what the parent affords them, distribute 6325 * shares in proportion to utilization. 6326 * 6327 * We are using actual utilization rather than the statically 6328 * claimed protection in order to be work-conserving: claimed 6329 * but unused protection is available to siblings that would 6330 * otherwise get a smaller chunk than what they claimed. 6331 */ 6332 if (siblings_protected > parent_effective) 6333 return protected * parent_effective / siblings_protected; 6334 6335 /* 6336 * Ok, utilized protection of all children is within what the 6337 * parent affords them, so we know whatever this child claims 6338 * and utilizes is effectively protected. 6339 * 6340 * If there is unprotected usage beyond this value, reclaim 6341 * will apply pressure in proportion to that amount. 6342 * 6343 * If there is unutilized protection, the cgroup will be fully 6344 * shielded from reclaim, but we do return a smaller value for 6345 * protection than what the group could enjoy in theory. This 6346 * is okay. With the overcommit distribution above, effective 6347 * protection is always dependent on how memory is actually 6348 * consumed among the siblings anyway. 6349 */ 6350 ep = protected; 6351 6352 /* 6353 * If the children aren't claiming (all of) the protection 6354 * afforded to them by the parent, distribute the remainder in 6355 * proportion to the (unprotected) memory of each cgroup. That 6356 * way, cgroups that aren't explicitly prioritized wrt each 6357 * other compete freely over the allowance, but they are 6358 * collectively protected from neighboring trees. 6359 * 6360 * We're using unprotected memory for the weight so that if 6361 * some cgroups DO claim explicit protection, we don't protect 6362 * the same bytes twice. 6363 */ 6364 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6365 return ep; 6366 6367 if (parent_effective > siblings_protected && usage > protected) { 6368 unsigned long unclaimed; 6369 6370 unclaimed = parent_effective - siblings_protected; 6371 unclaimed *= usage - protected; 6372 unclaimed /= parent_usage - siblings_protected; 6373 6374 ep += unclaimed; 6375 } 6376 6377 return ep; 6378 } 6379 6380 /** 6381 * mem_cgroup_protected - check if memory consumption is in the normal range 6382 * @root: the top ancestor of the sub-tree being checked 6383 * @memcg: the memory cgroup to check 6384 * 6385 * WARNING: This function is not stateless! It can only be used as part 6386 * of a top-down tree iteration, not for isolated queries. 6387 * 6388 * Returns one of the following: 6389 * MEMCG_PROT_NONE: cgroup memory is not protected 6390 * MEMCG_PROT_LOW: cgroup memory is protected as long as there is 6391 * an unprotected supply of reclaimable memory from other cgroups.
6392 * MEMCG_PROT_MIN: cgroup memory is protected 6393 */ 6394 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, 6395 struct mem_cgroup *memcg) 6396 { 6397 unsigned long usage, parent_usage; 6398 struct mem_cgroup *parent; 6399 6400 if (mem_cgroup_disabled()) 6401 return MEMCG_PROT_NONE; 6402 6403 if (!root) 6404 root = root_mem_cgroup; 6405 if (memcg == root) 6406 return MEMCG_PROT_NONE; 6407 6408 usage = page_counter_read(&memcg->memory); 6409 if (!usage) 6410 return MEMCG_PROT_NONE; 6411 6412 parent = parent_mem_cgroup(memcg); 6413 /* No parent means a non-hierarchical mode on v1 memcg */ 6414 if (!parent) 6415 return MEMCG_PROT_NONE; 6416 6417 if (parent == root) { 6418 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6419 memcg->memory.elow = memcg->memory.low; 6420 goto out; 6421 } 6422 6423 parent_usage = page_counter_read(&parent->memory); 6424 6425 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6426 READ_ONCE(memcg->memory.min), 6427 READ_ONCE(parent->memory.emin), 6428 atomic_long_read(&parent->memory.children_min_usage))); 6429 6430 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6431 memcg->memory.low, READ_ONCE(parent->memory.elow), 6432 atomic_long_read(&parent->memory.children_low_usage))); 6433 6434 out: 6435 if (usage <= memcg->memory.emin) 6436 return MEMCG_PROT_MIN; 6437 else if (usage <= memcg->memory.elow) 6438 return MEMCG_PROT_LOW; 6439 else 6440 return MEMCG_PROT_NONE; 6441 } 6442 6443 /** 6444 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6445 * @page: page to charge 6446 * @mm: mm context of the victim 6447 * @gfp_mask: reclaim mode 6448 * 6449 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6450 * pages according to @gfp_mask if necessary. 6451 * 6452 * Returns 0 on success. Otherwise, an error code is returned. 6453 */ 6454 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6455 { 6456 unsigned int nr_pages = hpage_nr_pages(page); 6457 struct mem_cgroup *memcg = NULL; 6458 int ret = 0; 6459 6460 if (mem_cgroup_disabled()) 6461 goto out; 6462 6463 if (PageSwapCache(page)) { 6464 swp_entry_t ent = { .val = page_private(page), }; 6465 unsigned short id; 6466 6467 /* 6468 * Every swap fault against a single page tries to charge the 6469 * page, bail as early as possible. shmem_unuse() encounters 6470 * already charged pages, too. page->mem_cgroup is protected 6471 * by the page lock, which serializes swap cache removal, which 6472 * in turn serializes uncharging. 6473 */ 6474 VM_BUG_ON_PAGE(!PageLocked(page), page); 6475 if (compound_head(page)->mem_cgroup) 6476 goto out; 6477 6478 id = lookup_swap_cgroup_id(ent); 6479 rcu_read_lock(); 6480 memcg = mem_cgroup_from_id(id); 6481 if (memcg && !css_tryget_online(&memcg->css)) 6482 memcg = NULL; 6483 rcu_read_unlock(); 6484 } 6485 6486 if (!memcg) 6487 memcg = get_mem_cgroup_from_mm(mm); 6488 6489 ret = try_charge(memcg, gfp_mask, nr_pages); 6490 if (ret) 6491 goto out_put; 6492 6493 commit_charge(page, memcg); 6494 6495 local_irq_disable(); 6496 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6497 memcg_check_events(memcg, page); 6498 local_irq_enable(); 6499 6500 if (PageSwapCache(page)) { 6501 swp_entry_t entry = { .val = page_private(page) }; 6502 /* 6503 * The swap entry might not get freed for a long time, 6504 * let's not wait for it. The page already received a 6505 * memory+swap charge, drop the swap entry duplicate. 
6506 */ 6507 mem_cgroup_uncharge_swap(entry, nr_pages); 6508 } 6509 6510 out_put: 6511 css_put(&memcg->css); 6512 out: 6513 return ret; 6514 } 6515 6516 struct uncharge_gather { 6517 struct mem_cgroup *memcg; 6518 unsigned long nr_pages; 6519 unsigned long pgpgout; 6520 unsigned long nr_kmem; 6521 struct page *dummy_page; 6522 }; 6523 6524 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6525 { 6526 memset(ug, 0, sizeof(*ug)); 6527 } 6528 6529 static void uncharge_batch(const struct uncharge_gather *ug) 6530 { 6531 unsigned long flags; 6532 6533 if (!mem_cgroup_is_root(ug->memcg)) { 6534 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); 6535 if (do_memsw_account()) 6536 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); 6537 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6538 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6539 memcg_oom_recover(ug->memcg); 6540 } 6541 6542 local_irq_save(flags); 6543 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6544 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); 6545 memcg_check_events(ug->memcg, ug->dummy_page); 6546 local_irq_restore(flags); 6547 6548 if (!mem_cgroup_is_root(ug->memcg)) 6549 css_put_many(&ug->memcg->css, ug->nr_pages); 6550 } 6551 6552 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6553 { 6554 unsigned long nr_pages; 6555 6556 VM_BUG_ON_PAGE(PageLRU(page), page); 6557 6558 if (!page->mem_cgroup) 6559 return; 6560 6561 /* 6562 * Nobody should be changing or seriously looking at 6563 * page->mem_cgroup at this point; we have fully 6564 * exclusive access to the page. 6565 */ 6566 6567 if (ug->memcg != page->mem_cgroup) { 6568 if (ug->memcg) { 6569 uncharge_batch(ug); 6570 uncharge_gather_clear(ug); 6571 } 6572 ug->memcg = page->mem_cgroup; 6573 } 6574 6575 nr_pages = compound_nr(page); 6576 ug->nr_pages += nr_pages; 6577 6578 if (!PageKmemcg(page)) { 6579 ug->pgpgout++; 6580 } else { 6581 ug->nr_kmem += nr_pages; 6582 __ClearPageKmemcg(page); 6583 } 6584 6585 ug->dummy_page = page; 6586 page->mem_cgroup = NULL; 6587 } 6588 6589 static void uncharge_list(struct list_head *page_list) 6590 { 6591 struct uncharge_gather ug; 6592 struct list_head *next; 6593 6594 uncharge_gather_clear(&ug); 6595 6596 /* 6597 * Note that the list can be a single page->lru; hence the 6598 * do-while loop instead of a simple list_for_each_entry(). 6599 */ 6600 next = page_list->next; 6601 do { 6602 struct page *page; 6603 6604 page = list_entry(next, struct page, lru); 6605 next = page->lru.next; 6606 6607 uncharge_page(page, &ug); 6608 } while (next != page_list); 6609 6610 if (ug.memcg) 6611 uncharge_batch(&ug); 6612 } 6613 6614 /** 6615 * mem_cgroup_uncharge - uncharge a page 6616 * @page: page to uncharge 6617 * 6618 * Uncharge a page previously charged with mem_cgroup_charge(). 6619 */ 6620 void mem_cgroup_uncharge(struct page *page) 6621 { 6622 struct uncharge_gather ug; 6623 6624 if (mem_cgroup_disabled()) 6625 return; 6626 6627 /* Don't touch page->lru of any random page, pre-check: */ 6628 if (!page->mem_cgroup) 6629 return; 6630 6631 uncharge_gather_clear(&ug); 6632 uncharge_page(page, &ug); 6633 uncharge_batch(&ug); 6634 } 6635 6636 /** 6637 * mem_cgroup_uncharge_list - uncharge a list of pages 6638 * @page_list: list of pages to uncharge 6639 * 6640 * Uncharge a list of pages previously charged with 6641 * mem_cgroup_charge().
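 *
 * Caller sketch (editor's illustration; reclaim in mm/vmscan.c follows
 * roughly this shape, with can_free() standing in for the real checks):
 *
 *	LIST_HEAD(free_pages);
 *
 *	list_for_each_entry_safe(page, next, page_list, lru)
 *		if (can_free(page))			// hypothetical predicate
 *			list_move(&page->lru, &free_pages);
 *
 *	mem_cgroup_uncharge_list(&free_pages);	// one batched memcg uncharge
 *	free_unref_page_list(&free_pages);	// then hand pages back to the allocator
 *
 * Batching lets pages that belong to the same memcg share a single
 * page_counter update; see uncharge_batch() above.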
6642 */ 6643 void mem_cgroup_uncharge_list(struct list_head *page_list) 6644 { 6645 if (mem_cgroup_disabled()) 6646 return; 6647 6648 if (!list_empty(page_list)) 6649 uncharge_list(page_list); 6650 } 6651 6652 /** 6653 * mem_cgroup_migrate - charge a page's replacement 6654 * @oldpage: currently circulating page 6655 * @newpage: replacement page 6656 * 6657 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6658 * be uncharged upon free. 6659 * 6660 * Both pages must be locked, and @newpage->mapping must be set up. 6661 */ 6662 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6663 { 6664 struct mem_cgroup *memcg; 6665 unsigned int nr_pages; 6666 unsigned long flags; 6667 6668 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6669 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6670 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6671 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6672 newpage); 6673 6674 if (mem_cgroup_disabled()) 6675 return; 6676 6677 /* Page cache replacement: new page already charged? */ 6678 if (newpage->mem_cgroup) 6679 return; 6680 6681 /* Swapcache readahead pages can get replaced before being charged */ 6682 memcg = oldpage->mem_cgroup; 6683 if (!memcg) 6684 return; 6685 6686 /* Force-charge the new page. The old one will be freed soon */ 6687 nr_pages = hpage_nr_pages(newpage); 6688 6689 page_counter_charge(&memcg->memory, nr_pages); 6690 if (do_memsw_account()) 6691 page_counter_charge(&memcg->memsw, nr_pages); 6692 css_get_many(&memcg->css, nr_pages); 6693 6694 commit_charge(newpage, memcg); 6695 6696 local_irq_save(flags); 6697 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 6698 memcg_check_events(memcg, newpage); 6699 local_irq_restore(flags); 6700 } 6701 6702 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 6703 EXPORT_SYMBOL(memcg_sockets_enabled_key); 6704 6705 void mem_cgroup_sk_alloc(struct sock *sk) 6706 { 6707 struct mem_cgroup *memcg; 6708 6709 if (!mem_cgroup_sockets_enabled) 6710 return; 6711 6712 /* Do not associate the sock with an unrelated interrupted task's memcg. */ 6713 if (in_interrupt()) 6714 return; 6715 6716 rcu_read_lock(); 6717 memcg = mem_cgroup_from_task(current); 6718 if (memcg == root_mem_cgroup) 6719 goto out; 6720 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 6721 goto out; 6722 if (css_tryget(&memcg->css)) 6723 sk->sk_memcg = memcg; 6724 out: 6725 rcu_read_unlock(); 6726 } 6727 6728 void mem_cgroup_sk_free(struct sock *sk) 6729 { 6730 if (sk->sk_memcg) 6731 css_put(&sk->sk_memcg->css); 6732 } 6733 6734 /** 6735 * mem_cgroup_charge_skmem - charge socket memory 6736 * @memcg: memcg to charge 6737 * @nr_pages: number of pages to charge 6738 * 6739 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 6740 * @memcg's configured limit, %false if the charge had to be forced.
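 *
 * Caller sketch (editor's illustration, not a verbatim quote of the
 * networking code, which does roughly this when a socket wants to grow
 * its buffers; enter_memory_pressure() stands in for whatever throttling
 * the caller applies):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		enter_memory_pressure(sk);	// hypothetical reaction to a forced charge
 *
 * A %false return means the pages were charged anyway (the charge was
 * forced), so the caller is expected to back off rather than retry.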
6741 */ 6742 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6743 { 6744 gfp_t gfp_mask = GFP_KERNEL; 6745 6746 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6747 struct page_counter *fail; 6748 6749 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 6750 memcg->tcpmem_pressure = 0; 6751 return true; 6752 } 6753 page_counter_charge(&memcg->tcpmem, nr_pages); 6754 memcg->tcpmem_pressure = 1; 6755 return false; 6756 } 6757 6758 /* Don't block in the packet receive path */ 6759 if (in_softirq()) 6760 gfp_mask = GFP_NOWAIT; 6761 6762 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 6763 6764 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 6765 return true; 6766 6767 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 6768 return false; 6769 } 6770 6771 /** 6772 * mem_cgroup_uncharge_skmem - uncharge socket memory 6773 * @memcg: memcg to uncharge 6774 * @nr_pages: number of pages to uncharge 6775 */ 6776 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6777 { 6778 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6779 page_counter_uncharge(&memcg->tcpmem, nr_pages); 6780 return; 6781 } 6782 6783 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 6784 6785 refill_stock(memcg, nr_pages); 6786 } 6787 6788 static int __init cgroup_memory(char *s) 6789 { 6790 char *token; 6791 6792 while ((token = strsep(&s, ",")) != NULL) { 6793 if (!*token) 6794 continue; 6795 if (!strcmp(token, "nosocket")) 6796 cgroup_memory_nosocket = true; 6797 if (!strcmp(token, "nokmem")) 6798 cgroup_memory_nokmem = true; 6799 } 6800 return 0; 6801 } 6802 __setup("cgroup.memory=", cgroup_memory); 6803 6804 /* 6805 * subsys_initcall() for memory controller. 6806 * 6807 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 6808 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 6809 * basically everything that doesn't depend on a specific mem_cgroup structure 6810 * should be initialized from here. 6811 */ 6812 static int __init mem_cgroup_init(void) 6813 { 6814 int cpu, node; 6815 6816 #ifdef CONFIG_MEMCG_KMEM 6817 /* 6818 * Kmem cache creation is mostly done with the slab_mutex held, 6819 * so use a workqueue with limited concurrency to avoid stalling 6820 * all worker threads in case lots of cgroups are created and 6821 * destroyed simultaneously. 6822 */ 6823 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); 6824 BUG_ON(!memcg_kmem_cache_wq); 6825 #endif 6826 6827 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 6828 memcg_hotplug_cpu_dead); 6829 6830 for_each_possible_cpu(cpu) 6831 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 6832 drain_local_stock); 6833 6834 for_each_node(node) { 6835 struct mem_cgroup_tree_per_node *rtpn; 6836 6837 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 6838 node_online(node) ? node : NUMA_NO_NODE); 6839 6840 rtpn->rb_root = RB_ROOT; 6841 rtpn->rb_rightmost = NULL; 6842 spin_lock_init(&rtpn->lock); 6843 soft_limit_tree.rb_tree_per_node[node] = rtpn; 6844 } 6845 6846 return 0; 6847 } 6848 subsys_initcall(mem_cgroup_init); 6849 6850 #ifdef CONFIG_MEMCG_SWAP 6851 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 6852 { 6853 while (!refcount_inc_not_zero(&memcg->id.ref)) { 6854 /* 6855 * The root cgroup cannot be destroyed, so its refcount must 6856 * always be >= 1.
6857 */ 6858 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 6859 VM_BUG_ON(1); 6860 break; 6861 } 6862 memcg = parent_mem_cgroup(memcg); 6863 if (!memcg) 6864 memcg = root_mem_cgroup; 6865 } 6866 return memcg; 6867 } 6868 6869 /** 6870 * mem_cgroup_swapout - transfer a memsw charge to swap 6871 * @page: page whose memsw charge to transfer 6872 * @entry: swap entry to move the charge to 6873 * 6874 * Transfer the memsw charge of @page to @entry. 6875 */ 6876 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 6877 { 6878 struct mem_cgroup *memcg, *swap_memcg; 6879 unsigned int nr_entries; 6880 unsigned short oldid; 6881 6882 VM_BUG_ON_PAGE(PageLRU(page), page); 6883 VM_BUG_ON_PAGE(page_count(page), page); 6884 6885 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6886 return; 6887 6888 memcg = page->mem_cgroup; 6889 6890 /* Readahead page, never charged */ 6891 if (!memcg) 6892 return; 6893 6894 /* 6895 * In case the memcg owning these pages has been offlined and doesn't 6896 * have an ID allocated to it anymore, charge the closest online 6897 * ancestor for the swap instead and transfer the memory+swap charge. 6898 */ 6899 swap_memcg = mem_cgroup_id_get_online(memcg); 6900 nr_entries = hpage_nr_pages(page); 6901 /* Get references for the tail pages, too */ 6902 if (nr_entries > 1) 6903 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 6904 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 6905 nr_entries); 6906 VM_BUG_ON_PAGE(oldid, page); 6907 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 6908 6909 page->mem_cgroup = NULL; 6910 6911 if (!mem_cgroup_is_root(memcg)) 6912 page_counter_uncharge(&memcg->memory, nr_entries); 6913 6914 if (!cgroup_memory_noswap && memcg != swap_memcg) { 6915 if (!mem_cgroup_is_root(swap_memcg)) 6916 page_counter_charge(&swap_memcg->memsw, nr_entries); 6917 page_counter_uncharge(&memcg->memsw, nr_entries); 6918 } 6919 6920 /* 6921 * Interrupts should be disabled here because the caller holds the 6922 * i_pages lock which is taken with interrupts-off. It is 6923 * important here to have the interrupts disabled because it is the 6924 * only synchronisation we have for updating the per-CPU variables. 6925 */ 6926 VM_BUG_ON(!irqs_disabled()); 6927 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 6928 memcg_check_events(memcg, page); 6929 6930 if (!mem_cgroup_is_root(memcg)) 6931 css_put_many(&memcg->css, nr_entries); 6932 } 6933 6934 /** 6935 * mem_cgroup_try_charge_swap - try charging swap space for a page 6936 * @page: page being added to swap 6937 * @entry: swap entry to charge 6938 * 6939 * Try to charge @page's memcg for the swap space at @entry. 6940 * 6941 * Returns 0 on success, -ENOMEM on failure. 
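 *
 * Caller sketch (editor's illustration; the swap allocation path does
 * roughly this after reserving a slot for the page):
 *
 *	entry = ...allocate a swap slot for the page...;
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		put_swap_page(page, entry);	// give the slot back on -ENOMEM
 *		entry.val = 0;
 *	}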
6942 */ 6943 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 6944 { 6945 unsigned int nr_pages = hpage_nr_pages(page); 6946 struct page_counter *counter; 6947 struct mem_cgroup *memcg; 6948 unsigned short oldid; 6949 6950 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6951 return 0; 6952 6953 memcg = page->mem_cgroup; 6954 6955 /* Readahead page, never charged */ 6956 if (!memcg) 6957 return 0; 6958 6959 if (!entry.val) { 6960 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 6961 return 0; 6962 } 6963 6964 memcg = mem_cgroup_id_get_online(memcg); 6965 6966 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 6967 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 6968 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 6969 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 6970 mem_cgroup_id_put(memcg); 6971 return -ENOMEM; 6972 } 6973 6974 /* Get references for the tail pages, too */ 6975 if (nr_pages > 1) 6976 mem_cgroup_id_get_many(memcg, nr_pages - 1); 6977 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 6978 VM_BUG_ON_PAGE(oldid, page); 6979 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 6980 6981 return 0; 6982 } 6983 6984 /** 6985 * mem_cgroup_uncharge_swap - uncharge swap space 6986 * @entry: swap entry to uncharge 6987 * @nr_pages: the amount of swap space to uncharge 6988 */ 6989 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 6990 { 6991 struct mem_cgroup *memcg; 6992 unsigned short id; 6993 6994 id = swap_cgroup_record(entry, 0, nr_pages); 6995 rcu_read_lock(); 6996 memcg = mem_cgroup_from_id(id); 6997 if (memcg) { 6998 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 6999 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7000 page_counter_uncharge(&memcg->swap, nr_pages); 7001 else 7002 page_counter_uncharge(&memcg->memsw, nr_pages); 7003 } 7004 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7005 mem_cgroup_id_put_many(memcg, nr_pages); 7006 } 7007 rcu_read_unlock(); 7008 } 7009 7010 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7011 { 7012 long nr_swap_pages = get_nr_swap_pages(); 7013 7014 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7015 return nr_swap_pages; 7016 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7017 nr_swap_pages = min_t(long, nr_swap_pages, 7018 READ_ONCE(memcg->swap.max) - 7019 page_counter_read(&memcg->swap)); 7020 return nr_swap_pages; 7021 } 7022 7023 bool mem_cgroup_swap_full(struct page *page) 7024 { 7025 struct mem_cgroup *memcg; 7026 7027 VM_BUG_ON_PAGE(!PageLocked(page), page); 7028 7029 if (vm_swap_full()) 7030 return true; 7031 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7032 return false; 7033 7034 memcg = page->mem_cgroup; 7035 if (!memcg) 7036 return false; 7037 7038 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7039 unsigned long usage = page_counter_read(&memcg->swap); 7040 7041 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7042 usage * 2 >= READ_ONCE(memcg->swap.max)) 7043 return true; 7044 } 7045 7046 return false; 7047 } 7048 7049 static int __init setup_swap_account(char *s) 7050 { 7051 if (!strcmp(s, "1")) 7052 cgroup_memory_noswap = 0; 7053 else if (!strcmp(s, "0")) 7054 cgroup_memory_noswap = 1; 7055 return 1; 7056 } 7057 __setup("swapaccount=", setup_swap_account); 7058 7059 static u64 swap_current_read(struct cgroup_subsys_state *css, 7060 struct cftype *cft) 7061 { 7062 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7063 7064 return 
(u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7065 } 7066 7067 static int swap_high_show(struct seq_file *m, void *v) 7068 { 7069 return seq_puts_memcg_tunable(m, 7070 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7071 } 7072 7073 static ssize_t swap_high_write(struct kernfs_open_file *of, 7074 char *buf, size_t nbytes, loff_t off) 7075 { 7076 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7077 unsigned long high; 7078 int err; 7079 7080 buf = strstrip(buf); 7081 err = page_counter_memparse(buf, "max", &high); 7082 if (err) 7083 return err; 7084 7085 page_counter_set_high(&memcg->swap, high); 7086 7087 return nbytes; 7088 } 7089 7090 static int swap_max_show(struct seq_file *m, void *v) 7091 { 7092 return seq_puts_memcg_tunable(m, 7093 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7094 } 7095 7096 static ssize_t swap_max_write(struct kernfs_open_file *of, 7097 char *buf, size_t nbytes, loff_t off) 7098 { 7099 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7100 unsigned long max; 7101 int err; 7102 7103 buf = strstrip(buf); 7104 err = page_counter_memparse(buf, "max", &max); 7105 if (err) 7106 return err; 7107 7108 xchg(&memcg->swap.max, max); 7109 7110 return nbytes; 7111 } 7112 7113 static int swap_events_show(struct seq_file *m, void *v) 7114 { 7115 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7116 7117 seq_printf(m, "high %lu\n", 7118 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7119 seq_printf(m, "max %lu\n", 7120 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7121 seq_printf(m, "fail %lu\n", 7122 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7123 7124 return 0; 7125 } 7126 7127 static struct cftype swap_files[] = { 7128 { 7129 .name = "swap.current", 7130 .flags = CFTYPE_NOT_ON_ROOT, 7131 .read_u64 = swap_current_read, 7132 }, 7133 { 7134 .name = "swap.high", 7135 .flags = CFTYPE_NOT_ON_ROOT, 7136 .seq_show = swap_high_show, 7137 .write = swap_high_write, 7138 }, 7139 { 7140 .name = "swap.max", 7141 .flags = CFTYPE_NOT_ON_ROOT, 7142 .seq_show = swap_max_show, 7143 .write = swap_max_write, 7144 }, 7145 { 7146 .name = "swap.events", 7147 .flags = CFTYPE_NOT_ON_ROOT, 7148 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7149 .seq_show = swap_events_show, 7150 }, 7151 { } /* terminate */ 7152 }; 7153 7154 static struct cftype memsw_files[] = { 7155 { 7156 .name = "memsw.usage_in_bytes", 7157 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7158 .read_u64 = mem_cgroup_read_u64, 7159 }, 7160 { 7161 .name = "memsw.max_usage_in_bytes", 7162 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7163 .write = mem_cgroup_reset, 7164 .read_u64 = mem_cgroup_read_u64, 7165 }, 7166 { 7167 .name = "memsw.limit_in_bytes", 7168 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7169 .write = mem_cgroup_write, 7170 .read_u64 = mem_cgroup_read_u64, 7171 }, 7172 { 7173 .name = "memsw.failcnt", 7174 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7175 .write = mem_cgroup_reset, 7176 .read_u64 = mem_cgroup_read_u64, 7177 }, 7178 { }, /* terminate */ 7179 }; 7180 7181 static int __init mem_cgroup_swap_init(void) 7182 { 7183 /* No memory control -> no swap control */ 7184 if (mem_cgroup_disabled()) 7185 cgroup_memory_noswap = true; 7186 7187 if (cgroup_memory_noswap) 7188 return 0; 7189 7190 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7191 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7192 7193 return 0; 7194 } 7195 subsys_initcall(mem_cgroup_swap_init); 7196 7197 #endif /* 
CONFIG_MEMCG_SWAP */ 7198
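/*
 * Editor's illustration (not part of the kernel sources): a minimal,
 * self-contained userspace probe of the cgroup v2 interface implemented
 * above. The cgroup path below is an assumption; point it at a cgroup
 * you own on a system with the v2 hierarchy mounted at /sys/fs/cgroup.
 * Writing "512M" goes through memory_high_write(), which may reclaim
 * the group down to the new high; writing the literal string "max"
 * maps to PAGE_COUNTER_MAX, i.e. no limit. memory.events is rendered
 * by memory_events_show() above.
 *
 *	// build: cc -o memcg-probe memcg-probe.c
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int write_str(const char *path, const char *val)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
 *			perror(path);
 *			if (fd >= 0)
 *				close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 *	int main(void)
 *	{
 *		const char *grp = "/sys/fs/cgroup/test";	// assumed to exist
 *		char path[256], buf[4096];
 *		ssize_t n;
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "%s/memory.high", grp);
 *		write_str(path, "512M");
 *
 *		snprintf(path, sizeof(path), "%s/memory.events", grp);
 *		fd = open(path, O_RDONLY);
 *		if (fd >= 0) {
 *			n = read(fd, buf, sizeof(buf) - 1);
 *			if (n > 0)
 *				fwrite(buf, 1, n, stdout);
 *			close(fd);
 *		}
 *		return 0;
 *	}
 */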