// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removing. This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few kmem-limited. Or also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler.
 * Since the calls to memcg_kmem_get_cache are conditional to this static
 * branch, we'll have to allow modules that do kmem_cache_alloc and such
 * to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}
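/*
 * Set the bit for @shrinker_id in @memcg's per-node shrinker map so that
 * shrink_slab() will consult this shrinker for @memcg on node @nid.
 */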
void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	if (PageSlab(page) && !PageTail(page))
		memcg = memcg_from_slab_page(page);
	else
		memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
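/*
 * Unlink @mz from its soft-limit tree. Callers are expected to hold
 * mctz->lock.
 */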
static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}
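/*
 * Pick the memcg that is furthest over its soft limit on this node's tree.
 * Callers are expected to hold mctz->lock.
 */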
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three of the counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	pg_data_t *pgdat = lruvec_pgdat(lruvec);
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x;

	/* Update node */
	__mod_node_page_state(pgdat, idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
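/*
 * Account a stat change for slab object @p against the node it lives on
 * and, if the object is charged to a non-root memcg, against that memcg's
 * lruvec as well.
 */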
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg || memcg == root_mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}

	if (abs(nr_pages) > 1) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

/**
 * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (unlikely(current->active_memcg)) {
		struct mem_cgroup *memcg;

		rcu_read_lock();
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
		rcu_read_unlock();
		return memcg;
	}
	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

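	/*
	 * For shared reclaim walks, publish where this walk stopped so the
	 * next reclaimer on this node can continue from that position.
	 */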
	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	seq_buf_printf(&s, "anon %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "kernel_stack %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
		       1024);
	seq_buf_printf(&s, "slab %llu\n",
		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "sock %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
		       PAGE_SIZE);

	seq_buf_printf(&s, "shmem %llu\n",
		       (u64)memcg_page_state(memcg, NR_SHMEM) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_mapped %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_dirty %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_writeback %llu\n",
		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
		       PAGE_SIZE);

	/*
	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
	 * arse because it requires migrating the work out of rmap to a place
	 * where the page->mem_cgroup is set up and stable.
	 */
	seq_buf_printf(&s, "anon_thp %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
		       PAGE_SIZE);

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			       PAGE_SIZE);

	seq_buf_printf(&s, "slab_reclaimable %llu\n",
		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
		       PAGE_SIZE);

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));

	seq_buf_printf(&s, "workingset_refault %lu\n",
		       memcg_page_state(memcg, WORKINGSET_REFAULT));
	seq_buf_printf(&s, "workingset_activate %lu\n",
		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
	seq_buf_printf(&s, "workingset_restore %lu\n",
		       memcg_page_state(memcg, WORKINGSET_RESTORE));
	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max;

	max = READ_ONCE(memcg->memory.max);
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_max;
		unsigned long swap_max;

		memsw_max = memcg->memsw.max;
		swap_max = READ_ONCE(memcg->swap.max);
		swap_max = min(swap_max, (unsigned long)total_swap_pages);
		max = min(max + swap_max, memsw_max);
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	if (mutex_lock_killable(&oom_lock))
		return true;
	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too large, so we won't
				 * reclaim too much, nor too small, so we won't
				 * keep coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};
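/*
 * Handle a failed charge attempt: either invoke the OOM killer right away,
 * or, when cgroup1 userspace OOM handling is enabled, stash the context so
 * mem_cgroup_oom_synchronize() can wait at the end of the page fault.
 */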
static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1916 */ 1917 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1918 struct mem_cgroup *oom_domain) 1919 { 1920 struct mem_cgroup *oom_group = NULL; 1921 struct mem_cgroup *memcg; 1922 1923 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1924 return NULL; 1925 1926 if (!oom_domain) 1927 oom_domain = root_mem_cgroup; 1928 1929 rcu_read_lock(); 1930 1931 memcg = mem_cgroup_from_task(victim); 1932 if (memcg == root_mem_cgroup) 1933 goto out; 1934 1935 /* 1936 * If the victim task has been asynchronously moved to a different 1937 * memory cgroup, we might end up killing tasks outside oom_domain. 1938 * In this case it's better to ignore memory.group.oom. 1939 */ 1940 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1941 goto out; 1942 1943 /* 1944 * Traverse the memory cgroup hierarchy from the victim task's 1945 * cgroup up to the OOMing cgroup (or root) to find the 1946 * highest-level memory cgroup with oom.group set. 1947 */ 1948 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1949 if (memcg->oom_group) 1950 oom_group = memcg; 1951 1952 if (memcg == oom_domain) 1953 break; 1954 } 1955 1956 if (oom_group) 1957 css_get(&oom_group->css); 1958 out: 1959 rcu_read_unlock(); 1960 1961 return oom_group; 1962 } 1963 1964 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1965 { 1966 pr_info("Tasks in "); 1967 pr_cont_cgroup_path(memcg->css.cgroup); 1968 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1969 } 1970 1971 /** 1972 * lock_page_memcg - lock a page->mem_cgroup binding 1973 * @page: the page 1974 * 1975 * This function protects unlocked LRU pages from being moved to 1976 * another cgroup. 1977 * 1978 * It ensures lifetime of the returned memcg. Caller is responsible 1979 * for the lifetime of the page; __unlock_page_memcg() is available 1980 * when @page might get freed inside the locked section. 1981 */ 1982 struct mem_cgroup *lock_page_memcg(struct page *page) 1983 { 1984 struct mem_cgroup *memcg; 1985 unsigned long flags; 1986 1987 /* 1988 * The RCU lock is held throughout the transaction. The fast 1989 * path can get away without acquiring the memcg->move_lock 1990 * because page moving starts with an RCU grace period. 1991 * 1992 * The RCU lock also protects the memcg from being freed when 1993 * the page state that is going to change is the only thing 1994 * preventing the page itself from being freed. E.g. writeback 1995 * doesn't hold a page reference and relies on PG_writeback to 1996 * keep off truncation, migration and so forth. 1997 */ 1998 rcu_read_lock(); 1999 2000 if (mem_cgroup_disabled()) 2001 return NULL; 2002 again: 2003 memcg = page->mem_cgroup; 2004 if (unlikely(!memcg)) 2005 return NULL; 2006 2007 if (atomic_read(&memcg->moving_account) <= 0) 2008 return memcg; 2009 2010 spin_lock_irqsave(&memcg->move_lock, flags); 2011 if (memcg != page->mem_cgroup) { 2012 spin_unlock_irqrestore(&memcg->move_lock, flags); 2013 goto again; 2014 } 2015 2016 /* 2017 * When charge migration first begins, we can have locked and 2018 * unlocked page stat updates happening concurrently. Track 2019 * the task who has the lock for unlock_page_memcg(). 2020 */ 2021 memcg->move_lock_task = current; 2022 memcg->move_lock_flags = flags; 2023 2024 return memcg; 2025 } 2026 EXPORT_SYMBOL(lock_page_memcg); 2027 2028 /** 2029 * __unlock_page_memcg - unlock and unpin a memcg 2030 * @memcg: the memcg 2031 * 2032 * Unlock and unpin a memcg returned by lock_page_memcg(). 
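 *
 * Typical pairing, sketched (useful when @page may be freed inside the
 * locked section, so the page itself cannot be used for unlocking):
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state; page may be truncated/freed here ...
 *	__unlock_page_memcg(memcg);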
2033 */ 2034 void __unlock_page_memcg(struct mem_cgroup *memcg) 2035 { 2036 if (memcg && memcg->move_lock_task == current) { 2037 unsigned long flags = memcg->move_lock_flags; 2038 2039 memcg->move_lock_task = NULL; 2040 memcg->move_lock_flags = 0; 2041 2042 spin_unlock_irqrestore(&memcg->move_lock, flags); 2043 } 2044 2045 rcu_read_unlock(); 2046 } 2047 2048 /** 2049 * unlock_page_memcg - unlock a page->mem_cgroup binding 2050 * @page: the page 2051 */ 2052 void unlock_page_memcg(struct page *page) 2053 { 2054 __unlock_page_memcg(page->mem_cgroup); 2055 } 2056 EXPORT_SYMBOL(unlock_page_memcg); 2057 2058 struct memcg_stock_pcp { 2059 struct mem_cgroup *cached; /* this never be root cgroup */ 2060 unsigned int nr_pages; 2061 struct work_struct work; 2062 unsigned long flags; 2063 #define FLUSHING_CACHED_CHARGE 0 2064 }; 2065 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2066 static DEFINE_MUTEX(percpu_charge_mutex); 2067 2068 /** 2069 * consume_stock: Try to consume stocked charge on this cpu. 2070 * @memcg: memcg to consume from. 2071 * @nr_pages: how many pages to charge. 2072 * 2073 * The charges will only happen if @memcg matches the current cpu's memcg 2074 * stock, and at least @nr_pages are available in that stock. Failure to 2075 * service an allocation will refill the stock. 2076 * 2077 * returns true if successful, false otherwise. 2078 */ 2079 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2080 { 2081 struct memcg_stock_pcp *stock; 2082 unsigned long flags; 2083 bool ret = false; 2084 2085 if (nr_pages > MEMCG_CHARGE_BATCH) 2086 return ret; 2087 2088 local_irq_save(flags); 2089 2090 stock = this_cpu_ptr(&memcg_stock); 2091 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2092 stock->nr_pages -= nr_pages; 2093 ret = true; 2094 } 2095 2096 local_irq_restore(flags); 2097 2098 return ret; 2099 } 2100 2101 /* 2102 * Returns stocks cached in percpu and reset cached information. 2103 */ 2104 static void drain_stock(struct memcg_stock_pcp *stock) 2105 { 2106 struct mem_cgroup *old = stock->cached; 2107 2108 if (stock->nr_pages) { 2109 page_counter_uncharge(&old->memory, stock->nr_pages); 2110 if (do_memsw_account()) 2111 page_counter_uncharge(&old->memsw, stock->nr_pages); 2112 css_put_many(&old->css, stock->nr_pages); 2113 stock->nr_pages = 0; 2114 } 2115 stock->cached = NULL; 2116 } 2117 2118 static void drain_local_stock(struct work_struct *dummy) 2119 { 2120 struct memcg_stock_pcp *stock; 2121 unsigned long flags; 2122 2123 /* 2124 * The only protection from memory hotplug vs. drain_stock races is 2125 * that we always operate on local CPU stock here with IRQ disabled 2126 */ 2127 local_irq_save(flags); 2128 2129 stock = this_cpu_ptr(&memcg_stock); 2130 drain_stock(stock); 2131 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2132 2133 local_irq_restore(flags); 2134 } 2135 2136 /* 2137 * Cache charges(val) to local per_cpu area. 2138 * This will be consumed by consume_stock() function, later. 
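 *
 * Illustrative flow, assuming the common caller is try_charge(): the
 * charge path charges a full MEMCG_CHARGE_BATCH against the page
 * counters and then calls refill_stock(memcg, batch - nr_pages), so
 * that subsequent small charges on this CPU can be satisfied by
 * consume_stock() without touching the page counters again.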
2139 */ 2140 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2141 { 2142 struct memcg_stock_pcp *stock; 2143 unsigned long flags; 2144 2145 local_irq_save(flags); 2146 2147 stock = this_cpu_ptr(&memcg_stock); 2148 if (stock->cached != memcg) { /* reset if necessary */ 2149 drain_stock(stock); 2150 stock->cached = memcg; 2151 } 2152 stock->nr_pages += nr_pages; 2153 2154 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2155 drain_stock(stock); 2156 2157 local_irq_restore(flags); 2158 } 2159 2160 /* 2161 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2162 * of the hierarchy under it. 2163 */ 2164 static void drain_all_stock(struct mem_cgroup *root_memcg) 2165 { 2166 int cpu, curcpu; 2167 2168 /* If someone's already draining, avoid adding running more workers. */ 2169 if (!mutex_trylock(&percpu_charge_mutex)) 2170 return; 2171 /* 2172 * Notify other cpus that system-wide "drain" is running 2173 * We do not care about races with the cpu hotplug because cpu down 2174 * as well as workers from this path always operate on the local 2175 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2176 */ 2177 curcpu = get_cpu(); 2178 for_each_online_cpu(cpu) { 2179 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2180 struct mem_cgroup *memcg; 2181 bool flush = false; 2182 2183 rcu_read_lock(); 2184 memcg = stock->cached; 2185 if (memcg && stock->nr_pages && 2186 mem_cgroup_is_descendant(memcg, root_memcg)) 2187 flush = true; 2188 rcu_read_unlock(); 2189 2190 if (flush && 2191 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2192 if (cpu == curcpu) 2193 drain_local_stock(&stock->work); 2194 else 2195 schedule_work_on(cpu, &stock->work); 2196 } 2197 } 2198 put_cpu(); 2199 mutex_unlock(&percpu_charge_mutex); 2200 } 2201 2202 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2203 { 2204 struct memcg_stock_pcp *stock; 2205 struct mem_cgroup *memcg, *mi; 2206 2207 stock = &per_cpu(memcg_stock, cpu); 2208 drain_stock(stock); 2209 2210 for_each_mem_cgroup(memcg) { 2211 int i; 2212 2213 for (i = 0; i < MEMCG_NR_STAT; i++) { 2214 int nid; 2215 long x; 2216 2217 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2218 if (x) 2219 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2220 atomic_long_add(x, &memcg->vmstats[i]); 2221 2222 if (i >= NR_VM_NODE_STAT_ITEMS) 2223 continue; 2224 2225 for_each_node(nid) { 2226 struct mem_cgroup_per_node *pn; 2227 2228 pn = mem_cgroup_nodeinfo(memcg, nid); 2229 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2230 if (x) 2231 do { 2232 atomic_long_add(x, &pn->lruvec_stat[i]); 2233 } while ((pn = parent_nodeinfo(pn, nid))); 2234 } 2235 } 2236 2237 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2238 long x; 2239 2240 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2241 if (x) 2242 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2243 atomic_long_add(x, &memcg->vmevents[i]); 2244 } 2245 } 2246 2247 return 0; 2248 } 2249 2250 static void reclaim_high(struct mem_cgroup *memcg, 2251 unsigned int nr_pages, 2252 gfp_t gfp_mask) 2253 { 2254 do { 2255 if (page_counter_read(&memcg->memory) <= 2256 READ_ONCE(memcg->memory.high)) 2257 continue; 2258 memcg_memory_event(memcg, MEMCG_HIGH); 2259 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); 2260 } while ((memcg = parent_mem_cgroup(memcg)) && 2261 !mem_cgroup_is_root(memcg)); 2262 } 2263 2264 static void high_work_func(struct work_struct *work) 2265 { 2266 struct mem_cgroup *memcg; 2267 2268 memcg = container_of(work, struct mem_cgroup, high_work); 
2269 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2270 } 2271 2272 /* 2273 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2274 * enough to still cause a significant slowdown in most cases, while still 2275 * allowing diagnostics and tracing to proceed without becoming stuck. 2276 */ 2277 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2278 2279 /* 2280 * When calculating the delay, we use these either side of the exponentiation to 2281 * maintain precision and scale to a reasonable number of jiffies (see the table 2282 * below. 2283 * 2284 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2285 * overage ratio to a delay. 2286 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down down the 2287 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2288 * to produce a reasonable delay curve. 2289 * 2290 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2291 * reasonable delay curve compared to precision-adjusted overage, not 2292 * penalising heavily at first, but still making sure that growth beyond the 2293 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2294 * example, with a high of 100 megabytes: 2295 * 2296 * +-------+------------------------+ 2297 * | usage | time to allocate in ms | 2298 * +-------+------------------------+ 2299 * | 100M | 0 | 2300 * | 101M | 6 | 2301 * | 102M | 25 | 2302 * | 103M | 57 | 2303 * | 104M | 102 | 2304 * | 105M | 159 | 2305 * | 106M | 230 | 2306 * | 107M | 313 | 2307 * | 108M | 409 | 2308 * | 109M | 518 | 2309 * | 110M | 639 | 2310 * | 111M | 774 | 2311 * | 112M | 921 | 2312 * | 113M | 1081 | 2313 * | 114M | 1254 | 2314 * | 115M | 1439 | 2315 * | 116M | 1638 | 2316 * | 117M | 1849 | 2317 * | 118M | 2000 | 2318 * | 119M | 2000 | 2319 * | 120M | 2000 | 2320 * +-------+------------------------+ 2321 */ 2322 #define MEMCG_DELAY_PRECISION_SHIFT 20 2323 #define MEMCG_DELAY_SCALING_SHIFT 14 2324 2325 static u64 calculate_overage(unsigned long usage, unsigned long high) 2326 { 2327 u64 overage; 2328 2329 if (usage <= high) 2330 return 0; 2331 2332 /* 2333 * Prevent division by 0 in overage calculation by acting as if 2334 * it was a threshold of 1 page 2335 */ 2336 high = max(high, 1UL); 2337 2338 overage = usage - high; 2339 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2340 return div64_u64(overage, high); 2341 } 2342 2343 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2344 { 2345 u64 overage, max_overage = 0; 2346 2347 do { 2348 overage = calculate_overage(page_counter_read(&memcg->memory), 2349 READ_ONCE(memcg->memory.high)); 2350 max_overage = max(overage, max_overage); 2351 } while ((memcg = parent_mem_cgroup(memcg)) && 2352 !mem_cgroup_is_root(memcg)); 2353 2354 return max_overage; 2355 } 2356 2357 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2358 { 2359 u64 overage, max_overage = 0; 2360 2361 do { 2362 overage = calculate_overage(page_counter_read(&memcg->swap), 2363 READ_ONCE(memcg->swap.high)); 2364 if (overage) 2365 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2366 max_overage = max(overage, max_overage); 2367 } while ((memcg = parent_mem_cgroup(memcg)) && 2368 !mem_cgroup_is_root(memcg)); 2369 2370 return max_overage; 2371 } 2372 2373 /* 2374 * Get the number of jiffies that we should penalise a mischievous cgroup which 2375 * is exceeding its memory.high by checking both it and its ancestors. 
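 *
 * Worked example (assuming HZ == 1000, matching the table above): with
 * memory.high == 100M and usage == 101M, calculate_overage() returns
 * about ((101M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 10485,
 * and the penalty below becomes 10485 * 10485 * HZ >> (20 + 14) ~= 6
 * jiffies, i.e. roughly 6ms per MEMCG_CHARGE_BATCH worth of allocations.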
2376 */ 2377 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2378 unsigned int nr_pages, 2379 u64 max_overage) 2380 { 2381 unsigned long penalty_jiffies; 2382 2383 if (!max_overage) 2384 return 0; 2385 2386 /* 2387 * We use overage compared to memory.high to calculate the number of 2388 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2389 * fairly lenient on small overages, and increasingly harsh when the 2390 * memcg in question makes it clear that it has no intention of stopping 2391 * its crazy behaviour, so we exponentially increase the delay based on 2392 * overage amount. 2393 */ 2394 penalty_jiffies = max_overage * max_overage * HZ; 2395 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2396 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2397 2398 /* 2399 * Factor in the task's own contribution to the overage, such that four 2400 * N-sized allocations are throttled approximately the same as one 2401 * 4N-sized allocation. 2402 * 2403 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2404 * larger the current charge patch is than that. 2405 */ 2406 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2407 } 2408 2409 /* 2410 * Scheduled by try_charge() to be executed from the userland return path 2411 * and reclaims memory over the high limit. 2412 */ 2413 void mem_cgroup_handle_over_high(void) 2414 { 2415 unsigned long penalty_jiffies; 2416 unsigned long pflags; 2417 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2418 struct mem_cgroup *memcg; 2419 2420 if (likely(!nr_pages)) 2421 return; 2422 2423 memcg = get_mem_cgroup_from_mm(current->mm); 2424 reclaim_high(memcg, nr_pages, GFP_KERNEL); 2425 current->memcg_nr_pages_over_high = 0; 2426 2427 /* 2428 * memory.high is breached and reclaim is unable to keep up. Throttle 2429 * allocators proactively to slow down excessive growth. 2430 */ 2431 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2432 mem_find_max_overage(memcg)); 2433 2434 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2435 swap_find_max_overage(memcg)); 2436 2437 /* 2438 * Clamp the max delay per usermode return so as to still keep the 2439 * application moving forwards and also permit diagnostics, albeit 2440 * extremely slowly. 2441 */ 2442 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2443 2444 /* 2445 * Don't sleep if the amount of jiffies this memcg owes us is so low 2446 * that it's not even worth doing, in an attempt to be nice to those who 2447 * go only a small amount over their memory.high value and maybe haven't 2448 * been aggressively reclaimed enough yet. 2449 */ 2450 if (penalty_jiffies <= HZ / 100) 2451 goto out; 2452 2453 /* 2454 * If we exit early, we're guaranteed to die (since 2455 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2456 * need to account for any ill-begotten jiffies to pay them off later. 
2457 */ 2458 psi_memstall_enter(&pflags); 2459 schedule_timeout_killable(penalty_jiffies); 2460 psi_memstall_leave(&pflags); 2461 2462 out: 2463 css_put(&memcg->css); 2464 } 2465 2466 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2467 unsigned int nr_pages) 2468 { 2469 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2470 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2471 struct mem_cgroup *mem_over_limit; 2472 struct page_counter *counter; 2473 unsigned long nr_reclaimed; 2474 bool may_swap = true; 2475 bool drained = false; 2476 enum oom_status oom_status; 2477 2478 if (mem_cgroup_is_root(memcg)) 2479 return 0; 2480 retry: 2481 if (consume_stock(memcg, nr_pages)) 2482 return 0; 2483 2484 if (!do_memsw_account() || 2485 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2486 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2487 goto done_restock; 2488 if (do_memsw_account()) 2489 page_counter_uncharge(&memcg->memsw, batch); 2490 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2491 } else { 2492 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2493 may_swap = false; 2494 } 2495 2496 if (batch > nr_pages) { 2497 batch = nr_pages; 2498 goto retry; 2499 } 2500 2501 /* 2502 * Memcg doesn't have a dedicated reserve for atomic 2503 * allocations. But like the global atomic pool, we need to 2504 * put the burden of reclaim on regular allocation requests 2505 * and let these go through as privileged allocations. 2506 */ 2507 if (gfp_mask & __GFP_ATOMIC) 2508 goto force; 2509 2510 /* 2511 * Unlike in global OOM situations, memcg is not in a physical 2512 * memory shortage. Allow dying and OOM-killed tasks to 2513 * bypass the last charges so that they can exit quickly and 2514 * free their memory. 2515 */ 2516 if (unlikely(should_force_charge())) 2517 goto force; 2518 2519 /* 2520 * Prevent unbounded recursion when reclaim operations need to 2521 * allocate memory. This might exceed the limits temporarily, 2522 * but we prefer facilitating memory reclaim and getting back 2523 * under the limit over triggering OOM kills in these cases. 2524 */ 2525 if (unlikely(current->flags & PF_MEMALLOC)) 2526 goto force; 2527 2528 if (unlikely(task_in_memcg_oom(current))) 2529 goto nomem; 2530 2531 if (!gfpflags_allow_blocking(gfp_mask)) 2532 goto nomem; 2533 2534 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2535 2536 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2537 gfp_mask, may_swap); 2538 2539 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2540 goto retry; 2541 2542 if (!drained) { 2543 drain_all_stock(mem_over_limit); 2544 drained = true; 2545 goto retry; 2546 } 2547 2548 if (gfp_mask & __GFP_NORETRY) 2549 goto nomem; 2550 /* 2551 * Even though the limit is exceeded at this point, reclaim 2552 * may have been able to free some pages. Retry the charge 2553 * before killing the task. 2554 * 2555 * Only for regular pages, though: huge pages are rather 2556 * unlikely to succeed so close to the limit, and we fall back 2557 * to regular pages anyway in case of failure. 2558 */ 2559 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2560 goto retry; 2561 /* 2562 * At task move, charge accounts can be doubly counted. So, it's 2563 * better to wait until the end of task_move if something is going on. 
2564 */ 2565 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2566 goto retry; 2567 2568 if (nr_retries--) 2569 goto retry; 2570 2571 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2572 goto nomem; 2573 2574 if (gfp_mask & __GFP_NOFAIL) 2575 goto force; 2576 2577 if (fatal_signal_pending(current)) 2578 goto force; 2579 2580 /* 2581 * keep retrying as long as the memcg oom killer is able to make 2582 * a forward progress or bypass the charge if the oom killer 2583 * couldn't make any progress. 2584 */ 2585 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2586 get_order(nr_pages * PAGE_SIZE)); 2587 switch (oom_status) { 2588 case OOM_SUCCESS: 2589 nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2590 goto retry; 2591 case OOM_FAILED: 2592 goto force; 2593 default: 2594 goto nomem; 2595 } 2596 nomem: 2597 if (!(gfp_mask & __GFP_NOFAIL)) 2598 return -ENOMEM; 2599 force: 2600 /* 2601 * The allocation either can't fail or will lead to more memory 2602 * being freed very soon. Allow memory usage go over the limit 2603 * temporarily by force charging it. 2604 */ 2605 page_counter_charge(&memcg->memory, nr_pages); 2606 if (do_memsw_account()) 2607 page_counter_charge(&memcg->memsw, nr_pages); 2608 css_get_many(&memcg->css, nr_pages); 2609 2610 return 0; 2611 2612 done_restock: 2613 css_get_many(&memcg->css, batch); 2614 if (batch > nr_pages) 2615 refill_stock(memcg, batch - nr_pages); 2616 2617 /* 2618 * If the hierarchy is above the normal consumption range, schedule 2619 * reclaim on returning to userland. We can perform reclaim here 2620 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2621 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2622 * not recorded as it most likely matches current's and won't 2623 * change in the meantime. As high limit is checked again before 2624 * reclaim, the cost of mismatch is negligible. 2625 */ 2626 do { 2627 bool mem_high, swap_high; 2628 2629 mem_high = page_counter_read(&memcg->memory) > 2630 READ_ONCE(memcg->memory.high); 2631 swap_high = page_counter_read(&memcg->swap) > 2632 READ_ONCE(memcg->swap.high); 2633 2634 /* Don't bother a random interrupted task */ 2635 if (in_interrupt()) { 2636 if (mem_high) { 2637 schedule_work(&memcg->high_work); 2638 break; 2639 } 2640 continue; 2641 } 2642 2643 if (mem_high || swap_high) { 2644 /* 2645 * The allocating tasks in this cgroup will need to do 2646 * reclaim or be throttled to prevent further growth 2647 * of the memory or swap footprints. 2648 * 2649 * Target some best-effort fairness between the tasks, 2650 * and distribute reclaim work and delay penalties 2651 * based on how much each task is actually allocating. 
2652 */ 2653 current->memcg_nr_pages_over_high += batch; 2654 set_notify_resume(current); 2655 break; 2656 } 2657 } while ((memcg = parent_mem_cgroup(memcg))); 2658 2659 return 0; 2660 } 2661 2662 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2663 { 2664 if (mem_cgroup_is_root(memcg)) 2665 return; 2666 2667 page_counter_uncharge(&memcg->memory, nr_pages); 2668 if (do_memsw_account()) 2669 page_counter_uncharge(&memcg->memsw, nr_pages); 2670 2671 css_put_many(&memcg->css, nr_pages); 2672 } 2673 2674 static void lock_page_lru(struct page *page, int *isolated) 2675 { 2676 pg_data_t *pgdat = page_pgdat(page); 2677 2678 spin_lock_irq(&pgdat->lru_lock); 2679 if (PageLRU(page)) { 2680 struct lruvec *lruvec; 2681 2682 lruvec = mem_cgroup_page_lruvec(page, pgdat); 2683 ClearPageLRU(page); 2684 del_page_from_lru_list(page, lruvec, page_lru(page)); 2685 *isolated = 1; 2686 } else 2687 *isolated = 0; 2688 } 2689 2690 static void unlock_page_lru(struct page *page, int isolated) 2691 { 2692 pg_data_t *pgdat = page_pgdat(page); 2693 2694 if (isolated) { 2695 struct lruvec *lruvec; 2696 2697 lruvec = mem_cgroup_page_lruvec(page, pgdat); 2698 VM_BUG_ON_PAGE(PageLRU(page), page); 2699 SetPageLRU(page); 2700 add_page_to_lru_list(page, lruvec, page_lru(page)); 2701 } 2702 spin_unlock_irq(&pgdat->lru_lock); 2703 } 2704 2705 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2706 bool lrucare) 2707 { 2708 int isolated; 2709 2710 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2711 2712 /* 2713 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2714 * may already be on some other mem_cgroup's LRU. Take care of it. 2715 */ 2716 if (lrucare) 2717 lock_page_lru(page, &isolated); 2718 2719 /* 2720 * Nobody should be changing or seriously looking at 2721 * page->mem_cgroup at this point: 2722 * 2723 * - the page is uncharged 2724 * 2725 * - the page is off-LRU 2726 * 2727 * - an anonymous fault has exclusive page access, except for 2728 * a locked page table 2729 * 2730 * - a page cache insertion, a swapin fault, or a migration 2731 * have the page locked 2732 */ 2733 page->mem_cgroup = memcg; 2734 2735 if (lrucare) 2736 unlock_page_lru(page, isolated); 2737 } 2738 2739 #ifdef CONFIG_MEMCG_KMEM 2740 /* 2741 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2742 * 2743 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2744 * cgroup_mutex, etc. 2745 */ 2746 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2747 { 2748 struct page *page; 2749 2750 if (mem_cgroup_disabled()) 2751 return NULL; 2752 2753 page = virt_to_head_page(p); 2754 2755 /* 2756 * Slab pages don't have page->mem_cgroup set because corresponding 2757 * kmem caches can be reparented during the lifetime. That's why 2758 * memcg_from_slab_page() should be used instead. 2759 */ 2760 if (PageSlab(page)) 2761 return memcg_from_slab_page(page); 2762 2763 /* All other pages use page->mem_cgroup */ 2764 return page->mem_cgroup; 2765 } 2766 2767 static int memcg_alloc_cache_id(void) 2768 { 2769 int id, size; 2770 int err; 2771 2772 id = ida_simple_get(&memcg_cache_ida, 2773 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2774 if (id < 0) 2775 return id; 2776 2777 if (id < memcg_nr_cache_ids) 2778 return id; 2779 2780 /* 2781 * There's no space for the new id in memcg_caches arrays, 2782 * so we have to grow them. 
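 *
 * Hypothetical numbers for the doubling below: with memcg_nr_cache_ids
 * at 4 and a new id of 4, the arrays grow to size = 2 * (4 + 1) = 10,
 * clamped to the [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE] range.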
2783 */ 2784 down_write(&memcg_cache_ids_sem); 2785 2786 size = 2 * (id + 1); 2787 if (size < MEMCG_CACHES_MIN_SIZE) 2788 size = MEMCG_CACHES_MIN_SIZE; 2789 else if (size > MEMCG_CACHES_MAX_SIZE) 2790 size = MEMCG_CACHES_MAX_SIZE; 2791 2792 err = memcg_update_all_caches(size); 2793 if (!err) 2794 err = memcg_update_all_list_lrus(size); 2795 if (!err) 2796 memcg_nr_cache_ids = size; 2797 2798 up_write(&memcg_cache_ids_sem); 2799 2800 if (err) { 2801 ida_simple_remove(&memcg_cache_ida, id); 2802 return err; 2803 } 2804 return id; 2805 } 2806 2807 static void memcg_free_cache_id(int id) 2808 { 2809 ida_simple_remove(&memcg_cache_ida, id); 2810 } 2811 2812 struct memcg_kmem_cache_create_work { 2813 struct mem_cgroup *memcg; 2814 struct kmem_cache *cachep; 2815 struct work_struct work; 2816 }; 2817 2818 static void memcg_kmem_cache_create_func(struct work_struct *w) 2819 { 2820 struct memcg_kmem_cache_create_work *cw = 2821 container_of(w, struct memcg_kmem_cache_create_work, work); 2822 struct mem_cgroup *memcg = cw->memcg; 2823 struct kmem_cache *cachep = cw->cachep; 2824 2825 memcg_create_kmem_cache(memcg, cachep); 2826 2827 css_put(&memcg->css); 2828 kfree(cw); 2829 } 2830 2831 /* 2832 * Enqueue the creation of a per-memcg kmem_cache. 2833 */ 2834 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2835 struct kmem_cache *cachep) 2836 { 2837 struct memcg_kmem_cache_create_work *cw; 2838 2839 if (!css_tryget_online(&memcg->css)) 2840 return; 2841 2842 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2843 if (!cw) 2844 return; 2845 2846 cw->memcg = memcg; 2847 cw->cachep = cachep; 2848 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2849 2850 queue_work(memcg_kmem_cache_wq, &cw->work); 2851 } 2852 2853 static inline bool memcg_kmem_bypass(void) 2854 { 2855 if (in_interrupt()) 2856 return true; 2857 2858 /* Allow remote memcg charging in kthread contexts. */ 2859 if ((!current->mm || (current->flags & PF_KTHREAD)) && 2860 !current->active_memcg) 2861 return true; 2862 return false; 2863 } 2864 2865 /** 2866 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2867 * @cachep: the original global kmem cache 2868 * 2869 * Return the kmem_cache we're supposed to use for a slab allocation. 2870 * We try to use the current memcg's version of the cache. 2871 * 2872 * If the cache does not exist yet, if we are the first user of it, we 2873 * create it asynchronously in a workqueue and let the current allocation 2874 * go through with the original cache. 2875 * 2876 * This function takes a reference to the cache it returns to assure it 2877 * won't get destroyed while we are working with it. Once the caller is 2878 * done with it, memcg_kmem_put_cache() must be called to release the 2879 * reference. 
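 *
 * Sketch of the expected call pattern (illustrative only; the real
 * callers are the slab allocation hooks):
 *
 *	cachep = memcg_kmem_get_cache(cachep);
 *	p = kmem_cache_alloc(cachep, gfp);
 *	memcg_kmem_put_cache(cachep);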
2880 */ 2881 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2882 { 2883 struct mem_cgroup *memcg; 2884 struct kmem_cache *memcg_cachep; 2885 struct memcg_cache_array *arr; 2886 int kmemcg_id; 2887 2888 VM_BUG_ON(!is_root_cache(cachep)); 2889 2890 if (memcg_kmem_bypass()) 2891 return cachep; 2892 2893 rcu_read_lock(); 2894 2895 if (unlikely(current->active_memcg)) 2896 memcg = current->active_memcg; 2897 else 2898 memcg = mem_cgroup_from_task(current); 2899 2900 if (!memcg || memcg == root_mem_cgroup) 2901 goto out_unlock; 2902 2903 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2904 if (kmemcg_id < 0) 2905 goto out_unlock; 2906 2907 arr = rcu_dereference(cachep->memcg_params.memcg_caches); 2908 2909 /* 2910 * Make sure we will access the up-to-date value. The code updating 2911 * memcg_caches issues a write barrier to match the data dependency 2912 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()). 2913 */ 2914 memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]); 2915 2916 /* 2917 * If we are in a safe context (can wait, and not in interrupt 2918 * context), we could be be predictable and return right away. 2919 * This would guarantee that the allocation being performed 2920 * already belongs in the new cache. 2921 * 2922 * However, there are some clashes that can arrive from locking. 2923 * For instance, because we acquire the slab_mutex while doing 2924 * memcg_create_kmem_cache, this means no further allocation 2925 * could happen with the slab_mutex held. So it's better to 2926 * defer everything. 2927 * 2928 * If the memcg is dying or memcg_cache is about to be released, 2929 * don't bother creating new kmem_caches. Because memcg_cachep 2930 * is ZEROed as the fist step of kmem offlining, we don't need 2931 * percpu_ref_tryget_live() here. css_tryget_online() check in 2932 * memcg_schedule_kmem_cache_create() will prevent us from 2933 * creation of a new kmem_cache. 2934 */ 2935 if (unlikely(!memcg_cachep)) 2936 memcg_schedule_kmem_cache_create(memcg, cachep); 2937 else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) 2938 cachep = memcg_cachep; 2939 out_unlock: 2940 rcu_read_unlock(); 2941 return cachep; 2942 } 2943 2944 /** 2945 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2946 * @cachep: the cache returned by memcg_kmem_get_cache 2947 */ 2948 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2949 { 2950 if (!is_root_cache(cachep)) 2951 percpu_ref_put(&cachep->memcg_params.refcnt); 2952 } 2953 2954 /** 2955 * __memcg_kmem_charge: charge a number of kernel pages to a memcg 2956 * @memcg: memory cgroup to charge 2957 * @gfp: reclaim mode 2958 * @nr_pages: number of pages to charge 2959 * 2960 * Returns 0 on success, an error code on failure. 2961 */ 2962 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, 2963 unsigned int nr_pages) 2964 { 2965 struct page_counter *counter; 2966 int ret; 2967 2968 ret = try_charge(memcg, gfp, nr_pages); 2969 if (ret) 2970 return ret; 2971 2972 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2973 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2974 2975 /* 2976 * Enforce __GFP_NOFAIL allocation because callers are not 2977 * prepared to see failures and likely do not have any failure 2978 * handling code. 
2979 */ 2980 if (gfp & __GFP_NOFAIL) { 2981 page_counter_charge(&memcg->kmem, nr_pages); 2982 return 0; 2983 } 2984 cancel_charge(memcg, nr_pages); 2985 return -ENOMEM; 2986 } 2987 return 0; 2988 } 2989 2990 /** 2991 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg 2992 * @memcg: memcg to uncharge 2993 * @nr_pages: number of pages to uncharge 2994 */ 2995 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 2996 { 2997 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2998 page_counter_uncharge(&memcg->kmem, nr_pages); 2999 3000 page_counter_uncharge(&memcg->memory, nr_pages); 3001 if (do_memsw_account()) 3002 page_counter_uncharge(&memcg->memsw, nr_pages); 3003 } 3004 3005 /** 3006 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3007 * @page: page to charge 3008 * @gfp: reclaim mode 3009 * @order: allocation order 3010 * 3011 * Returns 0 on success, an error code on failure. 3012 */ 3013 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3014 { 3015 struct mem_cgroup *memcg; 3016 int ret = 0; 3017 3018 if (memcg_kmem_bypass()) 3019 return 0; 3020 3021 memcg = get_mem_cgroup_from_current(); 3022 if (!mem_cgroup_is_root(memcg)) { 3023 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); 3024 if (!ret) { 3025 page->mem_cgroup = memcg; 3026 __SetPageKmemcg(page); 3027 } 3028 } 3029 css_put(&memcg->css); 3030 return ret; 3031 } 3032 3033 /** 3034 * __memcg_kmem_uncharge_page: uncharge a kmem page 3035 * @page: page to uncharge 3036 * @order: allocation order 3037 */ 3038 void __memcg_kmem_uncharge_page(struct page *page, int order) 3039 { 3040 struct mem_cgroup *memcg = page->mem_cgroup; 3041 unsigned int nr_pages = 1 << order; 3042 3043 if (!memcg) 3044 return; 3045 3046 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3047 __memcg_kmem_uncharge(memcg, nr_pages); 3048 page->mem_cgroup = NULL; 3049 3050 /* slab pages do not have PageKmemcg flag set */ 3051 if (PageKmemcg(page)) 3052 __ClearPageKmemcg(page); 3053 3054 css_put_many(&memcg->css, nr_pages); 3055 } 3056 #endif /* CONFIG_MEMCG_KMEM */ 3057 3058 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3059 3060 /* 3061 * Because tail pages are not marked as "used", set it. We're under 3062 * pgdat->lru_lock and migration entries setup in all page mappings. 3063 */ 3064 void mem_cgroup_split_huge_fixup(struct page *head) 3065 { 3066 int i; 3067 3068 if (mem_cgroup_disabled()) 3069 return; 3070 3071 for (i = 1; i < HPAGE_PMD_NR; i++) 3072 head[i].mem_cgroup = head->mem_cgroup; 3073 3074 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); 3075 } 3076 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3077 3078 #ifdef CONFIG_MEMCG_SWAP 3079 /** 3080 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3081 * @entry: swap entry to be moved 3082 * @from: mem_cgroup which the entry is moved from 3083 * @to: mem_cgroup which the entry is moved to 3084 * 3085 * It succeeds only when the swap_cgroup's record for this entry is the same 3086 * as the mem_cgroup's id of @from. 3087 * 3088 * Returns 0 on success, -EINVAL on failure. 3089 * 3090 * The caller must have charged to @to, IOW, called page_counter_charge() about 3091 * both res and memsw, and called css_get(). 
3092 */ 3093 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3094 struct mem_cgroup *from, struct mem_cgroup *to) 3095 { 3096 unsigned short old_id, new_id; 3097 3098 old_id = mem_cgroup_id(from); 3099 new_id = mem_cgroup_id(to); 3100 3101 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3102 mod_memcg_state(from, MEMCG_SWAP, -1); 3103 mod_memcg_state(to, MEMCG_SWAP, 1); 3104 return 0; 3105 } 3106 return -EINVAL; 3107 } 3108 #else 3109 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3110 struct mem_cgroup *from, struct mem_cgroup *to) 3111 { 3112 return -EINVAL; 3113 } 3114 #endif 3115 3116 static DEFINE_MUTEX(memcg_max_mutex); 3117 3118 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3119 unsigned long max, bool memsw) 3120 { 3121 bool enlarge = false; 3122 bool drained = false; 3123 int ret; 3124 bool limits_invariant; 3125 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3126 3127 do { 3128 if (signal_pending(current)) { 3129 ret = -EINTR; 3130 break; 3131 } 3132 3133 mutex_lock(&memcg_max_mutex); 3134 /* 3135 * Make sure that the new limit (memsw or memory limit) doesn't 3136 * break our basic invariant rule memory.max <= memsw.max. 3137 */ 3138 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3139 max <= memcg->memsw.max; 3140 if (!limits_invariant) { 3141 mutex_unlock(&memcg_max_mutex); 3142 ret = -EINVAL; 3143 break; 3144 } 3145 if (max > counter->max) 3146 enlarge = true; 3147 ret = page_counter_set_max(counter, max); 3148 mutex_unlock(&memcg_max_mutex); 3149 3150 if (!ret) 3151 break; 3152 3153 if (!drained) { 3154 drain_all_stock(memcg); 3155 drained = true; 3156 continue; 3157 } 3158 3159 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3160 GFP_KERNEL, !memsw)) { 3161 ret = -EBUSY; 3162 break; 3163 } 3164 } while (true); 3165 3166 if (!ret && enlarge) 3167 memcg_oom_recover(memcg); 3168 3169 return ret; 3170 } 3171 3172 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3173 gfp_t gfp_mask, 3174 unsigned long *total_scanned) 3175 { 3176 unsigned long nr_reclaimed = 0; 3177 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3178 unsigned long reclaimed; 3179 int loop = 0; 3180 struct mem_cgroup_tree_per_node *mctz; 3181 unsigned long excess; 3182 unsigned long nr_scanned; 3183 3184 if (order > 0) 3185 return 0; 3186 3187 mctz = soft_limit_tree_node(pgdat->node_id); 3188 3189 /* 3190 * Do not even bother to check the largest node if the root 3191 * is empty. Do it lockless to prevent lock bouncing. Races 3192 * are acceptable as soft limit is best effort anyway. 
3193 */
3194 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3195 return 0;
3196
3197 /*
3198 * This loop can run a while, especially if mem_cgroups continuously
3199 * keep exceeding their soft limit and putting the system under
3200 * pressure.
3201 */
3202 do {
3203 if (next_mz)
3204 mz = next_mz;
3205 else
3206 mz = mem_cgroup_largest_soft_limit_node(mctz);
3207 if (!mz)
3208 break;
3209
3210 nr_scanned = 0;
3211 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3212 gfp_mask, &nr_scanned);
3213 nr_reclaimed += reclaimed;
3214 *total_scanned += nr_scanned;
3215 spin_lock_irq(&mctz->lock);
3216 __mem_cgroup_remove_exceeded(mz, mctz);
3217
3218 /*
3219 * If we failed to reclaim anything from this memory cgroup
3220 * it is time to move on to the next cgroup
3221 */
3222 next_mz = NULL;
3223 if (!reclaimed)
3224 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3225
3226 excess = soft_limit_excess(mz->memcg);
3227 /*
3228 * One school of thought says that we should not add
3229 * back the node to the tree if reclaim returns 0.
3230 * But our reclaim could return 0, simply because due
3231 * to priority we are exposing a smaller subset of
3232 * memory to reclaim from. Consider this as a longer
3233 * term TODO.
3234 */
3235 /* If excess == 0, no tree ops */
3236 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3237 spin_unlock_irq(&mctz->lock);
3238 css_put(&mz->memcg->css);
3239 loop++;
3240 /*
3241 * Could not reclaim anything and there are no more
3242 * mem cgroups to try or we seem to be looping without
3243 * reclaiming anything.
3244 */
3245 if (!nr_reclaimed &&
3246 (next_mz == NULL ||
3247 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3248 break;
3249 } while (!nr_reclaimed);
3250 if (next_mz)
3251 css_put(&next_mz->memcg->css);
3252 return nr_reclaimed;
3253 }
3254
3255 /*
3256 * Test whether @memcg has children, dead or alive. Note that this
3257 * function doesn't care whether @memcg has use_hierarchy enabled and
3258 * returns %true if there are child csses according to the cgroup
3259 * hierarchy. Testing use_hierarchy is the caller's responsibility.
3260 */
3261 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3262 {
3263 bool ret;
3264
3265 rcu_read_lock();
3266 ret = css_next_child(NULL, &memcg->css);
3267 rcu_read_unlock();
3268 return ret;
3269 }
3270
3271 /*
3272 * Reclaims as many pages from the given memcg as possible.
3273 *
3274 * Caller is responsible for holding css reference for memcg.
3275 */
3276 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3277 {
3278 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3279
3280 /* we call try-to-free pages to make this cgroup empty */
3281 lru_add_drain_all();
3282
3283 drain_all_stock(memcg);
3284
3285 /* try to free all pages in this cgroup */
3286 while (nr_retries && page_counter_read(&memcg->memory)) {
3287 int progress;
3288
3289 if (signal_pending(current))
3290 return -EINTR;
3291
3292 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3293 GFP_KERNEL, true);
3294 if (!progress) {
3295 nr_retries--;
3296 /* maybe some writeback is necessary */
3297 congestion_wait(BLK_RW_ASYNC, HZ/10);
3298 }
3299
3300 }
3301
3302 return 0;
3303 }
3304
3305 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3306 char *buf, size_t nbytes,
3307 loff_t off)
3308 {
3309 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3310
3311 if (mem_cgroup_is_root(memcg))
3312 return -EINVAL;
3313 return mem_cgroup_force_empty(memcg) ?: nbytes;
3314 }
3315
3316 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3317 struct cftype *cft)
3318 {
3319 return mem_cgroup_from_css(css)->use_hierarchy;
3320 }
3321
3322 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3323 struct cftype *cft, u64 val)
3324 {
3325 int retval = 0;
3326 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3327 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3328
3329 if (memcg->use_hierarchy == val)
3330 return 0;
3331
3332 /*
3333 * If parent's use_hierarchy is set, we can't make any modifications
3334 * in the child subtrees. If it is unset, then the change can
3335 * occur, provided the current cgroup has no children.
3336 *
3337 * For the root cgroup, parent_memcg is NULL, we allow value to be
3338 * set if there are no children.
3339 */ 3340 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3341 (val == 1 || val == 0)) { 3342 if (!memcg_has_children(memcg)) 3343 memcg->use_hierarchy = val; 3344 else 3345 retval = -EBUSY; 3346 } else 3347 retval = -EINVAL; 3348 3349 return retval; 3350 } 3351 3352 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3353 { 3354 unsigned long val; 3355 3356 if (mem_cgroup_is_root(memcg)) { 3357 val = memcg_page_state(memcg, MEMCG_CACHE) + 3358 memcg_page_state(memcg, MEMCG_RSS); 3359 if (swap) 3360 val += memcg_page_state(memcg, MEMCG_SWAP); 3361 } else { 3362 if (!swap) 3363 val = page_counter_read(&memcg->memory); 3364 else 3365 val = page_counter_read(&memcg->memsw); 3366 } 3367 return val; 3368 } 3369 3370 enum { 3371 RES_USAGE, 3372 RES_LIMIT, 3373 RES_MAX_USAGE, 3374 RES_FAILCNT, 3375 RES_SOFT_LIMIT, 3376 }; 3377 3378 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3379 struct cftype *cft) 3380 { 3381 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3382 struct page_counter *counter; 3383 3384 switch (MEMFILE_TYPE(cft->private)) { 3385 case _MEM: 3386 counter = &memcg->memory; 3387 break; 3388 case _MEMSWAP: 3389 counter = &memcg->memsw; 3390 break; 3391 case _KMEM: 3392 counter = &memcg->kmem; 3393 break; 3394 case _TCP: 3395 counter = &memcg->tcpmem; 3396 break; 3397 default: 3398 BUG(); 3399 } 3400 3401 switch (MEMFILE_ATTR(cft->private)) { 3402 case RES_USAGE: 3403 if (counter == &memcg->memory) 3404 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3405 if (counter == &memcg->memsw) 3406 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3407 return (u64)page_counter_read(counter) * PAGE_SIZE; 3408 case RES_LIMIT: 3409 return (u64)counter->max * PAGE_SIZE; 3410 case RES_MAX_USAGE: 3411 return (u64)counter->watermark * PAGE_SIZE; 3412 case RES_FAILCNT: 3413 return counter->failcnt; 3414 case RES_SOFT_LIMIT: 3415 return (u64)memcg->soft_limit * PAGE_SIZE; 3416 default: 3417 BUG(); 3418 } 3419 } 3420 3421 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) 3422 { 3423 unsigned long stat[MEMCG_NR_STAT] = {0}; 3424 struct mem_cgroup *mi; 3425 int node, cpu, i; 3426 3427 for_each_online_cpu(cpu) 3428 for (i = 0; i < MEMCG_NR_STAT; i++) 3429 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); 3430 3431 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3432 for (i = 0; i < MEMCG_NR_STAT; i++) 3433 atomic_long_add(stat[i], &mi->vmstats[i]); 3434 3435 for_each_node(node) { 3436 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3437 struct mem_cgroup_per_node *pi; 3438 3439 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3440 stat[i] = 0; 3441 3442 for_each_online_cpu(cpu) 3443 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3444 stat[i] += per_cpu( 3445 pn->lruvec_stat_cpu->count[i], cpu); 3446 3447 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) 3448 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3449 atomic_long_add(stat[i], &pi->lruvec_stat[i]); 3450 } 3451 } 3452 3453 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) 3454 { 3455 unsigned long events[NR_VM_EVENT_ITEMS]; 3456 struct mem_cgroup *mi; 3457 int cpu, i; 3458 3459 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3460 events[i] = 0; 3461 3462 for_each_online_cpu(cpu) 3463 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3464 events[i] += per_cpu(memcg->vmstats_percpu->events[i], 3465 cpu); 3466 3467 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3468 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3469 atomic_long_add(events[i], &mi->vmevents[i]); 3470 } 
3471 3472 #ifdef CONFIG_MEMCG_KMEM 3473 static int memcg_online_kmem(struct mem_cgroup *memcg) 3474 { 3475 int memcg_id; 3476 3477 if (cgroup_memory_nokmem) 3478 return 0; 3479 3480 BUG_ON(memcg->kmemcg_id >= 0); 3481 BUG_ON(memcg->kmem_state); 3482 3483 memcg_id = memcg_alloc_cache_id(); 3484 if (memcg_id < 0) 3485 return memcg_id; 3486 3487 static_branch_inc(&memcg_kmem_enabled_key); 3488 /* 3489 * A memory cgroup is considered kmem-online as soon as it gets 3490 * kmemcg_id. Setting the id after enabling static branching will 3491 * guarantee no one starts accounting before all call sites are 3492 * patched. 3493 */ 3494 memcg->kmemcg_id = memcg_id; 3495 memcg->kmem_state = KMEM_ONLINE; 3496 INIT_LIST_HEAD(&memcg->kmem_caches); 3497 3498 return 0; 3499 } 3500 3501 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3502 { 3503 struct cgroup_subsys_state *css; 3504 struct mem_cgroup *parent, *child; 3505 int kmemcg_id; 3506 3507 if (memcg->kmem_state != KMEM_ONLINE) 3508 return; 3509 /* 3510 * Clear the online state before clearing memcg_caches array 3511 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 3512 * guarantees that no cache will be created for this cgroup 3513 * after we are done (see memcg_create_kmem_cache()). 3514 */ 3515 memcg->kmem_state = KMEM_ALLOCATED; 3516 3517 parent = parent_mem_cgroup(memcg); 3518 if (!parent) 3519 parent = root_mem_cgroup; 3520 3521 /* 3522 * Deactivate and reparent kmem_caches. 3523 */ 3524 memcg_deactivate_kmem_caches(memcg, parent); 3525 3526 kmemcg_id = memcg->kmemcg_id; 3527 BUG_ON(kmemcg_id < 0); 3528 3529 /* 3530 * Change kmemcg_id of this cgroup and all its descendants to the 3531 * parent's id, and then move all entries from this cgroup's list_lrus 3532 * to ones of the parent. After we have finished, all list_lrus 3533 * corresponding to this cgroup are guaranteed to remain empty. The 3534 * ordering is imposed by list_lru_node->lock taken by 3535 * memcg_drain_all_list_lrus(). 
3536 */ 3537 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3538 css_for_each_descendant_pre(css, &memcg->css) { 3539 child = mem_cgroup_from_css(css); 3540 BUG_ON(child->kmemcg_id != kmemcg_id); 3541 child->kmemcg_id = parent->kmemcg_id; 3542 if (!memcg->use_hierarchy) 3543 break; 3544 } 3545 rcu_read_unlock(); 3546 3547 memcg_drain_all_list_lrus(kmemcg_id, parent); 3548 3549 memcg_free_cache_id(kmemcg_id); 3550 } 3551 3552 static void memcg_free_kmem(struct mem_cgroup *memcg) 3553 { 3554 /* css_alloc() failed, offlining didn't happen */ 3555 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3556 memcg_offline_kmem(memcg); 3557 3558 if (memcg->kmem_state == KMEM_ALLOCATED) { 3559 WARN_ON(!list_empty(&memcg->kmem_caches)); 3560 static_branch_dec(&memcg_kmem_enabled_key); 3561 } 3562 } 3563 #else 3564 static int memcg_online_kmem(struct mem_cgroup *memcg) 3565 { 3566 return 0; 3567 } 3568 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3569 { 3570 } 3571 static void memcg_free_kmem(struct mem_cgroup *memcg) 3572 { 3573 } 3574 #endif /* CONFIG_MEMCG_KMEM */ 3575 3576 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3577 unsigned long max) 3578 { 3579 int ret; 3580 3581 mutex_lock(&memcg_max_mutex); 3582 ret = page_counter_set_max(&memcg->kmem, max); 3583 mutex_unlock(&memcg_max_mutex); 3584 return ret; 3585 } 3586 3587 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3588 { 3589 int ret; 3590 3591 mutex_lock(&memcg_max_mutex); 3592 3593 ret = page_counter_set_max(&memcg->tcpmem, max); 3594 if (ret) 3595 goto out; 3596 3597 if (!memcg->tcpmem_active) { 3598 /* 3599 * The active flag needs to be written after the static_key 3600 * update. This is what guarantees that the socket activation 3601 * function is the last one to run. See mem_cgroup_sk_alloc() 3602 * for details, and note that we don't mark any socket as 3603 * belonging to this memcg until that flag is up. 3604 * 3605 * We need to do this, because static_keys will span multiple 3606 * sites, but we can't control their order. If we mark a socket 3607 * as accounted, but the accounting functions are not patched in 3608 * yet, we'll lose accounting. 3609 * 3610 * We never race with the readers in mem_cgroup_sk_alloc(), 3611 * because when this value change, the code to process it is not 3612 * patched in yet. 3613 */ 3614 static_branch_inc(&memcg_sockets_enabled_key); 3615 memcg->tcpmem_active = true; 3616 } 3617 out: 3618 mutex_unlock(&memcg_max_mutex); 3619 return ret; 3620 } 3621 3622 /* 3623 * The user of this function is... 3624 * RES_LIMIT. 3625 */ 3626 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3627 char *buf, size_t nbytes, loff_t off) 3628 { 3629 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3630 unsigned long nr_pages; 3631 int ret; 3632 3633 buf = strstrip(buf); 3634 ret = page_counter_memparse(buf, "-1", &nr_pages); 3635 if (ret) 3636 return ret; 3637 3638 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3639 case RES_LIMIT: 3640 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3641 ret = -EINVAL; 3642 break; 3643 } 3644 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3645 case _MEM: 3646 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3647 break; 3648 case _MEMSWAP: 3649 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3650 break; 3651 case _KMEM: 3652 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. 
" 3653 "Please report your usecase to linux-mm@kvack.org if you " 3654 "depend on this functionality.\n"); 3655 ret = memcg_update_kmem_max(memcg, nr_pages); 3656 break; 3657 case _TCP: 3658 ret = memcg_update_tcp_max(memcg, nr_pages); 3659 break; 3660 } 3661 break; 3662 case RES_SOFT_LIMIT: 3663 memcg->soft_limit = nr_pages; 3664 ret = 0; 3665 break; 3666 } 3667 return ret ?: nbytes; 3668 } 3669 3670 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3671 size_t nbytes, loff_t off) 3672 { 3673 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3674 struct page_counter *counter; 3675 3676 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3677 case _MEM: 3678 counter = &memcg->memory; 3679 break; 3680 case _MEMSWAP: 3681 counter = &memcg->memsw; 3682 break; 3683 case _KMEM: 3684 counter = &memcg->kmem; 3685 break; 3686 case _TCP: 3687 counter = &memcg->tcpmem; 3688 break; 3689 default: 3690 BUG(); 3691 } 3692 3693 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3694 case RES_MAX_USAGE: 3695 page_counter_reset_watermark(counter); 3696 break; 3697 case RES_FAILCNT: 3698 counter->failcnt = 0; 3699 break; 3700 default: 3701 BUG(); 3702 } 3703 3704 return nbytes; 3705 } 3706 3707 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3708 struct cftype *cft) 3709 { 3710 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3711 } 3712 3713 #ifdef CONFIG_MMU 3714 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3715 struct cftype *cft, u64 val) 3716 { 3717 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3718 3719 if (val & ~MOVE_MASK) 3720 return -EINVAL; 3721 3722 /* 3723 * No kind of locking is needed in here, because ->can_attach() will 3724 * check this value once in the beginning of the process, and then carry 3725 * on with stale data. This means that changes to this value will only 3726 * affect task migrations starting after the change. 
3727 */ 3728 memcg->move_charge_at_immigrate = val; 3729 return 0; 3730 } 3731 #else 3732 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3733 struct cftype *cft, u64 val) 3734 { 3735 return -ENOSYS; 3736 } 3737 #endif 3738 3739 #ifdef CONFIG_NUMA 3740 3741 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3742 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3743 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3744 3745 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3746 int nid, unsigned int lru_mask, bool tree) 3747 { 3748 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3749 unsigned long nr = 0; 3750 enum lru_list lru; 3751 3752 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3753 3754 for_each_lru(lru) { 3755 if (!(BIT(lru) & lru_mask)) 3756 continue; 3757 if (tree) 3758 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3759 else 3760 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3761 } 3762 return nr; 3763 } 3764 3765 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3766 unsigned int lru_mask, 3767 bool tree) 3768 { 3769 unsigned long nr = 0; 3770 enum lru_list lru; 3771 3772 for_each_lru(lru) { 3773 if (!(BIT(lru) & lru_mask)) 3774 continue; 3775 if (tree) 3776 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3777 else 3778 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3779 } 3780 return nr; 3781 } 3782 3783 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3784 { 3785 struct numa_stat { 3786 const char *name; 3787 unsigned int lru_mask; 3788 }; 3789 3790 static const struct numa_stat stats[] = { 3791 { "total", LRU_ALL }, 3792 { "file", LRU_ALL_FILE }, 3793 { "anon", LRU_ALL_ANON }, 3794 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3795 }; 3796 const struct numa_stat *stat; 3797 int nid; 3798 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3799 3800 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3801 seq_printf(m, "%s=%lu", stat->name, 3802 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3803 false)); 3804 for_each_node_state(nid, N_MEMORY) 3805 seq_printf(m, " N%d=%lu", nid, 3806 mem_cgroup_node_nr_lru_pages(memcg, nid, 3807 stat->lru_mask, false)); 3808 seq_putc(m, '\n'); 3809 } 3810 3811 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3812 3813 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3814 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3815 true)); 3816 for_each_node_state(nid, N_MEMORY) 3817 seq_printf(m, " N%d=%lu", nid, 3818 mem_cgroup_node_nr_lru_pages(memcg, nid, 3819 stat->lru_mask, true)); 3820 seq_putc(m, '\n'); 3821 } 3822 3823 return 0; 3824 } 3825 #endif /* CONFIG_NUMA */ 3826 3827 static const unsigned int memcg1_stats[] = { 3828 MEMCG_CACHE, 3829 MEMCG_RSS, 3830 MEMCG_RSS_HUGE, 3831 NR_SHMEM, 3832 NR_FILE_MAPPED, 3833 NR_FILE_DIRTY, 3834 NR_WRITEBACK, 3835 MEMCG_SWAP, 3836 }; 3837 3838 static const char *const memcg1_stat_names[] = { 3839 "cache", 3840 "rss", 3841 "rss_huge", 3842 "shmem", 3843 "mapped_file", 3844 "dirty", 3845 "writeback", 3846 "swap", 3847 }; 3848 3849 /* Universal VM events cgroup1 shows, original sort order */ 3850 static const unsigned int memcg1_events[] = { 3851 PGPGIN, 3852 PGPGOUT, 3853 PGFAULT, 3854 PGMAJFAULT, 3855 }; 3856 3857 static int memcg_stat_show(struct seq_file *m, void *v) 3858 { 3859 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3860 unsigned long memory, memsw; 3861 struct mem_cgroup *mi; 3862 unsigned int i; 3863 3864 
BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3865 3866 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3867 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3868 continue; 3869 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 3870 memcg_page_state_local(memcg, memcg1_stats[i]) * 3871 PAGE_SIZE); 3872 } 3873 3874 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3875 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 3876 memcg_events_local(memcg, memcg1_events[i])); 3877 3878 for (i = 0; i < NR_LRU_LISTS; i++) 3879 seq_printf(m, "%s %lu\n", lru_list_name(i), 3880 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 3881 PAGE_SIZE); 3882 3883 /* Hierarchical information */ 3884 memory = memsw = PAGE_COUNTER_MAX; 3885 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3886 memory = min(memory, READ_ONCE(mi->memory.max)); 3887 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 3888 } 3889 seq_printf(m, "hierarchical_memory_limit %llu\n", 3890 (u64)memory * PAGE_SIZE); 3891 if (do_memsw_account()) 3892 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3893 (u64)memsw * PAGE_SIZE); 3894 3895 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3896 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3897 continue; 3898 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 3899 (u64)memcg_page_state(memcg, memcg1_stats[i]) * 3900 PAGE_SIZE); 3901 } 3902 3903 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3904 seq_printf(m, "total_%s %llu\n", 3905 vm_event_name(memcg1_events[i]), 3906 (u64)memcg_events(memcg, memcg1_events[i])); 3907 3908 for (i = 0; i < NR_LRU_LISTS; i++) 3909 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 3910 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 3911 PAGE_SIZE); 3912 3913 #ifdef CONFIG_DEBUG_VM 3914 { 3915 pg_data_t *pgdat; 3916 struct mem_cgroup_per_node *mz; 3917 struct zone_reclaim_stat *rstat; 3918 unsigned long recent_rotated[2] = {0, 0}; 3919 unsigned long recent_scanned[2] = {0, 0}; 3920 3921 for_each_online_pgdat(pgdat) { 3922 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3923 rstat = &mz->lruvec.reclaim_stat; 3924 3925 recent_rotated[0] += rstat->recent_rotated[0]; 3926 recent_rotated[1] += rstat->recent_rotated[1]; 3927 recent_scanned[0] += rstat->recent_scanned[0]; 3928 recent_scanned[1] += rstat->recent_scanned[1]; 3929 } 3930 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3931 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3932 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3933 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3934 } 3935 #endif 3936 3937 return 0; 3938 } 3939 3940 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3941 struct cftype *cft) 3942 { 3943 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3944 3945 return mem_cgroup_swappiness(memcg); 3946 } 3947 3948 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3949 struct cftype *cft, u64 val) 3950 { 3951 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3952 3953 if (val > 100) 3954 return -EINVAL; 3955 3956 if (css->parent) 3957 memcg->swappiness = val; 3958 else 3959 vm_swappiness = val; 3960 3961 return 0; 3962 } 3963 3964 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3965 { 3966 struct mem_cgroup_threshold_ary *t; 3967 unsigned long usage; 3968 int i; 3969 3970 rcu_read_lock(); 3971 if (!swap) 3972 t = rcu_dereference(memcg->thresholds.primary); 3973 else 3974 t = 
rcu_dereference(memcg->memsw_thresholds.primary); 3975 3976 if (!t) 3977 goto unlock; 3978 3979 usage = mem_cgroup_usage(memcg, swap); 3980 3981 /* 3982 * current_threshold points to threshold just below or equal to usage. 3983 * If it's not true, a threshold was crossed after last 3984 * call of __mem_cgroup_threshold(). 3985 */ 3986 i = t->current_threshold; 3987 3988 /* 3989 * Iterate backward over array of thresholds starting from 3990 * current_threshold and check if a threshold is crossed. 3991 * If none of thresholds below usage is crossed, we read 3992 * only one element of the array here. 3993 */ 3994 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3995 eventfd_signal(t->entries[i].eventfd, 1); 3996 3997 /* i = current_threshold + 1 */ 3998 i++; 3999 4000 /* 4001 * Iterate forward over array of thresholds starting from 4002 * current_threshold+1 and check if a threshold is crossed. 4003 * If none of thresholds above usage is crossed, we read 4004 * only one element of the array here. 4005 */ 4006 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4007 eventfd_signal(t->entries[i].eventfd, 1); 4008 4009 /* Update current_threshold */ 4010 t->current_threshold = i - 1; 4011 unlock: 4012 rcu_read_unlock(); 4013 } 4014 4015 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4016 { 4017 while (memcg) { 4018 __mem_cgroup_threshold(memcg, false); 4019 if (do_memsw_account()) 4020 __mem_cgroup_threshold(memcg, true); 4021 4022 memcg = parent_mem_cgroup(memcg); 4023 } 4024 } 4025 4026 static int compare_thresholds(const void *a, const void *b) 4027 { 4028 const struct mem_cgroup_threshold *_a = a; 4029 const struct mem_cgroup_threshold *_b = b; 4030 4031 if (_a->threshold > _b->threshold) 4032 return 1; 4033 4034 if (_a->threshold < _b->threshold) 4035 return -1; 4036 4037 return 0; 4038 } 4039 4040 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4041 { 4042 struct mem_cgroup_eventfd_list *ev; 4043 4044 spin_lock(&memcg_oom_lock); 4045 4046 list_for_each_entry(ev, &memcg->oom_notify, list) 4047 eventfd_signal(ev->eventfd, 1); 4048 4049 spin_unlock(&memcg_oom_lock); 4050 return 0; 4051 } 4052 4053 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4054 { 4055 struct mem_cgroup *iter; 4056 4057 for_each_mem_cgroup_tree(iter, memcg) 4058 mem_cgroup_oom_notify_cb(iter); 4059 } 4060 4061 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4062 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4063 { 4064 struct mem_cgroup_thresholds *thresholds; 4065 struct mem_cgroup_threshold_ary *new; 4066 unsigned long threshold; 4067 unsigned long usage; 4068 int i, size, ret; 4069 4070 ret = page_counter_memparse(args, "-1", &threshold); 4071 if (ret) 4072 return ret; 4073 4074 mutex_lock(&memcg->thresholds_lock); 4075 4076 if (type == _MEM) { 4077 thresholds = &memcg->thresholds; 4078 usage = mem_cgroup_usage(memcg, false); 4079 } else if (type == _MEMSWAP) { 4080 thresholds = &memcg->memsw_thresholds; 4081 usage = mem_cgroup_usage(memcg, true); 4082 } else 4083 BUG(); 4084 4085 /* Check if a threshold crossed before adding a new one */ 4086 if (thresholds->primary) 4087 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4088 4089 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4090 4091 /* Allocate memory for new array of thresholds */ 4092 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4093 if (!new) { 4094 ret = -ENOMEM; 4095 goto unlock; 4096 } 4097 new->size = size; 4098 4099 /* Copy thresholds (if any) to new array */ 4100 if (thresholds->primary) { 4101 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 4102 sizeof(struct mem_cgroup_threshold)); 4103 } 4104 4105 /* Add new threshold */ 4106 new->entries[size - 1].eventfd = eventfd; 4107 new->entries[size - 1].threshold = threshold; 4108 4109 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4110 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 4111 compare_thresholds, NULL); 4112 4113 /* Find current threshold */ 4114 new->current_threshold = -1; 4115 for (i = 0; i < size; i++) { 4116 if (new->entries[i].threshold <= usage) { 4117 /* 4118 * new->current_threshold will not be used until 4119 * rcu_assign_pointer(), so it's safe to increment 4120 * it here. 4121 */ 4122 ++new->current_threshold; 4123 } else 4124 break; 4125 } 4126 4127 /* Free old spare buffer and save old primary buffer as spare */ 4128 kfree(thresholds->spare); 4129 thresholds->spare = thresholds->primary; 4130 4131 rcu_assign_pointer(thresholds->primary, new); 4132 4133 /* To be sure that nobody uses thresholds */ 4134 synchronize_rcu(); 4135 4136 unlock: 4137 mutex_unlock(&memcg->thresholds_lock); 4138 4139 return ret; 4140 } 4141 4142 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4143 struct eventfd_ctx *eventfd, const char *args) 4144 { 4145 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4146 } 4147 4148 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4149 struct eventfd_ctx *eventfd, const char *args) 4150 { 4151 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4152 } 4153 4154 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4155 struct eventfd_ctx *eventfd, enum res_type type) 4156 { 4157 struct mem_cgroup_thresholds *thresholds; 4158 struct mem_cgroup_threshold_ary *new; 4159 unsigned long usage; 4160 int i, j, size, entries; 4161 4162 mutex_lock(&memcg->thresholds_lock); 4163 4164 if (type == _MEM) { 4165 thresholds = &memcg->thresholds; 4166 usage = mem_cgroup_usage(memcg, false); 4167 } else if (type == _MEMSWAP) { 4168 thresholds = &memcg->memsw_thresholds; 4169 usage = mem_cgroup_usage(memcg, true); 4170 } else 4171 BUG(); 4172 4173 if (!thresholds->primary) 4174 goto unlock; 4175 4176 /* Check if a threshold crossed before removing */ 4177 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4178 4179 /* Calculate new number of threshold */ 4180 size = entries = 0; 4181 for (i = 0; i < thresholds->primary->size; i++) { 4182 if (thresholds->primary->entries[i].eventfd != eventfd) 4183 size++; 4184 else 4185 entries++; 4186 } 4187 4188 new = thresholds->spare; 4189 4190 /* If no items related to eventfd have been cleared, nothing to do */ 4191 if (!entries) 4192 goto unlock; 4193 4194 /* Set thresholds array to NULL if we don't have thresholds */ 4195 if (!size) { 4196 kfree(new); 4197 new = NULL; 4198 goto swap_buffers; 4199 } 4200 4201 new->size = size; 4202 4203 /* Copy thresholds and find current threshold */ 4204 new->current_threshold = -1; 4205 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4206 if (thresholds->primary->entries[i].eventfd == eventfd) 4207 continue; 4208 4209 new->entries[j] = 
thresholds->primary->entries[i]; 4210 if (new->entries[j].threshold <= usage) { 4211 /* 4212 * new->current_threshold will not be used 4213 * until rcu_assign_pointer(), so it's safe to increment 4214 * it here. 4215 */ 4216 ++new->current_threshold; 4217 } 4218 j++; 4219 } 4220 4221 swap_buffers: 4222 /* Swap primary and spare array */ 4223 thresholds->spare = thresholds->primary; 4224 4225 rcu_assign_pointer(thresholds->primary, new); 4226 4227 /* To be sure that nobody uses thresholds */ 4228 synchronize_rcu(); 4229 4230 /* If all events are unregistered, free the spare array */ 4231 if (!new) { 4232 kfree(thresholds->spare); 4233 thresholds->spare = NULL; 4234 } 4235 unlock: 4236 mutex_unlock(&memcg->thresholds_lock); 4237 } 4238 4239 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4240 struct eventfd_ctx *eventfd) 4241 { 4242 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4243 } 4244 4245 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4246 struct eventfd_ctx *eventfd) 4247 { 4248 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4249 } 4250 4251 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4252 struct eventfd_ctx *eventfd, const char *args) 4253 { 4254 struct mem_cgroup_eventfd_list *event; 4255 4256 event = kmalloc(sizeof(*event), GFP_KERNEL); 4257 if (!event) 4258 return -ENOMEM; 4259 4260 spin_lock(&memcg_oom_lock); 4261 4262 event->eventfd = eventfd; 4263 list_add(&event->list, &memcg->oom_notify); 4264 4265 /* already in OOM ? */ 4266 if (memcg->under_oom) 4267 eventfd_signal(eventfd, 1); 4268 spin_unlock(&memcg_oom_lock); 4269 4270 return 0; 4271 } 4272 4273 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4274 struct eventfd_ctx *eventfd) 4275 { 4276 struct mem_cgroup_eventfd_list *ev, *tmp; 4277 4278 spin_lock(&memcg_oom_lock); 4279 4280 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4281 if (ev->eventfd == eventfd) { 4282 list_del(&ev->list); 4283 kfree(ev); 4284 } 4285 } 4286 4287 spin_unlock(&memcg_oom_lock); 4288 } 4289 4290 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4291 { 4292 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4293 4294 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4295 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4296 seq_printf(sf, "oom_kill %lu\n", 4297 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4298 return 0; 4299 } 4300 4301 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4302 struct cftype *cft, u64 val) 4303 { 4304 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4305 4306 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4307 if (!css->parent || !((val == 0) || (val == 1))) 4308 return -EINVAL; 4309 4310 memcg->oom_kill_disable = val; 4311 if (!val) 4312 memcg_oom_recover(memcg); 4313 4314 return 0; 4315 } 4316 4317 #ifdef CONFIG_CGROUP_WRITEBACK 4318 4319 #include <trace/events/writeback.h> 4320 4321 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4322 { 4323 return wb_domain_init(&memcg->cgwb_domain, gfp); 4324 } 4325 4326 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4327 { 4328 wb_domain_exit(&memcg->cgwb_domain); 4329 } 4330 4331 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4332 { 4333 wb_domain_size_changed(&memcg->cgwb_domain); 4334 } 4335 4336 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4337 { 4338 struct 
mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4339 4340 if (!memcg->css.parent) 4341 return NULL; 4342 4343 return &memcg->cgwb_domain; 4344 } 4345 4346 /* 4347 * idx can be of type enum memcg_stat_item or node_stat_item. 4348 * Keep in sync with memcg_exact_page(). 4349 */ 4350 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 4351 { 4352 long x = atomic_long_read(&memcg->vmstats[idx]); 4353 int cpu; 4354 4355 for_each_online_cpu(cpu) 4356 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 4357 if (x < 0) 4358 x = 0; 4359 return x; 4360 } 4361 4362 /** 4363 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4364 * @wb: bdi_writeback in question 4365 * @pfilepages: out parameter for number of file pages 4366 * @pheadroom: out parameter for number of allocatable pages according to memcg 4367 * @pdirty: out parameter for number of dirty pages 4368 * @pwriteback: out parameter for number of pages under writeback 4369 * 4370 * Determine the numbers of file, headroom, dirty, and writeback pages in 4371 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4372 * is a bit more involved. 4373 * 4374 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4375 * headroom is calculated as the lowest headroom of itself and the 4376 * ancestors. Note that this doesn't consider the actual amount of 4377 * available memory in the system. The caller should further cap 4378 * *@pheadroom accordingly. 4379 */ 4380 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4381 unsigned long *pheadroom, unsigned long *pdirty, 4382 unsigned long *pwriteback) 4383 { 4384 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4385 struct mem_cgroup *parent; 4386 4387 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4388 4389 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4390 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4391 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4392 *pheadroom = PAGE_COUNTER_MAX; 4393 4394 while ((parent = parent_mem_cgroup(memcg))) { 4395 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4396 READ_ONCE(memcg->memory.high)); 4397 unsigned long used = page_counter_read(&memcg->memory); 4398 4399 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4400 memcg = parent; 4401 } 4402 } 4403 4404 /* 4405 * Foreign dirty flushing 4406 * 4407 * There's an inherent mismatch between memcg and writeback. The former 4408 * trackes ownership per-page while the latter per-inode. This was a 4409 * deliberate design decision because honoring per-page ownership in the 4410 * writeback path is complicated, may lead to higher CPU and IO overheads 4411 * and deemed unnecessary given that write-sharing an inode across 4412 * different cgroups isn't a common use-case. 4413 * 4414 * Combined with inode majority-writer ownership switching, this works well 4415 * enough in most cases but there are some pathological cases. For 4416 * example, let's say there are two cgroups A and B which keep writing to 4417 * different but confined parts of the same inode. B owns the inode and 4418 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4419 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4420 * triggering background writeback. A will be slowed down without a way to 4421 * make writeback of the dirty pages happen. 
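* The dirty pages are all accounted to A, while the inode - and with it the decision to start writeback - belongs to B's domain, which is under no pressure at all.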
4422 * 4423 * Conditions like the above can lead to a cgroup getting repeatedly and 4424 * severely throttled after making some progress after each 4425 * dirty_expire_interval while the underlying IO device is almost 4426 * completely idle. 4427 * 4428 * Solving this problem completely requires matching the ownership tracking 4429 * granularities between memcg and writeback in either direction. However, 4430 * the more egregious behaviors can be avoided by simply remembering the 4431 * most recent foreign dirtying events and initiating remote flushes on 4432 * them when local writeback isn't enough to keep the memory clean enough. 4433 * 4434 * The following two functions implement such a mechanism. When a foreign 4435 * page - a page whose memcg and writeback ownerships don't match - is 4436 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4437 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4438 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4439 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4440 * foreign bdi_writebacks which haven't expired. Both the numbers of 4441 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4442 * limited to MEMCG_CGWB_FRN_CNT. 4443 * 4444 * The mechanism only remembers IDs and doesn't hold any object references. 4445 * As being wrong occasionally doesn't matter, updates and accesses to the 4446 * records are lockless and racy. 4447 */ 4448 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, 4449 struct bdi_writeback *wb) 4450 { 4451 struct mem_cgroup *memcg = page->mem_cgroup; 4452 struct memcg_cgwb_frn *frn; 4453 u64 now = get_jiffies_64(); 4454 u64 oldest_at = now; 4455 int oldest = -1; 4456 int i; 4457 4458 trace_track_foreign_dirty(page, wb); 4459 4460 /* 4461 * Pick the slot to use. If there is already a slot for @wb, keep 4462 * using it. If not, replace the oldest one which isn't being 4463 * written out. 4464 */ 4465 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4466 frn = &memcg->cgwb_frn[i]; 4467 if (frn->bdi_id == wb->bdi->id && 4468 frn->memcg_id == wb->memcg_css->id) 4469 break; 4470 if (time_before64(frn->at, oldest_at) && 4471 atomic_read(&frn->done.cnt) == 1) { 4472 oldest = i; 4473 oldest_at = frn->at; 4474 } 4475 } 4476 4477 if (i < MEMCG_CGWB_FRN_CNT) { 4478 /* 4479 * Re-using an existing one. Update timestamp lazily to 4480 * avoid making the cacheline hot. We want them to be 4481 * reasonably up-to-date and significantly shorter than 4482 * dirty_expire_interval as that's what expires the record. 4483 * Use the shorter of 1s and dirty_expire_interval / 8.
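* (with the default 30 second dirty_expire_interval this works out to the 1s cap)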
4484 */ 4485 unsigned long update_intv = 4486 min_t(unsigned long, HZ, 4487 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4488 4489 if (time_before64(frn->at, now - update_intv)) 4490 frn->at = now; 4491 } else if (oldest >= 0) { 4492 /* replace the oldest free one */ 4493 frn = &memcg->cgwb_frn[oldest]; 4494 frn->bdi_id = wb->bdi->id; 4495 frn->memcg_id = wb->memcg_css->id; 4496 frn->at = now; 4497 } 4498 } 4499 4500 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4501 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4502 { 4503 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4504 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4505 u64 now = jiffies_64; 4506 int i; 4507 4508 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4509 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4510 4511 /* 4512 * If the record is older than dirty_expire_interval, 4513 * writeback on it has already started. No need to kick it 4514 * off again. Also, don't start a new one if there's 4515 * already one in flight. 4516 */ 4517 if (time_after64(frn->at, now - intv) && 4518 atomic_read(&frn->done.cnt) == 1) { 4519 frn->at = 0; 4520 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4521 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4522 WB_REASON_FOREIGN_FLUSH, 4523 &frn->done); 4524 } 4525 } 4526 } 4527 4528 #else /* CONFIG_CGROUP_WRITEBACK */ 4529 4530 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4531 { 4532 return 0; 4533 } 4534 4535 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4536 { 4537 } 4538 4539 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4540 { 4541 } 4542 4543 #endif /* CONFIG_CGROUP_WRITEBACK */ 4544 4545 /* 4546 * DO NOT USE IN NEW FILES. 4547 * 4548 * "cgroup.event_control" implementation. 4549 * 4550 * This is way over-engineered. It tries to support fully configurable 4551 * events for each user. Such level of flexibility is completely 4552 * unnecessary especially in the light of the planned unified hierarchy. 4553 * 4554 * Please deprecate this and replace with something simpler if at all 4555 * possible. 4556 */ 4557 4558 /* 4559 * Unregister event and free resources. 4560 * 4561 * Gets called from workqueue. 4562 */ 4563 static void memcg_event_remove(struct work_struct *work) 4564 { 4565 struct mem_cgroup_event *event = 4566 container_of(work, struct mem_cgroup_event, remove); 4567 struct mem_cgroup *memcg = event->memcg; 4568 4569 remove_wait_queue(event->wqh, &event->wait); 4570 4571 event->unregister_event(memcg, event->eventfd); 4572 4573 /* Notify userspace the event is going away. */ 4574 eventfd_signal(event->eventfd, 1); 4575 4576 eventfd_ctx_put(event->eventfd); 4577 kfree(event); 4578 css_put(&memcg->css); 4579 } 4580 4581 /* 4582 * Gets called on EPOLLHUP on eventfd when user closes it. 4583 * 4584 * Called with wqh->lock held and interrupts disabled. 4585 */ 4586 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4587 int sync, void *key) 4588 { 4589 struct mem_cgroup_event *event = 4590 container_of(wait, struct mem_cgroup_event, wait); 4591 struct mem_cgroup *memcg = event->memcg; 4592 __poll_t flags = key_to_poll(key); 4593 4594 if (flags & EPOLLHUP) { 4595 /* 4596 * If the event has been detached at cgroup removal, we 4597 * can simply return knowing the other side will cleanup 4598 * for us. 
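* (mem_cgroup_css_offline() has already scheduled memcg_event_remove() in that case)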
4599 * 4600 * We can't race against event freeing since the other 4601 * side will require wqh->lock via remove_wait_queue(), 4602 * which we hold. 4603 */ 4604 spin_lock(&memcg->event_list_lock); 4605 if (!list_empty(&event->list)) { 4606 list_del_init(&event->list); 4607 /* 4608 * We are in atomic context, but cgroup_event_remove() 4609 * may sleep, so we have to call it in workqueue. 4610 */ 4611 schedule_work(&event->remove); 4612 } 4613 spin_unlock(&memcg->event_list_lock); 4614 } 4615 4616 return 0; 4617 } 4618 4619 static void memcg_event_ptable_queue_proc(struct file *file, 4620 wait_queue_head_t *wqh, poll_table *pt) 4621 { 4622 struct mem_cgroup_event *event = 4623 container_of(pt, struct mem_cgroup_event, pt); 4624 4625 event->wqh = wqh; 4626 add_wait_queue(wqh, &event->wait); 4627 } 4628 4629 /* 4630 * DO NOT USE IN NEW FILES. 4631 * 4632 * Parse input and register new cgroup event handler. 4633 * 4634 * Input must be in format '<event_fd> <control_fd> <args>'. 4635 * Interpretation of args is defined by control file implementation. 4636 */ 4637 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4638 char *buf, size_t nbytes, loff_t off) 4639 { 4640 struct cgroup_subsys_state *css = of_css(of); 4641 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4642 struct mem_cgroup_event *event; 4643 struct cgroup_subsys_state *cfile_css; 4644 unsigned int efd, cfd; 4645 struct fd efile; 4646 struct fd cfile; 4647 const char *name; 4648 char *endp; 4649 int ret; 4650 4651 buf = strstrip(buf); 4652 4653 efd = simple_strtoul(buf, &endp, 10); 4654 if (*endp != ' ') 4655 return -EINVAL; 4656 buf = endp + 1; 4657 4658 cfd = simple_strtoul(buf, &endp, 10); 4659 if ((*endp != ' ') && (*endp != '\0')) 4660 return -EINVAL; 4661 buf = endp + 1; 4662 4663 event = kzalloc(sizeof(*event), GFP_KERNEL); 4664 if (!event) 4665 return -ENOMEM; 4666 4667 event->memcg = memcg; 4668 INIT_LIST_HEAD(&event->list); 4669 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4670 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4671 INIT_WORK(&event->remove, memcg_event_remove); 4672 4673 efile = fdget(efd); 4674 if (!efile.file) { 4675 ret = -EBADF; 4676 goto out_kfree; 4677 } 4678 4679 event->eventfd = eventfd_ctx_fileget(efile.file); 4680 if (IS_ERR(event->eventfd)) { 4681 ret = PTR_ERR(event->eventfd); 4682 goto out_put_efile; 4683 } 4684 4685 cfile = fdget(cfd); 4686 if (!cfile.file) { 4687 ret = -EBADF; 4688 goto out_put_eventfd; 4689 } 4690 4691 /* the process need read permission on control file */ 4692 /* AV: shouldn't we check that it's been opened for read instead? */ 4693 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4694 if (ret < 0) 4695 goto out_put_cfile; 4696 4697 /* 4698 * Determine the event callbacks and set them in @event. This used 4699 * to be done via struct cftype but cgroup core no longer knows 4700 * about these events. The following is crude but the whole thing 4701 * is for compatibility anyway. 4702 * 4703 * DO NOT ADD NEW FILES. 
4704 */ 4705 name = cfile.file->f_path.dentry->d_name.name; 4706 4707 if (!strcmp(name, "memory.usage_in_bytes")) { 4708 event->register_event = mem_cgroup_usage_register_event; 4709 event->unregister_event = mem_cgroup_usage_unregister_event; 4710 } else if (!strcmp(name, "memory.oom_control")) { 4711 event->register_event = mem_cgroup_oom_register_event; 4712 event->unregister_event = mem_cgroup_oom_unregister_event; 4713 } else if (!strcmp(name, "memory.pressure_level")) { 4714 event->register_event = vmpressure_register_event; 4715 event->unregister_event = vmpressure_unregister_event; 4716 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4717 event->register_event = memsw_cgroup_usage_register_event; 4718 event->unregister_event = memsw_cgroup_usage_unregister_event; 4719 } else { 4720 ret = -EINVAL; 4721 goto out_put_cfile; 4722 } 4723 4724 /* 4725 * Verify @cfile should belong to @css. Also, remaining events are 4726 * automatically removed on cgroup destruction but the removal is 4727 * asynchronous, so take an extra ref on @css. 4728 */ 4729 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4730 &memory_cgrp_subsys); 4731 ret = -EINVAL; 4732 if (IS_ERR(cfile_css)) 4733 goto out_put_cfile; 4734 if (cfile_css != css) { 4735 css_put(cfile_css); 4736 goto out_put_cfile; 4737 } 4738 4739 ret = event->register_event(memcg, event->eventfd, buf); 4740 if (ret) 4741 goto out_put_css; 4742 4743 vfs_poll(efile.file, &event->pt); 4744 4745 spin_lock(&memcg->event_list_lock); 4746 list_add(&event->list, &memcg->event_list); 4747 spin_unlock(&memcg->event_list_lock); 4748 4749 fdput(cfile); 4750 fdput(efile); 4751 4752 return nbytes; 4753 4754 out_put_css: 4755 css_put(css); 4756 out_put_cfile: 4757 fdput(cfile); 4758 out_put_eventfd: 4759 eventfd_ctx_put(event->eventfd); 4760 out_put_efile: 4761 fdput(efile); 4762 out_kfree: 4763 kfree(event); 4764 4765 return ret; 4766 } 4767 4768 static struct cftype mem_cgroup_legacy_files[] = { 4769 { 4770 .name = "usage_in_bytes", 4771 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4772 .read_u64 = mem_cgroup_read_u64, 4773 }, 4774 { 4775 .name = "max_usage_in_bytes", 4776 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4777 .write = mem_cgroup_reset, 4778 .read_u64 = mem_cgroup_read_u64, 4779 }, 4780 { 4781 .name = "limit_in_bytes", 4782 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4783 .write = mem_cgroup_write, 4784 .read_u64 = mem_cgroup_read_u64, 4785 }, 4786 { 4787 .name = "soft_limit_in_bytes", 4788 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4789 .write = mem_cgroup_write, 4790 .read_u64 = mem_cgroup_read_u64, 4791 }, 4792 { 4793 .name = "failcnt", 4794 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4795 .write = mem_cgroup_reset, 4796 .read_u64 = mem_cgroup_read_u64, 4797 }, 4798 { 4799 .name = "stat", 4800 .seq_show = memcg_stat_show, 4801 }, 4802 { 4803 .name = "force_empty", 4804 .write = mem_cgroup_force_empty_write, 4805 }, 4806 { 4807 .name = "use_hierarchy", 4808 .write_u64 = mem_cgroup_hierarchy_write, 4809 .read_u64 = mem_cgroup_hierarchy_read, 4810 }, 4811 { 4812 .name = "cgroup.event_control", /* XXX: for compat */ 4813 .write = memcg_write_event_control, 4814 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4815 }, 4816 { 4817 .name = "swappiness", 4818 .read_u64 = mem_cgroup_swappiness_read, 4819 .write_u64 = mem_cgroup_swappiness_write, 4820 }, 4821 { 4822 .name = "move_charge_at_immigrate", 4823 .read_u64 = mem_cgroup_move_charge_read, 4824 .write_u64 = mem_cgroup_move_charge_write, 4825 
}, 4826 { 4827 .name = "oom_control", 4828 .seq_show = mem_cgroup_oom_control_read, 4829 .write_u64 = mem_cgroup_oom_control_write, 4830 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4831 }, 4832 { 4833 .name = "pressure_level", 4834 }, 4835 #ifdef CONFIG_NUMA 4836 { 4837 .name = "numa_stat", 4838 .seq_show = memcg_numa_stat_show, 4839 }, 4840 #endif 4841 { 4842 .name = "kmem.limit_in_bytes", 4843 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4844 .write = mem_cgroup_write, 4845 .read_u64 = mem_cgroup_read_u64, 4846 }, 4847 { 4848 .name = "kmem.usage_in_bytes", 4849 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4850 .read_u64 = mem_cgroup_read_u64, 4851 }, 4852 { 4853 .name = "kmem.failcnt", 4854 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4855 .write = mem_cgroup_reset, 4856 .read_u64 = mem_cgroup_read_u64, 4857 }, 4858 { 4859 .name = "kmem.max_usage_in_bytes", 4860 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4861 .write = mem_cgroup_reset, 4862 .read_u64 = mem_cgroup_read_u64, 4863 }, 4864 #if defined(CONFIG_MEMCG_KMEM) && \ 4865 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 4866 { 4867 .name = "kmem.slabinfo", 4868 .seq_start = memcg_slab_start, 4869 .seq_next = memcg_slab_next, 4870 .seq_stop = memcg_slab_stop, 4871 .seq_show = memcg_slab_show, 4872 }, 4873 #endif 4874 { 4875 .name = "kmem.tcp.limit_in_bytes", 4876 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4877 .write = mem_cgroup_write, 4878 .read_u64 = mem_cgroup_read_u64, 4879 }, 4880 { 4881 .name = "kmem.tcp.usage_in_bytes", 4882 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4883 .read_u64 = mem_cgroup_read_u64, 4884 }, 4885 { 4886 .name = "kmem.tcp.failcnt", 4887 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4888 .write = mem_cgroup_reset, 4889 .read_u64 = mem_cgroup_read_u64, 4890 }, 4891 { 4892 .name = "kmem.tcp.max_usage_in_bytes", 4893 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4894 .write = mem_cgroup_reset, 4895 .read_u64 = mem_cgroup_read_u64, 4896 }, 4897 { }, /* terminate */ 4898 }; 4899 4900 /* 4901 * Private memory cgroup IDR 4902 * 4903 * Swap-out records and page cache shadow entries need to store memcg 4904 * references in constrained space, so we maintain an ID space that is 4905 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4906 * memory-controlled cgroups to 64k. 4907 * 4908 * However, there usually are many references to the oflline CSS after 4909 * the cgroup has been destroyed, such as page cache or reclaimable 4910 * slab objects, that don't need to hang on to the ID. We want to keep 4911 * those dead CSS from occupying IDs, or we might quickly exhaust the 4912 * relatively small ID space and prevent the creation of new cgroups 4913 * even when there are much fewer than 64k cgroups - possibly none. 4914 * 4915 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4916 * be freed and recycled when it's no longer needed, which is usually 4917 * when the CSS is offlined. 4918 * 4919 * The only exception to that are records of swapped out tmpfs/shmem 4920 * pages that need to be attributed to live ancestors on swapin. But 4921 * those references are manageable from userspace. 
4922 */ 4923 4924 static DEFINE_IDR(mem_cgroup_idr); 4925 4926 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 4927 { 4928 if (memcg->id.id > 0) { 4929 idr_remove(&mem_cgroup_idr, memcg->id.id); 4930 memcg->id.id = 0; 4931 } 4932 } 4933 4934 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 4935 unsigned int n) 4936 { 4937 refcount_add(n, &memcg->id.ref); 4938 } 4939 4940 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4941 { 4942 if (refcount_sub_and_test(n, &memcg->id.ref)) { 4943 mem_cgroup_id_remove(memcg); 4944 4945 /* Memcg ID pins CSS */ 4946 css_put(&memcg->css); 4947 } 4948 } 4949 4950 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4951 { 4952 mem_cgroup_id_put_many(memcg, 1); 4953 } 4954 4955 /** 4956 * mem_cgroup_from_id - look up a memcg from a memcg id 4957 * @id: the memcg id to look up 4958 * 4959 * Caller must hold rcu_read_lock(). 4960 */ 4961 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4962 { 4963 WARN_ON_ONCE(!rcu_read_lock_held()); 4964 return idr_find(&mem_cgroup_idr, id); 4965 } 4966 4967 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4968 { 4969 struct mem_cgroup_per_node *pn; 4970 int tmp = node; 4971 /* 4972 * This routine is called against possible nodes. 4973 * But it's BUG to call kmalloc() against offline node. 4974 * 4975 * TODO: this routine can waste much memory for nodes which will 4976 * never be onlined. It's better to use memory hotplug callback 4977 * function. 4978 */ 4979 if (!node_state(node, N_NORMAL_MEMORY)) 4980 tmp = -1; 4981 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4982 if (!pn) 4983 return 1; 4984 4985 pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat); 4986 if (!pn->lruvec_stat_local) { 4987 kfree(pn); 4988 return 1; 4989 } 4990 4991 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); 4992 if (!pn->lruvec_stat_cpu) { 4993 free_percpu(pn->lruvec_stat_local); 4994 kfree(pn); 4995 return 1; 4996 } 4997 4998 lruvec_init(&pn->lruvec); 4999 pn->usage_in_excess = 0; 5000 pn->on_tree = false; 5001 pn->memcg = memcg; 5002 5003 memcg->nodeinfo[node] = pn; 5004 return 0; 5005 } 5006 5007 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5008 { 5009 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5010 5011 if (!pn) 5012 return; 5013 5014 free_percpu(pn->lruvec_stat_cpu); 5015 free_percpu(pn->lruvec_stat_local); 5016 kfree(pn); 5017 } 5018 5019 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5020 { 5021 int node; 5022 5023 for_each_node(node) 5024 free_mem_cgroup_per_node_info(memcg, node); 5025 free_percpu(memcg->vmstats_percpu); 5026 free_percpu(memcg->vmstats_local); 5027 kfree(memcg); 5028 } 5029 5030 static void mem_cgroup_free(struct mem_cgroup *memcg) 5031 { 5032 memcg_wb_domain_exit(memcg); 5033 /* 5034 * Flush percpu vmstats and vmevents to guarantee the value correctness 5035 * on parent's and all ancestor levels. 
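* Otherwise per-cpu deltas that have not yet crossed the batching threshold would be lost once this memcg is freed.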
5036 */ 5037 memcg_flush_percpu_vmstats(memcg); 5038 memcg_flush_percpu_vmevents(memcg); 5039 __mem_cgroup_free(memcg); 5040 } 5041 5042 static struct mem_cgroup *mem_cgroup_alloc(void) 5043 { 5044 struct mem_cgroup *memcg; 5045 unsigned int size; 5046 int node; 5047 int __maybe_unused i; 5048 long error = -ENOMEM; 5049 5050 size = sizeof(struct mem_cgroup); 5051 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5052 5053 memcg = kzalloc(size, GFP_KERNEL); 5054 if (!memcg) 5055 return ERR_PTR(error); 5056 5057 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5058 1, MEM_CGROUP_ID_MAX, 5059 GFP_KERNEL); 5060 if (memcg->id.id < 0) { 5061 error = memcg->id.id; 5062 goto fail; 5063 } 5064 5065 memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); 5066 if (!memcg->vmstats_local) 5067 goto fail; 5068 5069 memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); 5070 if (!memcg->vmstats_percpu) 5071 goto fail; 5072 5073 for_each_node(node) 5074 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5075 goto fail; 5076 5077 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5078 goto fail; 5079 5080 INIT_WORK(&memcg->high_work, high_work_func); 5081 INIT_LIST_HEAD(&memcg->oom_notify); 5082 mutex_init(&memcg->thresholds_lock); 5083 spin_lock_init(&memcg->move_lock); 5084 vmpressure_init(&memcg->vmpressure); 5085 INIT_LIST_HEAD(&memcg->event_list); 5086 spin_lock_init(&memcg->event_list_lock); 5087 memcg->socket_pressure = jiffies; 5088 #ifdef CONFIG_MEMCG_KMEM 5089 memcg->kmemcg_id = -1; 5090 #endif 5091 #ifdef CONFIG_CGROUP_WRITEBACK 5092 INIT_LIST_HEAD(&memcg->cgwb_list); 5093 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5094 memcg->cgwb_frn[i].done = 5095 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5096 #endif 5097 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5098 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5099 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5100 memcg->deferred_split_queue.split_queue_len = 0; 5101 #endif 5102 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5103 return memcg; 5104 fail: 5105 mem_cgroup_id_remove(memcg); 5106 __mem_cgroup_free(memcg); 5107 return ERR_PTR(error); 5108 } 5109 5110 static struct cgroup_subsys_state * __ref 5111 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5112 { 5113 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5114 struct mem_cgroup *memcg; 5115 long error = -ENOMEM; 5116 5117 memcg = mem_cgroup_alloc(); 5118 if (IS_ERR(memcg)) 5119 return ERR_CAST(memcg); 5120 5121 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5122 memcg->soft_limit = PAGE_COUNTER_MAX; 5123 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5124 if (parent) { 5125 memcg->swappiness = mem_cgroup_swappiness(parent); 5126 memcg->oom_kill_disable = parent->oom_kill_disable; 5127 } 5128 if (parent && parent->use_hierarchy) { 5129 memcg->use_hierarchy = true; 5130 page_counter_init(&memcg->memory, &parent->memory); 5131 page_counter_init(&memcg->swap, &parent->swap); 5132 page_counter_init(&memcg->memsw, &parent->memsw); 5133 page_counter_init(&memcg->kmem, &parent->kmem); 5134 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5135 } else { 5136 page_counter_init(&memcg->memory, NULL); 5137 page_counter_init(&memcg->swap, NULL); 5138 page_counter_init(&memcg->memsw, NULL); 5139 page_counter_init(&memcg->kmem, NULL); 5140 page_counter_init(&memcg->tcpmem, NULL); 5141 /* 5142 * Deeper hierachy with use_hierarchy == false doesn't make 5143 * much sense so let cgroup subsystem know about this 5144 * 
unfortunate state in our controller. 5145 */ 5146 if (parent != root_mem_cgroup) 5147 memory_cgrp_subsys.broken_hierarchy = true; 5148 } 5149 5150 /* The following stuff does not apply to the root */ 5151 if (!parent) { 5152 #ifdef CONFIG_MEMCG_KMEM 5153 INIT_LIST_HEAD(&memcg->kmem_caches); 5154 #endif 5155 root_mem_cgroup = memcg; 5156 return &memcg->css; 5157 } 5158 5159 error = memcg_online_kmem(memcg); 5160 if (error) 5161 goto fail; 5162 5163 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5164 static_branch_inc(&memcg_sockets_enabled_key); 5165 5166 return &memcg->css; 5167 fail: 5168 mem_cgroup_id_remove(memcg); 5169 mem_cgroup_free(memcg); 5170 return ERR_PTR(error); 5171 } 5172 5173 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5174 { 5175 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5176 5177 /* 5178 * A memcg must be visible for memcg_expand_shrinker_maps() 5179 * by the time the maps are allocated. So, we allocate maps 5180 * here, when for_each_mem_cgroup() can't skip it. 5181 */ 5182 if (memcg_alloc_shrinker_maps(memcg)) { 5183 mem_cgroup_id_remove(memcg); 5184 return -ENOMEM; 5185 } 5186 5187 /* Online state pins memcg ID, memcg ID pins CSS */ 5188 refcount_set(&memcg->id.ref, 1); 5189 css_get(css); 5190 return 0; 5191 } 5192 5193 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5194 { 5195 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5196 struct mem_cgroup_event *event, *tmp; 5197 5198 /* 5199 * Unregister events and notify userspace. 5200 * Notify userspace about cgroup removing only after rmdir of cgroup 5201 * directory to avoid race between userspace and kernelspace. 5202 */ 5203 spin_lock(&memcg->event_list_lock); 5204 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5205 list_del_init(&event->list); 5206 schedule_work(&event->remove); 5207 } 5208 spin_unlock(&memcg->event_list_lock); 5209 5210 page_counter_set_min(&memcg->memory, 0); 5211 page_counter_set_low(&memcg->memory, 0); 5212 5213 memcg_offline_kmem(memcg); 5214 wb_memcg_offline(memcg); 5215 5216 drain_all_stock(memcg); 5217 5218 mem_cgroup_id_put(memcg); 5219 } 5220 5221 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5222 { 5223 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5224 5225 invalidate_reclaim_iterators(memcg); 5226 } 5227 5228 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5229 { 5230 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5231 int __maybe_unused i; 5232 5233 #ifdef CONFIG_CGROUP_WRITEBACK 5234 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5235 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5236 #endif 5237 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5238 static_branch_dec(&memcg_sockets_enabled_key); 5239 5240 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5241 static_branch_dec(&memcg_sockets_enabled_key); 5242 5243 vmpressure_cleanup(&memcg->vmpressure); 5244 cancel_work_sync(&memcg->high_work); 5245 mem_cgroup_remove_from_trees(memcg); 5246 memcg_free_shrinker_maps(memcg); 5247 memcg_free_kmem(memcg); 5248 mem_cgroup_free(memcg); 5249 } 5250 5251 /** 5252 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5253 * @css: the target css 5254 * 5255 * Reset the states of the mem_cgroup associated with @css. This is 5256 * invoked when the userland requests disabling on the default hierarchy 5257 * but the memcg is pinned through dependency. 
The memcg should stop 5258 * applying policies and should revert to the vanilla state as it may be 5259 * made visible again. 5260 * 5261 * The current implementation only resets the essential configurations. 5262 * This needs to be expanded to cover all the visible parts. 5263 */ 5264 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5265 { 5266 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5267 5268 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5269 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5270 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); 5271 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5272 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5273 page_counter_set_min(&memcg->memory, 0); 5274 page_counter_set_low(&memcg->memory, 0); 5275 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5276 memcg->soft_limit = PAGE_COUNTER_MAX; 5277 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5278 memcg_wb_domain_size_changed(memcg); 5279 } 5280 5281 #ifdef CONFIG_MMU 5282 /* Handlers for move charge at task migration. */ 5283 static int mem_cgroup_do_precharge(unsigned long count) 5284 { 5285 int ret; 5286 5287 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5288 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5289 if (!ret) { 5290 mc.precharge += count; 5291 return ret; 5292 } 5293 5294 /* Try charges one by one with reclaim, but do not retry */ 5295 while (count--) { 5296 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5297 if (ret) 5298 return ret; 5299 mc.precharge++; 5300 cond_resched(); 5301 } 5302 return 0; 5303 } 5304 5305 union mc_target { 5306 struct page *page; 5307 swp_entry_t ent; 5308 }; 5309 5310 enum mc_target_type { 5311 MC_TARGET_NONE = 0, 5312 MC_TARGET_PAGE, 5313 MC_TARGET_SWAP, 5314 MC_TARGET_DEVICE, 5315 }; 5316 5317 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5318 unsigned long addr, pte_t ptent) 5319 { 5320 struct page *page = vm_normal_page(vma, addr, ptent); 5321 5322 if (!page || !page_mapped(page)) 5323 return NULL; 5324 if (PageAnon(page)) { 5325 if (!(mc.flags & MOVE_ANON)) 5326 return NULL; 5327 } else { 5328 if (!(mc.flags & MOVE_FILE)) 5329 return NULL; 5330 } 5331 if (!get_page_unless_zero(page)) 5332 return NULL; 5333 5334 return page; 5335 } 5336 5337 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5338 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5339 pte_t ptent, swp_entry_t *entry) 5340 { 5341 struct page *page = NULL; 5342 swp_entry_t ent = pte_to_swp_entry(ptent); 5343 5344 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 5345 return NULL; 5346 5347 /* 5348 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages belonging to 5349 * a device. Because they are not accessible by the CPU, they are stored 5350 * as special swap entries in the CPU page table. 5351 */ 5352 if (is_device_private_entry(ent)) { 5353 page = device_private_entry_to_page(ent); 5354 /* 5355 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has 5356 * a refcount of 1 when free (unlike a normal page). 5357 */ 5358 if (!page_ref_add_unless(page, 1, 1)) 5359 return NULL; 5360 return page; 5361 } 5362 5363 /* 5364 * Because lookup_swap_cache() updates some statistics counters, 5365 * we call find_get_page() with swapper_space directly.
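* (a speculative probe like this one would otherwise be counted in the swap cache hit/miss statistics)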
5366 */ 5367 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5368 if (do_memsw_account()) 5369 entry->val = ent.val; 5370 5371 return page; 5372 } 5373 #else 5374 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5375 pte_t ptent, swp_entry_t *entry) 5376 { 5377 return NULL; 5378 } 5379 #endif 5380 5381 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5382 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5383 { 5384 struct page *page = NULL; 5385 struct address_space *mapping; 5386 pgoff_t pgoff; 5387 5388 if (!vma->vm_file) /* anonymous vma */ 5389 return NULL; 5390 if (!(mc.flags & MOVE_FILE)) 5391 return NULL; 5392 5393 mapping = vma->vm_file->f_mapping; 5394 pgoff = linear_page_index(vma, addr); 5395 5396 /* page is moved even if it's not RSS of this task(page-faulted). */ 5397 #ifdef CONFIG_SWAP 5398 /* shmem/tmpfs may report page out on swap: account for that too. */ 5399 if (shmem_mapping(mapping)) { 5400 page = find_get_entry(mapping, pgoff); 5401 if (xa_is_value(page)) { 5402 swp_entry_t swp = radix_to_swp_entry(page); 5403 if (do_memsw_account()) 5404 *entry = swp; 5405 page = find_get_page(swap_address_space(swp), 5406 swp_offset(swp)); 5407 } 5408 } else 5409 page = find_get_page(mapping, pgoff); 5410 #else 5411 page = find_get_page(mapping, pgoff); 5412 #endif 5413 return page; 5414 } 5415 5416 /** 5417 * mem_cgroup_move_account - move account of the page 5418 * @page: the page 5419 * @compound: charge the page as compound or small page 5420 * @from: mem_cgroup which the page is moved from. 5421 * @to: mem_cgroup which the page is moved to. @from != @to. 5422 * 5423 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5424 * 5425 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5426 * from old cgroup. 5427 */ 5428 static int mem_cgroup_move_account(struct page *page, 5429 bool compound, 5430 struct mem_cgroup *from, 5431 struct mem_cgroup *to) 5432 { 5433 struct lruvec *from_vec, *to_vec; 5434 struct pglist_data *pgdat; 5435 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5436 int ret; 5437 bool anon; 5438 5439 VM_BUG_ON(from == to); 5440 VM_BUG_ON_PAGE(PageLRU(page), page); 5441 VM_BUG_ON(compound && !PageTransHuge(page)); 5442 5443 /* 5444 * Prevent mem_cgroup_migrate() from looking at 5445 * page->mem_cgroup of its source page while we change it. 5446 */ 5447 ret = -EBUSY; 5448 if (!trylock_page(page)) 5449 goto out; 5450 5451 ret = -EINVAL; 5452 if (page->mem_cgroup != from) 5453 goto out_unlock; 5454 5455 anon = PageAnon(page); 5456 5457 pgdat = page_pgdat(page); 5458 from_vec = mem_cgroup_lruvec(from, pgdat); 5459 to_vec = mem_cgroup_lruvec(to, pgdat); 5460 5461 lock_page_memcg(page); 5462 5463 if (!anon && page_mapped(page)) { 5464 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5465 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5466 } 5467 5468 if (!anon && PageDirty(page)) { 5469 struct address_space *mapping = page_mapping(page); 5470 5471 if (mapping_cap_account_dirty(mapping)) { 5472 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages); 5473 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages); 5474 } 5475 } 5476 5477 if (PageWriteback(page)) { 5478 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5479 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5480 } 5481 5482 /* 5483 * All state has been migrated, let's switch to the new memcg. 
5484 * 5485 * It is safe to change page->mem_cgroup here because the page 5486 * is referenced, charged, isolated, and locked: we can't race 5487 * with (un)charging, migration, LRU putback, or anything else 5488 * that would rely on a stable page->mem_cgroup. 5489 * 5490 * Note that lock_page_memcg is a memcg lock, not a page lock, 5491 * to save space. As soon as we switch page->mem_cgroup to a 5492 * new memcg that isn't locked, the above state can change 5493 * concurrently again. Make sure we're truly done with it. 5494 */ 5495 smp_mb(); 5496 5497 page->mem_cgroup = to; /* caller should have done css_get */ 5498 5499 __unlock_page_memcg(from); 5500 5501 ret = 0; 5502 5503 local_irq_disable(); 5504 mem_cgroup_charge_statistics(to, page, nr_pages); 5505 memcg_check_events(to, page); 5506 mem_cgroup_charge_statistics(from, page, -nr_pages); 5507 memcg_check_events(from, page); 5508 local_irq_enable(); 5509 out_unlock: 5510 unlock_page(page); 5511 out: 5512 return ret; 5513 } 5514 5515 /** 5516 * get_mctgt_type - get target type of moving charge 5517 * @vma: the vma the pte to be checked belongs to 5518 * @addr: the address corresponding to the pte to be checked 5519 * @ptent: the pte to be checked 5520 * @target: the pointer where the target page or swap entry will be stored (can be NULL) 5521 * 5522 * Returns 5523 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5524 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5525 * move charge. if @target is not NULL, the page is stored in target->page 5526 * with an extra refcount taken (callers should handle it). 5527 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5528 * target for charge migration. if @target is not NULL, the entry is stored 5529 * in target->ent. 5530 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE 5531 * (so ZONE_DEVICE page and thus not on the lru). 5532 * For now such a page is charged like a regular page would be, as for all 5533 * intents and purposes it is just special memory taking the place of a 5534 * regular page. 5535 * 5536 * See Documentation/vm/hmm.txt and include/linux/hmm.h 5537 * 5538 * Called with pte lock held. 5539 */ 5540 5541 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5542 unsigned long addr, pte_t ptent, union mc_target *target) 5543 { 5544 struct page *page = NULL; 5545 enum mc_target_type ret = MC_TARGET_NONE; 5546 swp_entry_t ent = { .val = 0 }; 5547 5548 if (pte_present(ptent)) 5549 page = mc_handle_present_pte(vma, addr, ptent); 5550 else if (is_swap_pte(ptent)) 5551 page = mc_handle_swap_pte(vma, ptent, &ent); 5552 else if (pte_none(ptent)) 5553 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5554 5555 if (!page && !ent.val) 5556 return ret; 5557 if (page) { 5558 /* 5559 * Do only a loose check w/o serialization. 5560 * mem_cgroup_move_account() checks the page is valid or 5561 * not under LRU exclusion. 5562 */ 5563 if (page->mem_cgroup == mc.from) { 5564 ret = MC_TARGET_PAGE; 5565 if (is_device_private_page(page)) 5566 ret = MC_TARGET_DEVICE; 5567 if (target) 5568 target->page = page; 5569 } 5570 if (!ret || !target) 5571 put_page(page); 5572 } 5573 /* 5574 * There is a swap entry and the page doesn't exist or isn't charged. 5575 * But we cannot move a tail-page in a THP.
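* (hence the !PageTransCompound() check below)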
5576 */ 5577 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5578 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5579 ret = MC_TARGET_SWAP; 5580 if (target) 5581 target->ent = ent; 5582 } 5583 return ret; 5584 } 5585 5586 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5587 /* 5588 * We don't consider PMD mapped swapping or file mapped pages because THP does 5589 * not support them for now. 5590 * Caller should make sure that pmd_trans_huge(pmd) is true. 5591 */ 5592 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5593 unsigned long addr, pmd_t pmd, union mc_target *target) 5594 { 5595 struct page *page = NULL; 5596 enum mc_target_type ret = MC_TARGET_NONE; 5597 5598 if (unlikely(is_swap_pmd(pmd))) { 5599 VM_BUG_ON(thp_migration_supported() && 5600 !is_pmd_migration_entry(pmd)); 5601 return ret; 5602 } 5603 page = pmd_page(pmd); 5604 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5605 if (!(mc.flags & MOVE_ANON)) 5606 return ret; 5607 if (page->mem_cgroup == mc.from) { 5608 ret = MC_TARGET_PAGE; 5609 if (target) { 5610 get_page(page); 5611 target->page = page; 5612 } 5613 } 5614 return ret; 5615 } 5616 #else 5617 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5618 unsigned long addr, pmd_t pmd, union mc_target *target) 5619 { 5620 return MC_TARGET_NONE; 5621 } 5622 #endif 5623 5624 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5625 unsigned long addr, unsigned long end, 5626 struct mm_walk *walk) 5627 { 5628 struct vm_area_struct *vma = walk->vma; 5629 pte_t *pte; 5630 spinlock_t *ptl; 5631 5632 ptl = pmd_trans_huge_lock(pmd, vma); 5633 if (ptl) { 5634 /* 5635 * Note there cannot be MC_TARGET_DEVICE for now, as we do not 5636 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but 5637 * this might change. 5638 */ 5639 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5640 mc.precharge += HPAGE_PMD_NR; 5641 spin_unlock(ptl); 5642 return 0; 5643 } 5644 5645 if (pmd_trans_unstable(pmd)) 5646 return 0; 5647 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5648 for (; addr != end; pte++, addr += PAGE_SIZE) 5649 if (get_mctgt_type(vma, addr, *pte, NULL)) 5650 mc.precharge++; /* increment precharge temporarily */ 5651 pte_unmap_unlock(pte - 1, ptl); 5652 cond_resched(); 5653 5654 return 0; 5655 } 5656 5657 static const struct mm_walk_ops precharge_walk_ops = { 5658 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5659 }; 5660 5661 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5662 { 5663 unsigned long precharge; 5664 5665 down_read(&mm->mmap_sem); 5666 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5667 up_read(&mm->mmap_sem); 5668 5669 precharge = mc.precharge; 5670 mc.precharge = 0; 5671 5672 return precharge; 5673 } 5674 5675 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5676 { 5677 unsigned long precharge = mem_cgroup_count_precharge(mm); 5678 5679 VM_BUG_ON(mc.moving_task); 5680 mc.moving_task = current; 5681 return mem_cgroup_do_precharge(precharge); 5682 } 5683 5684 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters.
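Called from mem_cgroup_clear_mc() and from the mmap_sem contention path in mem_cgroup_move_charge().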
*/ 5685 static void __mem_cgroup_clear_mc(void) 5686 { 5687 struct mem_cgroup *from = mc.from; 5688 struct mem_cgroup *to = mc.to; 5689 5690 /* we must uncharge all the leftover precharges from mc.to */ 5691 if (mc.precharge) { 5692 cancel_charge(mc.to, mc.precharge); 5693 mc.precharge = 0; 5694 } 5695 /* 5696 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5697 * we must uncharge here. 5698 */ 5699 if (mc.moved_charge) { 5700 cancel_charge(mc.from, mc.moved_charge); 5701 mc.moved_charge = 0; 5702 } 5703 /* we must fix up refcnts and charges */ 5704 if (mc.moved_swap) { 5705 /* uncharge swap account from the old cgroup */ 5706 if (!mem_cgroup_is_root(mc.from)) 5707 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5708 5709 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5710 5711 /* 5712 * we charged both to->memory and to->memsw, so we 5713 * should uncharge to->memory. 5714 */ 5715 if (!mem_cgroup_is_root(mc.to)) 5716 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5717 5718 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 5719 css_put_many(&mc.to->css, mc.moved_swap); 5720 5721 mc.moved_swap = 0; 5722 } 5723 memcg_oom_recover(from); 5724 memcg_oom_recover(to); 5725 wake_up_all(&mc.waitq); 5726 } 5727 5728 static void mem_cgroup_clear_mc(void) 5729 { 5730 struct mm_struct *mm = mc.mm; 5731 5732 /* 5733 * we must clear moving_task before waking up waiters at the end of 5734 * task migration. 5735 */ 5736 mc.moving_task = NULL; 5737 __mem_cgroup_clear_mc(); 5738 spin_lock(&mc.lock); 5739 mc.from = NULL; 5740 mc.to = NULL; 5741 mc.mm = NULL; 5742 spin_unlock(&mc.lock); 5743 5744 mmput(mm); 5745 } 5746 5747 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5748 { 5749 struct cgroup_subsys_state *css; 5750 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5751 struct mem_cgroup *from; 5752 struct task_struct *leader, *p; 5753 struct mm_struct *mm; 5754 unsigned long move_flags; 5755 int ret = 0; 5756 5757 /* charge immigration isn't supported on the default hierarchy */ 5758 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5759 return 0; 5760 5761 /* 5762 * Multi-process migrations only happen on the default hierarchy 5763 * where charge immigration is not used. Perform charge 5764 * immigration if @tset contains a leader and whine if there are 5765 * multiple. 5766 */ 5767 p = NULL; 5768 cgroup_taskset_for_each_leader(leader, css, tset) { 5769 WARN_ON_ONCE(p); 5770 p = leader; 5771 memcg = mem_cgroup_from_css(css); 5772 } 5773 if (!p) 5774 return 0; 5775 5776 /* 5777 * We are now committed to this value whatever it is. Changes in this 5778 * tunable will only affect upcoming migrations, not the current one. 5779 * So we need to save it, and keep it going.
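* (hence the READ_ONCE() snapshot below rather than re-reading the tunable during the migration)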
5780 */ 5781 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5782 if (!move_flags) 5783 return 0; 5784 5785 from = mem_cgroup_from_task(p); 5786 5787 VM_BUG_ON(from == memcg); 5788 5789 mm = get_task_mm(p); 5790 if (!mm) 5791 return 0; 5792 /* We move charges only when we move a owner of the mm */ 5793 if (mm->owner == p) { 5794 VM_BUG_ON(mc.from); 5795 VM_BUG_ON(mc.to); 5796 VM_BUG_ON(mc.precharge); 5797 VM_BUG_ON(mc.moved_charge); 5798 VM_BUG_ON(mc.moved_swap); 5799 5800 spin_lock(&mc.lock); 5801 mc.mm = mm; 5802 mc.from = from; 5803 mc.to = memcg; 5804 mc.flags = move_flags; 5805 spin_unlock(&mc.lock); 5806 /* We set mc.moving_task later */ 5807 5808 ret = mem_cgroup_precharge_mc(mm); 5809 if (ret) 5810 mem_cgroup_clear_mc(); 5811 } else { 5812 mmput(mm); 5813 } 5814 return ret; 5815 } 5816 5817 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5818 { 5819 if (mc.to) 5820 mem_cgroup_clear_mc(); 5821 } 5822 5823 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5824 unsigned long addr, unsigned long end, 5825 struct mm_walk *walk) 5826 { 5827 int ret = 0; 5828 struct vm_area_struct *vma = walk->vma; 5829 pte_t *pte; 5830 spinlock_t *ptl; 5831 enum mc_target_type target_type; 5832 union mc_target target; 5833 struct page *page; 5834 5835 ptl = pmd_trans_huge_lock(pmd, vma); 5836 if (ptl) { 5837 if (mc.precharge < HPAGE_PMD_NR) { 5838 spin_unlock(ptl); 5839 return 0; 5840 } 5841 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5842 if (target_type == MC_TARGET_PAGE) { 5843 page = target.page; 5844 if (!isolate_lru_page(page)) { 5845 if (!mem_cgroup_move_account(page, true, 5846 mc.from, mc.to)) { 5847 mc.precharge -= HPAGE_PMD_NR; 5848 mc.moved_charge += HPAGE_PMD_NR; 5849 } 5850 putback_lru_page(page); 5851 } 5852 put_page(page); 5853 } else if (target_type == MC_TARGET_DEVICE) { 5854 page = target.page; 5855 if (!mem_cgroup_move_account(page, true, 5856 mc.from, mc.to)) { 5857 mc.precharge -= HPAGE_PMD_NR; 5858 mc.moved_charge += HPAGE_PMD_NR; 5859 } 5860 put_page(page); 5861 } 5862 spin_unlock(ptl); 5863 return 0; 5864 } 5865 5866 if (pmd_trans_unstable(pmd)) 5867 return 0; 5868 retry: 5869 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5870 for (; addr != end; addr += PAGE_SIZE) { 5871 pte_t ptent = *(pte++); 5872 bool device = false; 5873 swp_entry_t ent; 5874 5875 if (!mc.precharge) 5876 break; 5877 5878 switch (get_mctgt_type(vma, addr, ptent, &target)) { 5879 case MC_TARGET_DEVICE: 5880 device = true; 5881 fallthrough; 5882 case MC_TARGET_PAGE: 5883 page = target.page; 5884 /* 5885 * We can have a part of the split pmd here. Moving it 5886 * can be done but it would be too convoluted so simply 5887 * ignore such a partial THP and keep it in original 5888 * memcg. There should be somebody mapping the head. 5889 */ 5890 if (PageTransCompound(page)) 5891 goto put; 5892 if (!device && isolate_lru_page(page)) 5893 goto put; 5894 if (!mem_cgroup_move_account(page, false, 5895 mc.from, mc.to)) { 5896 mc.precharge--; 5897 /* we uncharge from mc.from later. */ 5898 mc.moved_charge++; 5899 } 5900 if (!device) 5901 putback_lru_page(page); 5902 put: /* get_mctgt_type() gets the page */ 5903 put_page(page); 5904 break; 5905 case MC_TARGET_SWAP: 5906 ent = target.ent; 5907 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5908 mc.precharge--; 5909 /* we fixup refcnts and charges later. 
*/ 5910 mc.moved_swap++; 5911 } 5912 break; 5913 default: 5914 break; 5915 } 5916 } 5917 pte_unmap_unlock(pte - 1, ptl); 5918 cond_resched(); 5919 5920 if (addr != end) { 5921 /* 5922 * We have consumed all precharges we got in can_attach(). 5923 * We try charge one by one, but don't do any additional 5924 * charges to mc.to if we have failed in charge once in attach() 5925 * phase. 5926 */ 5927 ret = mem_cgroup_do_precharge(1); 5928 if (!ret) 5929 goto retry; 5930 } 5931 5932 return ret; 5933 } 5934 5935 static const struct mm_walk_ops charge_walk_ops = { 5936 .pmd_entry = mem_cgroup_move_charge_pte_range, 5937 }; 5938 5939 static void mem_cgroup_move_charge(void) 5940 { 5941 lru_add_drain_all(); 5942 /* 5943 * Signal lock_page_memcg() to take the memcg's move_lock 5944 * while we're moving its pages to another memcg. Then wait 5945 * for already started RCU-only updates to finish. 5946 */ 5947 atomic_inc(&mc.from->moving_account); 5948 synchronize_rcu(); 5949 retry: 5950 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 5951 /* 5952 * Someone who are holding the mmap_sem might be waiting in 5953 * waitq. So we cancel all extra charges, wake up all waiters, 5954 * and retry. Because we cancel precharges, we might not be able 5955 * to move enough charges, but moving charge is a best-effort 5956 * feature anyway, so it wouldn't be a big problem. 5957 */ 5958 __mem_cgroup_clear_mc(); 5959 cond_resched(); 5960 goto retry; 5961 } 5962 /* 5963 * When we have consumed all precharges and failed in doing 5964 * additional charge, the page walk just aborts. 5965 */ 5966 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 5967 NULL); 5968 5969 up_read(&mc.mm->mmap_sem); 5970 atomic_dec(&mc.from->moving_account); 5971 } 5972 5973 static void mem_cgroup_move_task(void) 5974 { 5975 if (mc.to) { 5976 mem_cgroup_move_charge(); 5977 mem_cgroup_clear_mc(); 5978 } 5979 } 5980 #else /* !CONFIG_MMU */ 5981 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5982 { 5983 return 0; 5984 } 5985 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5986 { 5987 } 5988 static void mem_cgroup_move_task(void) 5989 { 5990 } 5991 #endif 5992 5993 /* 5994 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5995 * to verify whether we're attached to the default hierarchy on each mount 5996 * attempt. 5997 */ 5998 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5999 { 6000 /* 6001 * use_hierarchy is forced on the default hierarchy. cgroup core 6002 * guarantees that @root doesn't have any children, so turning it 6003 * on for the root memcg is enough. 
6004 */ 6005 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6006 root_mem_cgroup->use_hierarchy = true; 6007 else 6008 root_mem_cgroup->use_hierarchy = false; 6009 } 6010 6011 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6012 { 6013 if (value == PAGE_COUNTER_MAX) 6014 seq_puts(m, "max\n"); 6015 else 6016 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6017 6018 return 0; 6019 } 6020 6021 static u64 memory_current_read(struct cgroup_subsys_state *css, 6022 struct cftype *cft) 6023 { 6024 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6025 6026 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6027 } 6028 6029 static int memory_min_show(struct seq_file *m, void *v) 6030 { 6031 return seq_puts_memcg_tunable(m, 6032 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6033 } 6034 6035 static ssize_t memory_min_write(struct kernfs_open_file *of, 6036 char *buf, size_t nbytes, loff_t off) 6037 { 6038 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6039 unsigned long min; 6040 int err; 6041 6042 buf = strstrip(buf); 6043 err = page_counter_memparse(buf, "max", &min); 6044 if (err) 6045 return err; 6046 6047 page_counter_set_min(&memcg->memory, min); 6048 6049 return nbytes; 6050 } 6051 6052 static int memory_low_show(struct seq_file *m, void *v) 6053 { 6054 return seq_puts_memcg_tunable(m, 6055 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6056 } 6057 6058 static ssize_t memory_low_write(struct kernfs_open_file *of, 6059 char *buf, size_t nbytes, loff_t off) 6060 { 6061 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6062 unsigned long low; 6063 int err; 6064 6065 buf = strstrip(buf); 6066 err = page_counter_memparse(buf, "max", &low); 6067 if (err) 6068 return err; 6069 6070 page_counter_set_low(&memcg->memory, low); 6071 6072 return nbytes; 6073 } 6074 6075 static int memory_high_show(struct seq_file *m, void *v) 6076 { 6077 return seq_puts_memcg_tunable(m, 6078 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6079 } 6080 6081 static ssize_t memory_high_write(struct kernfs_open_file *of, 6082 char *buf, size_t nbytes, loff_t off) 6083 { 6084 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6085 unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 6086 bool drained = false; 6087 unsigned long high; 6088 int err; 6089 6090 buf = strstrip(buf); 6091 err = page_counter_memparse(buf, "max", &high); 6092 if (err) 6093 return err; 6094 6095 page_counter_set_high(&memcg->memory, high); 6096 6097 for (;;) { 6098 unsigned long nr_pages = page_counter_read(&memcg->memory); 6099 unsigned long reclaimed; 6100 6101 if (nr_pages <= high) 6102 break; 6103 6104 if (signal_pending(current)) 6105 break; 6106 6107 if (!drained) { 6108 drain_all_stock(memcg); 6109 drained = true; 6110 continue; 6111 } 6112 6113 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6114 GFP_KERNEL, true); 6115 6116 if (!reclaimed && !nr_retries--) 6117 break; 6118 } 6119 6120 return nbytes; 6121 } 6122 6123 static int memory_max_show(struct seq_file *m, void *v) 6124 { 6125 return seq_puts_memcg_tunable(m, 6126 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6127 } 6128 6129 static ssize_t memory_max_write(struct kernfs_open_file *of, 6130 char *buf, size_t nbytes, loff_t off) 6131 { 6132 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6133 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 6134 bool drained = false; 6135 unsigned long max; 6136 int err; 6137 6138 buf = strstrip(buf); 6139 err = page_counter_memparse(buf, "max", 
&max); 6140 if (err) 6141 return err; 6142 6143 xchg(&memcg->memory.max, max); 6144 6145 for (;;) { 6146 unsigned long nr_pages = page_counter_read(&memcg->memory); 6147 6148 if (nr_pages <= max) 6149 break; 6150 6151 if (signal_pending(current)) 6152 break; 6153 6154 if (!drained) { 6155 drain_all_stock(memcg); 6156 drained = true; 6157 continue; 6158 } 6159 6160 if (nr_reclaims) { 6161 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6162 GFP_KERNEL, true)) 6163 nr_reclaims--; 6164 continue; 6165 } 6166 6167 memcg_memory_event(memcg, MEMCG_OOM); 6168 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6169 break; 6170 } 6171 6172 memcg_wb_domain_size_changed(memcg); 6173 return nbytes; 6174 } 6175 6176 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6177 { 6178 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6179 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6180 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6181 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6182 seq_printf(m, "oom_kill %lu\n", 6183 atomic_long_read(&events[MEMCG_OOM_KILL])); 6184 } 6185 6186 static int memory_events_show(struct seq_file *m, void *v) 6187 { 6188 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6189 6190 __memory_events_show(m, memcg->memory_events); 6191 return 0; 6192 } 6193 6194 static int memory_events_local_show(struct seq_file *m, void *v) 6195 { 6196 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6197 6198 __memory_events_show(m, memcg->memory_events_local); 6199 return 0; 6200 } 6201 6202 static int memory_stat_show(struct seq_file *m, void *v) 6203 { 6204 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6205 char *buf; 6206 6207 buf = memory_stat_format(memcg); 6208 if (!buf) 6209 return -ENOMEM; 6210 seq_puts(m, buf); 6211 kfree(buf); 6212 return 0; 6213 } 6214 6215 static int memory_oom_group_show(struct seq_file *m, void *v) 6216 { 6217 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6218 6219 seq_printf(m, "%d\n", memcg->oom_group); 6220 6221 return 0; 6222 } 6223 6224 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6225 char *buf, size_t nbytes, loff_t off) 6226 { 6227 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6228 int ret, oom_group; 6229 6230 buf = strstrip(buf); 6231 if (!buf) 6232 return -EINVAL; 6233 6234 ret = kstrtoint(buf, 0, &oom_group); 6235 if (ret) 6236 return ret; 6237 6238 if (oom_group != 0 && oom_group != 1) 6239 return -EINVAL; 6240 6241 memcg->oom_group = oom_group; 6242 6243 return nbytes; 6244 } 6245 6246 static struct cftype memory_files[] = { 6247 { 6248 .name = "current", 6249 .flags = CFTYPE_NOT_ON_ROOT, 6250 .read_u64 = memory_current_read, 6251 }, 6252 { 6253 .name = "min", 6254 .flags = CFTYPE_NOT_ON_ROOT, 6255 .seq_show = memory_min_show, 6256 .write = memory_min_write, 6257 }, 6258 { 6259 .name = "low", 6260 .flags = CFTYPE_NOT_ON_ROOT, 6261 .seq_show = memory_low_show, 6262 .write = memory_low_write, 6263 }, 6264 { 6265 .name = "high", 6266 .flags = CFTYPE_NOT_ON_ROOT, 6267 .seq_show = memory_high_show, 6268 .write = memory_high_write, 6269 }, 6270 { 6271 .name = "max", 6272 .flags = CFTYPE_NOT_ON_ROOT, 6273 .seq_show = memory_max_show, 6274 .write = memory_max_write, 6275 }, 6276 { 6277 .name = "events", 6278 .flags = CFTYPE_NOT_ON_ROOT, 6279 .file_offset = offsetof(struct mem_cgroup, events_file), 6280 .seq_show = memory_events_show, 6281 }, 6282 { 6283 .name = "events.local", 6284 .flags = 
CFTYPE_NOT_ON_ROOT, 6285 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6286 .seq_show = memory_events_local_show, 6287 }, 6288 { 6289 .name = "stat", 6290 .seq_show = memory_stat_show, 6291 }, 6292 { 6293 .name = "oom.group", 6294 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6295 .seq_show = memory_oom_group_show, 6296 .write = memory_oom_group_write, 6297 }, 6298 { } /* terminate */ 6299 }; 6300 6301 struct cgroup_subsys memory_cgrp_subsys = { 6302 .css_alloc = mem_cgroup_css_alloc, 6303 .css_online = mem_cgroup_css_online, 6304 .css_offline = mem_cgroup_css_offline, 6305 .css_released = mem_cgroup_css_released, 6306 .css_free = mem_cgroup_css_free, 6307 .css_reset = mem_cgroup_css_reset, 6308 .can_attach = mem_cgroup_can_attach, 6309 .cancel_attach = mem_cgroup_cancel_attach, 6310 .post_attach = mem_cgroup_move_task, 6311 .bind = mem_cgroup_bind, 6312 .dfl_cftypes = memory_files, 6313 .legacy_cftypes = mem_cgroup_legacy_files, 6314 .early_init = 0, 6315 }; 6316 6317 /* 6318 * This function calculates an individual cgroup's effective 6319 * protection which is derived from its own memory.min/low, its 6320 * parent's and siblings' settings, as well as the actual memory 6321 * distribution in the tree. 6322 * 6323 * The following rules apply to the effective protection values: 6324 * 6325 * 1. At the first level of reclaim, effective protection is equal to 6326 * the declared protection in memory.min and memory.low. 6327 * 6328 * 2. To enable safe delegation of the protection configuration, at 6329 * subsequent levels the effective protection is capped to the 6330 * parent's effective protection. 6331 * 6332 * 3. To make complex and dynamic subtrees easier to configure, the 6333 * user is allowed to overcommit the declared protection at a given 6334 * level. If that is the case, the parent's effective protection is 6335 * distributed to the children in proportion to how much protection 6336 * they have declared and how much of it they are utilizing. 6337 * 6338 * This makes distribution proportional, but also work-conserving: 6339 * if one cgroup claims much more protection than it uses memory, 6340 * the unused remainder is available to its siblings. 6341 * 6342 * 4. Conversely, when the declared protection is undercommitted at a 6343 * given level, the distribution of the larger parental protection 6344 * budget is NOT proportional. A cgroup's protection from a sibling 6345 * is capped to its own memory.min/low setting. 6346 * 6347 * 5. However, to allow protecting recursive subtrees from each other 6348 * without having to declare each individual cgroup's fixed share 6349 * of the ancestor's claim to protection, any unutilized - 6350 * "floating" - protection from up the tree is distributed in 6351 * proportion to each cgroup's *usage*. This makes the protection 6352 * neutral wrt sibling cgroups and lets them compete freely over 6353 * the shared parental protection budget, but it protects the 6354 * subtree as a whole from neighboring subtrees. 6355 * 6356 * Note that 4. and 5. are not in conflict: 4. is about protecting 6357 * against immediate siblings whereas 5. is about protecting against 6358 * neighboring subtrees. 
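 *
 * A worked example of rule 3 (values illustrative only): a parent with an
 * effective low of 12G has two children, A declaring low=10G and using
 * 10G, B declaring low=8G but using only 5G. Their combined utilized
 * protection is 10G + 5G = 15G, which exceeds the parent's 12G, so each
 * child's effective low is scaled by 12/15: A ends up with 8G, B with 4G.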
 */
static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected;
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection than what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
	 */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/*
	 * Ok, utilized protection of all children is within what the
	 * parent affords them, so we know whatever this child claims
	 * and utilizes is effectively protected.
	 *
	 * If there is unprotected usage beyond this value, reclaim
	 * will apply pressure in proportion to that amount.
	 *
	 * If there is unutilized protection, the cgroup will be fully
	 * shielded from reclaim, but we do return a smaller value for
	 * protection than what the group could enjoy in theory. This
	 * is okay. With the overcommit distribution above, effective
	 * protection is always dependent on how memory is actually
	 * consumed among the siblings anyway.
	 */
	ep = protected;

	/*
	 * If the children aren't claiming (all of) the protection
	 * afforded to them by the parent, distribute the remainder in
	 * proportion to the (unprotected) memory of each cgroup. That
	 * way, cgroups that aren't explicitly prioritized wrt each
	 * other compete freely over the allowance, but they are
	 * collectively protected from neighboring trees.
	 *
	 * We're using unprotected memory for the weight so that if
	 * some cgroups DO claim explicit protection, we don't protect
	 * the same bytes twice.
	 */
	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
		return ep;

	if (parent_effective > siblings_protected && usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
}

/**
 * mem_cgroup_protected - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @memcg: the memory cgroup to check
 *
 * WARNING: This function is not stateless! It can only be used as part
 * of a top-down tree iteration, not for isolated queries.
 *
 * Returns one of the following:
 *   MEMCG_PROT_NONE: cgroup memory is not protected
 *   MEMCG_PROT_LOW: cgroup memory is protected as long as there is
 *     an unprotected supply of reclaimable memory from other cgroups.
6440 * MEMCG_PROT_MIN: cgroup memory is protected 6441 */ 6442 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, 6443 struct mem_cgroup *memcg) 6444 { 6445 unsigned long usage, parent_usage; 6446 struct mem_cgroup *parent; 6447 6448 if (mem_cgroup_disabled()) 6449 return MEMCG_PROT_NONE; 6450 6451 if (!root) 6452 root = root_mem_cgroup; 6453 if (memcg == root) 6454 return MEMCG_PROT_NONE; 6455 6456 usage = page_counter_read(&memcg->memory); 6457 if (!usage) 6458 return MEMCG_PROT_NONE; 6459 6460 parent = parent_mem_cgroup(memcg); 6461 /* No parent means a non-hierarchical mode on v1 memcg */ 6462 if (!parent) 6463 return MEMCG_PROT_NONE; 6464 6465 if (parent == root) { 6466 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6467 memcg->memory.elow = memcg->memory.low; 6468 goto out; 6469 } 6470 6471 parent_usage = page_counter_read(&parent->memory); 6472 6473 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6474 READ_ONCE(memcg->memory.min), 6475 READ_ONCE(parent->memory.emin), 6476 atomic_long_read(&parent->memory.children_min_usage))); 6477 6478 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6479 memcg->memory.low, READ_ONCE(parent->memory.elow), 6480 atomic_long_read(&parent->memory.children_low_usage))); 6481 6482 out: 6483 if (usage <= memcg->memory.emin) 6484 return MEMCG_PROT_MIN; 6485 else if (usage <= memcg->memory.elow) 6486 return MEMCG_PROT_LOW; 6487 else 6488 return MEMCG_PROT_NONE; 6489 } 6490 6491 /** 6492 * mem_cgroup_try_charge - try charging a page 6493 * @page: page to charge 6494 * @mm: mm context of the victim 6495 * @gfp_mask: reclaim mode 6496 * @memcgp: charged memcg return 6497 * 6498 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6499 * pages according to @gfp_mask if necessary. 6500 * 6501 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 6502 * Otherwise, an error code is returned. 6503 * 6504 * After page->mapping has been set up, the caller must finalize the 6505 * charge with mem_cgroup_commit_charge(). Or abort the transaction 6506 * with mem_cgroup_cancel_charge() in case page instantiation fails. 6507 */ 6508 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 6509 gfp_t gfp_mask, struct mem_cgroup **memcgp) 6510 { 6511 unsigned int nr_pages = hpage_nr_pages(page); 6512 struct mem_cgroup *memcg = NULL; 6513 int ret = 0; 6514 6515 if (mem_cgroup_disabled()) 6516 goto out; 6517 6518 if (PageSwapCache(page)) { 6519 /* 6520 * Every swap fault against a single page tries to charge the 6521 * page, bail as early as possible. shmem_unuse() encounters 6522 * already charged pages, too. The USED bit is protected by 6523 * the page lock, which serializes swap cache removal, which 6524 * in turn serializes uncharging. 
6525 */ 6526 VM_BUG_ON_PAGE(!PageLocked(page), page); 6527 if (compound_head(page)->mem_cgroup) 6528 goto out; 6529 6530 if (do_swap_account) { 6531 swp_entry_t ent = { .val = page_private(page), }; 6532 unsigned short id = lookup_swap_cgroup_id(ent); 6533 6534 rcu_read_lock(); 6535 memcg = mem_cgroup_from_id(id); 6536 if (memcg && !css_tryget_online(&memcg->css)) 6537 memcg = NULL; 6538 rcu_read_unlock(); 6539 } 6540 } 6541 6542 if (!memcg) 6543 memcg = get_mem_cgroup_from_mm(mm); 6544 6545 ret = try_charge(memcg, gfp_mask, nr_pages); 6546 6547 css_put(&memcg->css); 6548 out: 6549 *memcgp = memcg; 6550 return ret; 6551 } 6552 6553 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, 6554 gfp_t gfp_mask, struct mem_cgroup **memcgp) 6555 { 6556 int ret; 6557 6558 ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp); 6559 if (*memcgp) 6560 cgroup_throttle_swaprate(page, gfp_mask); 6561 return ret; 6562 } 6563 6564 /** 6565 * mem_cgroup_commit_charge - commit a page charge 6566 * @page: page to charge 6567 * @memcg: memcg to charge the page to 6568 * @lrucare: page might be on LRU already 6569 * 6570 * Finalize a charge transaction started by mem_cgroup_try_charge(), 6571 * after page->mapping has been set up. This must happen atomically 6572 * as part of the page instantiation, i.e. under the page table lock 6573 * for anonymous pages, under the page lock for page and swap cache. 6574 * 6575 * In addition, the page must not be on the LRU during the commit, to 6576 * prevent racing with task migration. If it might be, use @lrucare. 6577 * 6578 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 6579 */ 6580 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 6581 bool lrucare) 6582 { 6583 unsigned int nr_pages = hpage_nr_pages(page); 6584 6585 VM_BUG_ON_PAGE(!page->mapping, page); 6586 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 6587 6588 if (mem_cgroup_disabled()) 6589 return; 6590 /* 6591 * Swap faults will attempt to charge the same page multiple 6592 * times. But reuse_swap_page() might have removed the page 6593 * from swapcache already, so we can't check PageSwapCache(). 6594 */ 6595 if (!memcg) 6596 return; 6597 6598 commit_charge(page, memcg, lrucare); 6599 6600 local_irq_disable(); 6601 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6602 memcg_check_events(memcg, page); 6603 local_irq_enable(); 6604 6605 if (do_memsw_account() && PageSwapCache(page)) { 6606 swp_entry_t entry = { .val = page_private(page) }; 6607 /* 6608 * The swap entry might not get freed for a long time, 6609 * let's not wait for it. The page already received a 6610 * memory+swap charge, drop the swap entry duplicate. 6611 */ 6612 mem_cgroup_uncharge_swap(entry, nr_pages); 6613 } 6614 } 6615 6616 /** 6617 * mem_cgroup_cancel_charge - cancel a page charge 6618 * @page: page to charge 6619 * @memcg: memcg to charge the page to 6620 * 6621 * Cancel a charge transaction started by mem_cgroup_try_charge(). 6622 */ 6623 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) 6624 { 6625 unsigned int nr_pages = hpage_nr_pages(page); 6626 6627 if (mem_cgroup_disabled()) 6628 return; 6629 /* 6630 * Swap faults will attempt to charge the same page multiple 6631 * times. But reuse_swap_page() might have removed the page 6632 * from swapcache already, so we can't check PageSwapCache(). 
6633 */ 6634 if (!memcg) 6635 return; 6636 6637 cancel_charge(memcg, nr_pages); 6638 } 6639 6640 /** 6641 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6642 * @page: page to charge 6643 * @mm: mm context of the victim 6644 * @gfp_mask: reclaim mode 6645 * @lrucare: page might be on the LRU already 6646 * 6647 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6648 * pages according to @gfp_mask if necessary. 6649 * 6650 * Returns 0 on success. Otherwise, an error code is returned. 6651 */ 6652 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask, 6653 bool lrucare) 6654 { 6655 struct mem_cgroup *memcg; 6656 int ret; 6657 6658 VM_BUG_ON_PAGE(!page->mapping, page); 6659 6660 ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg); 6661 if (ret) 6662 return ret; 6663 mem_cgroup_commit_charge(page, memcg, lrucare); 6664 return 0; 6665 } 6666 6667 struct uncharge_gather { 6668 struct mem_cgroup *memcg; 6669 unsigned long pgpgout; 6670 unsigned long nr_anon; 6671 unsigned long nr_file; 6672 unsigned long nr_kmem; 6673 unsigned long nr_huge; 6674 unsigned long nr_shmem; 6675 struct page *dummy_page; 6676 }; 6677 6678 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6679 { 6680 memset(ug, 0, sizeof(*ug)); 6681 } 6682 6683 static void uncharge_batch(const struct uncharge_gather *ug) 6684 { 6685 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem; 6686 unsigned long flags; 6687 6688 if (!mem_cgroup_is_root(ug->memcg)) { 6689 page_counter_uncharge(&ug->memcg->memory, nr_pages); 6690 if (do_memsw_account()) 6691 page_counter_uncharge(&ug->memcg->memsw, nr_pages); 6692 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6693 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6694 memcg_oom_recover(ug->memcg); 6695 } 6696 6697 local_irq_save(flags); 6698 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); 6699 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); 6700 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); 6701 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); 6702 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6703 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages); 6704 memcg_check_events(ug->memcg, ug->dummy_page); 6705 local_irq_restore(flags); 6706 6707 if (!mem_cgroup_is_root(ug->memcg)) 6708 css_put_many(&ug->memcg->css, nr_pages); 6709 } 6710 6711 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6712 { 6713 VM_BUG_ON_PAGE(PageLRU(page), page); 6714 6715 if (!page->mem_cgroup) 6716 return; 6717 6718 /* 6719 * Nobody should be changing or seriously looking at 6720 * page->mem_cgroup at this point, we have fully 6721 * exclusive access to the page. 
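	 *
	 * Pages are gathered per memcg: when this page's memcg differs from
	 * the one accumulated in @ug so far, the existing batch is flushed
	 * through uncharge_batch() before gathering starts for the new memcg.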
6722 */ 6723 6724 if (ug->memcg != page->mem_cgroup) { 6725 if (ug->memcg) { 6726 uncharge_batch(ug); 6727 uncharge_gather_clear(ug); 6728 } 6729 ug->memcg = page->mem_cgroup; 6730 } 6731 6732 if (!PageKmemcg(page)) { 6733 unsigned int nr_pages = 1; 6734 6735 if (PageTransHuge(page)) { 6736 nr_pages = compound_nr(page); 6737 ug->nr_huge += nr_pages; 6738 } 6739 if (PageAnon(page)) 6740 ug->nr_anon += nr_pages; 6741 else { 6742 ug->nr_file += nr_pages; 6743 if (PageSwapBacked(page)) 6744 ug->nr_shmem += nr_pages; 6745 } 6746 ug->pgpgout++; 6747 } else { 6748 ug->nr_kmem += compound_nr(page); 6749 __ClearPageKmemcg(page); 6750 } 6751 6752 ug->dummy_page = page; 6753 page->mem_cgroup = NULL; 6754 } 6755 6756 static void uncharge_list(struct list_head *page_list) 6757 { 6758 struct uncharge_gather ug; 6759 struct list_head *next; 6760 6761 uncharge_gather_clear(&ug); 6762 6763 /* 6764 * Note that the list can be a single page->lru; hence the 6765 * do-while loop instead of a simple list_for_each_entry(). 6766 */ 6767 next = page_list->next; 6768 do { 6769 struct page *page; 6770 6771 page = list_entry(next, struct page, lru); 6772 next = page->lru.next; 6773 6774 uncharge_page(page, &ug); 6775 } while (next != page_list); 6776 6777 if (ug.memcg) 6778 uncharge_batch(&ug); 6779 } 6780 6781 /** 6782 * mem_cgroup_uncharge - uncharge a page 6783 * @page: page to uncharge 6784 * 6785 * Uncharge a page previously charged with mem_cgroup_try_charge() and 6786 * mem_cgroup_commit_charge(). 6787 */ 6788 void mem_cgroup_uncharge(struct page *page) 6789 { 6790 struct uncharge_gather ug; 6791 6792 if (mem_cgroup_disabled()) 6793 return; 6794 6795 /* Don't touch page->lru of any random page, pre-check: */ 6796 if (!page->mem_cgroup) 6797 return; 6798 6799 uncharge_gather_clear(&ug); 6800 uncharge_page(page, &ug); 6801 uncharge_batch(&ug); 6802 } 6803 6804 /** 6805 * mem_cgroup_uncharge_list - uncharge a list of page 6806 * @page_list: list of pages to uncharge 6807 * 6808 * Uncharge a list of pages previously charged with 6809 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 6810 */ 6811 void mem_cgroup_uncharge_list(struct list_head *page_list) 6812 { 6813 if (mem_cgroup_disabled()) 6814 return; 6815 6816 if (!list_empty(page_list)) 6817 uncharge_list(page_list); 6818 } 6819 6820 /** 6821 * mem_cgroup_migrate - charge a page's replacement 6822 * @oldpage: currently circulating page 6823 * @newpage: replacement page 6824 * 6825 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6826 * be uncharged upon free. 6827 * 6828 * Both pages must be locked, @newpage->mapping must be set up. 6829 */ 6830 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6831 { 6832 struct mem_cgroup *memcg; 6833 unsigned int nr_pages; 6834 unsigned long flags; 6835 6836 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6837 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6838 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6839 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6840 newpage); 6841 6842 if (mem_cgroup_disabled()) 6843 return; 6844 6845 /* Page cache replacement: new page already charged? */ 6846 if (newpage->mem_cgroup) 6847 return; 6848 6849 /* Swapcache readahead pages can get replaced before being charged */ 6850 memcg = oldpage->mem_cgroup; 6851 if (!memcg) 6852 return; 6853 6854 /* Force-charge the new page. 
The old one will be freed soon */ 6855 nr_pages = hpage_nr_pages(newpage); 6856 6857 page_counter_charge(&memcg->memory, nr_pages); 6858 if (do_memsw_account()) 6859 page_counter_charge(&memcg->memsw, nr_pages); 6860 css_get_many(&memcg->css, nr_pages); 6861 6862 commit_charge(newpage, memcg, false); 6863 6864 local_irq_save(flags); 6865 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 6866 memcg_check_events(memcg, newpage); 6867 local_irq_restore(flags); 6868 } 6869 6870 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 6871 EXPORT_SYMBOL(memcg_sockets_enabled_key); 6872 6873 void mem_cgroup_sk_alloc(struct sock *sk) 6874 { 6875 struct mem_cgroup *memcg; 6876 6877 if (!mem_cgroup_sockets_enabled) 6878 return; 6879 6880 /* Do not associate the sock with unrelated interrupted task's memcg. */ 6881 if (in_interrupt()) 6882 return; 6883 6884 rcu_read_lock(); 6885 memcg = mem_cgroup_from_task(current); 6886 if (memcg == root_mem_cgroup) 6887 goto out; 6888 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 6889 goto out; 6890 if (css_tryget(&memcg->css)) 6891 sk->sk_memcg = memcg; 6892 out: 6893 rcu_read_unlock(); 6894 } 6895 6896 void mem_cgroup_sk_free(struct sock *sk) 6897 { 6898 if (sk->sk_memcg) 6899 css_put(&sk->sk_memcg->css); 6900 } 6901 6902 /** 6903 * mem_cgroup_charge_skmem - charge socket memory 6904 * @memcg: memcg to charge 6905 * @nr_pages: number of pages to charge 6906 * 6907 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 6908 * @memcg's configured limit, %false if the charge had to be forced. 6909 */ 6910 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6911 { 6912 gfp_t gfp_mask = GFP_KERNEL; 6913 6914 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6915 struct page_counter *fail; 6916 6917 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 6918 memcg->tcpmem_pressure = 0; 6919 return true; 6920 } 6921 page_counter_charge(&memcg->tcpmem, nr_pages); 6922 memcg->tcpmem_pressure = 1; 6923 return false; 6924 } 6925 6926 /* Don't block in the packet receive path */ 6927 if (in_softirq()) 6928 gfp_mask = GFP_NOWAIT; 6929 6930 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 6931 6932 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 6933 return true; 6934 6935 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 6936 return false; 6937 } 6938 6939 /** 6940 * mem_cgroup_uncharge_skmem - uncharge socket memory 6941 * @memcg: memcg to uncharge 6942 * @nr_pages: number of pages to uncharge 6943 */ 6944 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6945 { 6946 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6947 page_counter_uncharge(&memcg->tcpmem, nr_pages); 6948 return; 6949 } 6950 6951 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 6952 6953 refill_stock(memcg, nr_pages); 6954 } 6955 6956 static int __init cgroup_memory(char *s) 6957 { 6958 char *token; 6959 6960 while ((token = strsep(&s, ",")) != NULL) { 6961 if (!*token) 6962 continue; 6963 if (!strcmp(token, "nosocket")) 6964 cgroup_memory_nosocket = true; 6965 if (!strcmp(token, "nokmem")) 6966 cgroup_memory_nokmem = true; 6967 } 6968 return 0; 6969 } 6970 __setup("cgroup.memory=", cgroup_memory); 6971 6972 /* 6973 * subsys_initcall() for memory controller. 
6974 * 6975 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 6976 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 6977 * basically everything that doesn't depend on a specific mem_cgroup structure 6978 * should be initialized from here. 6979 */ 6980 static int __init mem_cgroup_init(void) 6981 { 6982 int cpu, node; 6983 6984 #ifdef CONFIG_MEMCG_KMEM 6985 /* 6986 * Kmem cache creation is mostly done with the slab_mutex held, 6987 * so use a workqueue with limited concurrency to avoid stalling 6988 * all worker threads in case lots of cgroups are created and 6989 * destroyed simultaneously. 6990 */ 6991 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); 6992 BUG_ON(!memcg_kmem_cache_wq); 6993 #endif 6994 6995 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 6996 memcg_hotplug_cpu_dead); 6997 6998 for_each_possible_cpu(cpu) 6999 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7000 drain_local_stock); 7001 7002 for_each_node(node) { 7003 struct mem_cgroup_tree_per_node *rtpn; 7004 7005 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7006 node_online(node) ? node : NUMA_NO_NODE); 7007 7008 rtpn->rb_root = RB_ROOT; 7009 rtpn->rb_rightmost = NULL; 7010 spin_lock_init(&rtpn->lock); 7011 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7012 } 7013 7014 return 0; 7015 } 7016 subsys_initcall(mem_cgroup_init); 7017 7018 #ifdef CONFIG_MEMCG_SWAP 7019 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7020 { 7021 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7022 /* 7023 * The root cgroup cannot be destroyed, so it's refcount must 7024 * always be >= 1. 7025 */ 7026 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7027 VM_BUG_ON(1); 7028 break; 7029 } 7030 memcg = parent_mem_cgroup(memcg); 7031 if (!memcg) 7032 memcg = root_mem_cgroup; 7033 } 7034 return memcg; 7035 } 7036 7037 /** 7038 * mem_cgroup_swapout - transfer a memsw charge to swap 7039 * @page: page whose memsw charge to transfer 7040 * @entry: swap entry to move the charge to 7041 * 7042 * Transfer the memsw charge of @page to @entry. 7043 */ 7044 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7045 { 7046 struct mem_cgroup *memcg, *swap_memcg; 7047 unsigned int nr_entries; 7048 unsigned short oldid; 7049 7050 VM_BUG_ON_PAGE(PageLRU(page), page); 7051 VM_BUG_ON_PAGE(page_count(page), page); 7052 7053 if (!do_memsw_account()) 7054 return; 7055 7056 memcg = page->mem_cgroup; 7057 7058 /* Readahead page, never charged */ 7059 if (!memcg) 7060 return; 7061 7062 /* 7063 * In case the memcg owning these pages has been offlined and doesn't 7064 * have an ID allocated to it anymore, charge the closest online 7065 * ancestor for the swap instead and transfer the memory+swap charge. 
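	 *
	 * The references taken here (one per swap entry, via
	 * mem_cgroup_id_get_online() and mem_cgroup_id_get_many()) keep the
	 * chosen memcg's ID from being recycled until the entries are
	 * released again in mem_cgroup_uncharge_swap().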
7066 */ 7067 swap_memcg = mem_cgroup_id_get_online(memcg); 7068 nr_entries = hpage_nr_pages(page); 7069 /* Get references for the tail pages, too */ 7070 if (nr_entries > 1) 7071 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7072 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7073 nr_entries); 7074 VM_BUG_ON_PAGE(oldid, page); 7075 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7076 7077 page->mem_cgroup = NULL; 7078 7079 if (!mem_cgroup_is_root(memcg)) 7080 page_counter_uncharge(&memcg->memory, nr_entries); 7081 7082 if (memcg != swap_memcg) { 7083 if (!mem_cgroup_is_root(swap_memcg)) 7084 page_counter_charge(&swap_memcg->memsw, nr_entries); 7085 page_counter_uncharge(&memcg->memsw, nr_entries); 7086 } 7087 7088 /* 7089 * Interrupts should be disabled here because the caller holds the 7090 * i_pages lock which is taken with interrupts-off. It is 7091 * important here to have the interrupts disabled because it is the 7092 * only synchronisation we have for updating the per-CPU variables. 7093 */ 7094 VM_BUG_ON(!irqs_disabled()); 7095 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7096 memcg_check_events(memcg, page); 7097 7098 if (!mem_cgroup_is_root(memcg)) 7099 css_put_many(&memcg->css, nr_entries); 7100 } 7101 7102 /** 7103 * mem_cgroup_try_charge_swap - try charging swap space for a page 7104 * @page: page being added to swap 7105 * @entry: swap entry to charge 7106 * 7107 * Try to charge @page's memcg for the swap space at @entry. 7108 * 7109 * Returns 0 on success, -ENOMEM on failure. 7110 */ 7111 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7112 { 7113 unsigned int nr_pages = hpage_nr_pages(page); 7114 struct page_counter *counter; 7115 struct mem_cgroup *memcg; 7116 unsigned short oldid; 7117 7118 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) 7119 return 0; 7120 7121 memcg = page->mem_cgroup; 7122 7123 /* Readahead page, never charged */ 7124 if (!memcg) 7125 return 0; 7126 7127 if (!entry.val) { 7128 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7129 return 0; 7130 } 7131 7132 memcg = mem_cgroup_id_get_online(memcg); 7133 7134 if (!mem_cgroup_is_root(memcg) && 7135 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7136 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7137 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7138 mem_cgroup_id_put(memcg); 7139 return -ENOMEM; 7140 } 7141 7142 /* Get references for the tail pages, too */ 7143 if (nr_pages > 1) 7144 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7145 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7146 VM_BUG_ON_PAGE(oldid, page); 7147 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7148 7149 return 0; 7150 } 7151 7152 /** 7153 * mem_cgroup_uncharge_swap - uncharge swap space 7154 * @entry: swap entry to uncharge 7155 * @nr_pages: the amount of swap space to uncharge 7156 */ 7157 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7158 { 7159 struct mem_cgroup *memcg; 7160 unsigned short id; 7161 7162 if (!do_swap_account) 7163 return; 7164 7165 id = swap_cgroup_record(entry, 0, nr_pages); 7166 rcu_read_lock(); 7167 memcg = mem_cgroup_from_id(id); 7168 if (memcg) { 7169 if (!mem_cgroup_is_root(memcg)) { 7170 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7171 page_counter_uncharge(&memcg->swap, nr_pages); 7172 else 7173 page_counter_uncharge(&memcg->memsw, nr_pages); 7174 } 7175 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7176 mem_cgroup_id_put_many(memcg, nr_pages); 7177 } 7178 rcu_read_unlock(); 7179 } 
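
/*
 * Illustrative sketch only -- not part of this file and never compiled
 * (hence the #if 0 guard). It shows how the swap-accounting hooks above
 * are expected to pair up over a swap slot's lifetime; the callers named
 * here are hypothetical stand-ins for the real swap code.
 */
#if 0
static int example_allocate_swap_slot(struct page *page, swp_entry_t entry)
{
	/* Charge the slot to the page's memcg (memory.swap on cgroup2). */
	if (mem_cgroup_try_charge_swap(page, entry))
		return -ENOMEM;
	return 0;
}

static void example_free_swap_slot(swp_entry_t entry, unsigned int nr_pages)
{
	/* Dropping the last reference to the slot releases the charge. */
	mem_cgroup_uncharge_swap(entry, nr_pages);
}
#endif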
7180 7181 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7182 { 7183 long nr_swap_pages = get_nr_swap_pages(); 7184 7185 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7186 return nr_swap_pages; 7187 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7188 nr_swap_pages = min_t(long, nr_swap_pages, 7189 READ_ONCE(memcg->swap.max) - 7190 page_counter_read(&memcg->swap)); 7191 return nr_swap_pages; 7192 } 7193 7194 bool mem_cgroup_swap_full(struct page *page) 7195 { 7196 struct mem_cgroup *memcg; 7197 7198 VM_BUG_ON_PAGE(!PageLocked(page), page); 7199 7200 if (vm_swap_full()) 7201 return true; 7202 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7203 return false; 7204 7205 memcg = page->mem_cgroup; 7206 if (!memcg) 7207 return false; 7208 7209 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7210 unsigned long usage = page_counter_read(&memcg->swap); 7211 7212 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7213 usage * 2 >= READ_ONCE(memcg->swap.max)) 7214 return true; 7215 } 7216 7217 return false; 7218 } 7219 7220 /* for remember boot option*/ 7221 #ifdef CONFIG_MEMCG_SWAP_ENABLED 7222 static int really_do_swap_account __initdata = 1; 7223 #else 7224 static int really_do_swap_account __initdata; 7225 #endif 7226 7227 static int __init enable_swap_account(char *s) 7228 { 7229 if (!strcmp(s, "1")) 7230 really_do_swap_account = 1; 7231 else if (!strcmp(s, "0")) 7232 really_do_swap_account = 0; 7233 return 1; 7234 } 7235 __setup("swapaccount=", enable_swap_account); 7236 7237 static u64 swap_current_read(struct cgroup_subsys_state *css, 7238 struct cftype *cft) 7239 { 7240 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7241 7242 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7243 } 7244 7245 static int swap_high_show(struct seq_file *m, void *v) 7246 { 7247 return seq_puts_memcg_tunable(m, 7248 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7249 } 7250 7251 static ssize_t swap_high_write(struct kernfs_open_file *of, 7252 char *buf, size_t nbytes, loff_t off) 7253 { 7254 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7255 unsigned long high; 7256 int err; 7257 7258 buf = strstrip(buf); 7259 err = page_counter_memparse(buf, "max", &high); 7260 if (err) 7261 return err; 7262 7263 page_counter_set_high(&memcg->swap, high); 7264 7265 return nbytes; 7266 } 7267 7268 static int swap_max_show(struct seq_file *m, void *v) 7269 { 7270 return seq_puts_memcg_tunable(m, 7271 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7272 } 7273 7274 static ssize_t swap_max_write(struct kernfs_open_file *of, 7275 char *buf, size_t nbytes, loff_t off) 7276 { 7277 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7278 unsigned long max; 7279 int err; 7280 7281 buf = strstrip(buf); 7282 err = page_counter_memparse(buf, "max", &max); 7283 if (err) 7284 return err; 7285 7286 xchg(&memcg->swap.max, max); 7287 7288 return nbytes; 7289 } 7290 7291 static int swap_events_show(struct seq_file *m, void *v) 7292 { 7293 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7294 7295 seq_printf(m, "high %lu\n", 7296 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7297 seq_printf(m, "max %lu\n", 7298 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7299 seq_printf(m, "fail %lu\n", 7300 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7301 7302 return 0; 7303 } 7304 7305 static struct cftype swap_files[] = { 7306 { 7307 .name = "swap.current", 7308 .flags = CFTYPE_NOT_ON_ROOT, 7309 
.read_u64 = swap_current_read, 7310 }, 7311 { 7312 .name = "swap.high", 7313 .flags = CFTYPE_NOT_ON_ROOT, 7314 .seq_show = swap_high_show, 7315 .write = swap_high_write, 7316 }, 7317 { 7318 .name = "swap.max", 7319 .flags = CFTYPE_NOT_ON_ROOT, 7320 .seq_show = swap_max_show, 7321 .write = swap_max_write, 7322 }, 7323 { 7324 .name = "swap.events", 7325 .flags = CFTYPE_NOT_ON_ROOT, 7326 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7327 .seq_show = swap_events_show, 7328 }, 7329 { } /* terminate */ 7330 }; 7331 7332 static struct cftype memsw_cgroup_files[] = { 7333 { 7334 .name = "memsw.usage_in_bytes", 7335 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7336 .read_u64 = mem_cgroup_read_u64, 7337 }, 7338 { 7339 .name = "memsw.max_usage_in_bytes", 7340 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7341 .write = mem_cgroup_reset, 7342 .read_u64 = mem_cgroup_read_u64, 7343 }, 7344 { 7345 .name = "memsw.limit_in_bytes", 7346 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7347 .write = mem_cgroup_write, 7348 .read_u64 = mem_cgroup_read_u64, 7349 }, 7350 { 7351 .name = "memsw.failcnt", 7352 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7353 .write = mem_cgroup_reset, 7354 .read_u64 = mem_cgroup_read_u64, 7355 }, 7356 { }, /* terminate */ 7357 }; 7358 7359 static int __init mem_cgroup_swap_init(void) 7360 { 7361 if (!mem_cgroup_disabled() && really_do_swap_account) { 7362 do_swap_account = 1; 7363 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, 7364 swap_files)); 7365 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 7366 memsw_cgroup_files)); 7367 } 7368 return 0; 7369 } 7370 subsys_initcall(mem_cgroup_swap_init); 7371 7372 #endif /* CONFIG_MEMCG_SWAP */ 7373
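
/*
 * Illustrative userspace sketch (documentation only, never compiled here,
 * hence the #if 0 guard): one way an administration tool might exercise the
 * cgroup2 swap interface defined above. It assumes cgroup2 is mounted at
 * /sys/fs/cgroup and that argv[1] names an existing, writable cgroup
 * directory; the helper names are made up for this example.
 */
#if 0
#include <stdio.h>

static int write_cgroup_file(const char *dir, const char *name, const char *val)
{
	char path[4096];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

static void dump_cgroup_file(const char *dir, const char *name)
{
	char path[4096], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (!f)
		return;
	printf("%s:\n", name);
	while (fgets(line, sizeof(line), f))
		printf("  %s", line);
	fclose(f);
}

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s /sys/fs/cgroup/<group>\n", argv[0]);
		return 1;
	}

	/* Cap swap usage at 512M; writing "max" would lift the limit again. */
	if (write_cgroup_file(argv[1], "memory.swap.max", "536870912\n"))
		perror("memory.swap.max");

	dump_cgroup_file(argv[1], "memory.swap.current");
	dump_cgroup_file(argv[1], "memory.swap.events");
	return 0;
}
#endif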