/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;
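/*
 * Note: both flags above are set once during early boot (the "cgroup.memory="
 * kernel command line option is parsed elsewhere in this file) and never
 * change afterwards, so they can be tested without any locking.
 */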
/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * The register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * The unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removal. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* State for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
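/*
 * Illustration (cgroup1): after "echo 3 > memory.move_charge_at_immigrate"
 * on a destination cgroup, both MOVE_ANON and MOVE_FILE charges follow a
 * task that is migrated into it; "mc" above tracks at most one such
 * migration at a time, which is why a single global instance suffices.
 */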
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 * it works better in sparse environments, where we have a lot of memcgs
 * but only a few of them are kmem-limited. If we had, for instance, 200
 * memcgs and none but the 200th was kmem-limited, we would still need a
 * 200-entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is not 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
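/*
 * Typical reader pattern (a sketch; the real callers live elsewhere in mm/):
 *
 *	memcg_get_cache_ids();
 *	... iterate over up to memcg_nr_cache_ids cache slots ...
 *	memcg_put_cache_ids();
 *
 * Holding the rwsem read-side keeps memcg_nr_cache_ids, and thus the
 * per-cache memcg_caches arrays, from being resized underneath the reader.
 */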
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret)
			goto unlock;
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}
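/*
 * Illustration of the protocol above: a memcg-aware shrinker (e.g. one backed
 * by a list_lru) calls memcg_set_shrinker_bit() when a memcg gains its first
 * object on @nid; shrink_slab() can then skip shrinkers whose bit is clear
 * instead of invoking every registered shrinker for every memcg. The caller
 * names are illustrative; the set/test bitmap protocol is what the code above
 * implements.
 */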
#else /* CONFIG_MEMCG_KMEM */
static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
#endif /* CONFIG_MEMCG_KMEM */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
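/*
 * Ordering note: the tree is keyed by usage_in_excess, so rb_rightmost always
 * points at the memcg exceeding its soft limit by the largest amount. Soft
 * limit reclaim can therefore pick its next victim in O(1) instead of walking
 * the tree (see __mem_cgroup_largest_soft_limit_node() below).
 */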
static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when the hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}
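/*
 * Worked example for soft_limit_excess(): with usage at 300 pages and a soft
 * limit corresponding to 200 pages, the excess is 100 pages. Usage at or
 * below the soft limit yields an excess of 0, which keeps the memcg off (or
 * removes it from) the soft limit tree via the "no tree ops" path above.
 */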
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
				      int event)
{
	return atomic_long_read(&memcg->events[event]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat_cpu->targets[target], next);
		return true;
	}
	return false;
}
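/*
 * Ratelimit example: nr_page_events counts charge/uncharge events per CPU.
 * With THRESHOLDS_EVENTS_TARGET = 128, threshold checks fire at most once
 * every 128 page events on a given CPU; soft limit and NUMA-info updates use
 * the coarser 1024-event targets. The time_after()-style comparison keeps
 * the check safe across counter wraparound.
 */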
779 * 780 */ 781 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 782 { 783 /* threshold event is triggered in finer grain than soft limit */ 784 if (unlikely(mem_cgroup_event_ratelimit(memcg, 785 MEM_CGROUP_TARGET_THRESH))) { 786 bool do_softlimit; 787 bool do_numainfo __maybe_unused; 788 789 do_softlimit = mem_cgroup_event_ratelimit(memcg, 790 MEM_CGROUP_TARGET_SOFTLIMIT); 791 #if MAX_NUMNODES > 1 792 do_numainfo = mem_cgroup_event_ratelimit(memcg, 793 MEM_CGROUP_TARGET_NUMAINFO); 794 #endif 795 mem_cgroup_threshold(memcg); 796 if (unlikely(do_softlimit)) 797 mem_cgroup_update_tree(memcg, page); 798 #if MAX_NUMNODES > 1 799 if (unlikely(do_numainfo)) 800 atomic_inc(&memcg->numainfo_events); 801 #endif 802 } 803 } 804 805 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 806 { 807 /* 808 * mm_update_next_owner() may clear mm->owner to NULL 809 * if it races with swapoff, page migration, etc. 810 * So this can be called with p == NULL. 811 */ 812 if (unlikely(!p)) 813 return NULL; 814 815 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 816 } 817 EXPORT_SYMBOL(mem_cgroup_from_task); 818 819 /** 820 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 821 * @mm: mm from which memcg should be extracted. It can be NULL. 822 * 823 * Obtain a reference on mm->memcg and returns it if successful. Otherwise 824 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is 825 * returned. 826 */ 827 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 828 { 829 struct mem_cgroup *memcg; 830 831 if (mem_cgroup_disabled()) 832 return NULL; 833 834 rcu_read_lock(); 835 do { 836 /* 837 * Page cache insertions can happen withou an 838 * actual mm context, e.g. during disk probing 839 * on boot, loopback IO, acct() writes etc. 840 */ 841 if (unlikely(!mm)) 842 memcg = root_mem_cgroup; 843 else { 844 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 845 if (unlikely(!memcg)) 846 memcg = root_mem_cgroup; 847 } 848 } while (!css_tryget_online(&memcg->css)); 849 rcu_read_unlock(); 850 return memcg; 851 } 852 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 853 854 /** 855 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg. 856 * @page: page from which memcg should be extracted. 857 * 858 * Obtain a reference on page->memcg and returns it if successful. Otherwise 859 * root_mem_cgroup is returned. 860 */ 861 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page) 862 { 863 struct mem_cgroup *memcg = page->mem_cgroup; 864 865 if (mem_cgroup_disabled()) 866 return NULL; 867 868 rcu_read_lock(); 869 if (!memcg || !css_tryget_online(&memcg->css)) 870 memcg = root_mem_cgroup; 871 rcu_read_unlock(); 872 return memcg; 873 } 874 EXPORT_SYMBOL(get_mem_cgroup_from_page); 875 876 /** 877 * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg. 
878 */ 879 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void) 880 { 881 if (unlikely(current->active_memcg)) { 882 struct mem_cgroup *memcg = root_mem_cgroup; 883 884 rcu_read_lock(); 885 if (css_tryget_online(¤t->active_memcg->css)) 886 memcg = current->active_memcg; 887 rcu_read_unlock(); 888 return memcg; 889 } 890 return get_mem_cgroup_from_mm(current->mm); 891 } 892 893 /** 894 * mem_cgroup_iter - iterate over memory cgroup hierarchy 895 * @root: hierarchy root 896 * @prev: previously returned memcg, NULL on first invocation 897 * @reclaim: cookie for shared reclaim walks, NULL for full walks 898 * 899 * Returns references to children of the hierarchy below @root, or 900 * @root itself, or %NULL after a full round-trip. 901 * 902 * Caller must pass the return value in @prev on subsequent 903 * invocations for reference counting, or use mem_cgroup_iter_break() 904 * to cancel a hierarchy walk before the round-trip is complete. 905 * 906 * Reclaimers can specify a node and a priority level in @reclaim to 907 * divide up the memcgs in the hierarchy among all concurrent 908 * reclaimers operating on the same node and priority. 909 */ 910 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 911 struct mem_cgroup *prev, 912 struct mem_cgroup_reclaim_cookie *reclaim) 913 { 914 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 915 struct cgroup_subsys_state *css = NULL; 916 struct mem_cgroup *memcg = NULL; 917 struct mem_cgroup *pos = NULL; 918 919 if (mem_cgroup_disabled()) 920 return NULL; 921 922 if (!root) 923 root = root_mem_cgroup; 924 925 if (prev && !reclaim) 926 pos = prev; 927 928 if (!root->use_hierarchy && root != root_mem_cgroup) { 929 if (prev) 930 goto out; 931 return root; 932 } 933 934 rcu_read_lock(); 935 936 if (reclaim) { 937 struct mem_cgroup_per_node *mz; 938 939 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); 940 iter = &mz->iter[reclaim->priority]; 941 942 if (prev && reclaim->generation != iter->generation) 943 goto out_unlock; 944 945 while (1) { 946 pos = READ_ONCE(iter->position); 947 if (!pos || css_tryget(&pos->css)) 948 break; 949 /* 950 * css reference reached zero, so iter->position will 951 * be cleared by ->css_released. However, we should not 952 * rely on this happening soon, because ->css_released 953 * is called from a work queue, and by busy-waiting we 954 * might block it. So we clear iter->position right 955 * away. 956 */ 957 (void)cmpxchg(&iter->position, pos, NULL); 958 } 959 } 960 961 if (pos) 962 css = &pos->css; 963 964 for (;;) { 965 css = css_next_descendant_pre(css, &root->css); 966 if (!css) { 967 /* 968 * Reclaimers share the hierarchy walk, and a 969 * new one might jump in right at the end of 970 * the hierarchy - make sure they see at least 971 * one group and restart from the beginning. 972 */ 973 if (!prev) 974 continue; 975 break; 976 } 977 978 /* 979 * Verify the css and acquire a reference. The root 980 * is provided by the caller, so we know it's alive 981 * and kicking, and don't take an extra reference. 982 */ 983 memcg = mem_cgroup_from_css(css); 984 985 if (css == &root->css) 986 break; 987 988 if (css_tryget(css)) 989 break; 990 991 memcg = NULL; 992 } 993 994 if (reclaim) { 995 /* 996 * The position could have already been updated by a competing 997 * thread, so check that the value hasn't changed since we read 998 * it to avoid reclaiming from the same cgroup twice. 
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}
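/*
 * Walk sketch (equivalent to the for_each_mem_cgroup_tree() helper defined
 * above; should_stop() is a hypothetical predicate):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * Breaking out without mem_cgroup_iter_break() would leak the css reference
 * held on the last returned memcg.
 */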
1115 */ 1116 if (!memcg) 1117 memcg = root_mem_cgroup; 1118 1119 mz = mem_cgroup_page_nodeinfo(memcg, page); 1120 lruvec = &mz->lruvec; 1121 out: 1122 /* 1123 * Since a node can be onlined after the mem_cgroup was created, 1124 * we have to be prepared to initialize lruvec->zone here; 1125 * and if offlined then reonlined, we need to reinitialize it. 1126 */ 1127 if (unlikely(lruvec->pgdat != pgdat)) 1128 lruvec->pgdat = pgdat; 1129 return lruvec; 1130 } 1131 1132 /** 1133 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1134 * @lruvec: mem_cgroup per zone lru vector 1135 * @lru: index of lru list the page is sitting on 1136 * @zid: zone id of the accounted pages 1137 * @nr_pages: positive when adding or negative when removing 1138 * 1139 * This function must be called under lru_lock, just before a page is added 1140 * to or just after a page is removed from an lru list (that ordering being 1141 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1142 */ 1143 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1144 int zid, int nr_pages) 1145 { 1146 struct mem_cgroup_per_node *mz; 1147 unsigned long *lru_size; 1148 long size; 1149 1150 if (mem_cgroup_disabled()) 1151 return; 1152 1153 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1154 lru_size = &mz->lru_zone_size[zid][lru]; 1155 1156 if (nr_pages < 0) 1157 *lru_size += nr_pages; 1158 1159 size = *lru_size; 1160 if (WARN_ONCE(size < 0, 1161 "%s(%p, %d, %d): lru_size %ld\n", 1162 __func__, lruvec, lru, nr_pages, size)) { 1163 VM_BUG_ON(1); 1164 *lru_size = 0; 1165 } 1166 1167 if (nr_pages > 0) 1168 *lru_size += nr_pages; 1169 } 1170 1171 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1172 { 1173 struct mem_cgroup *task_memcg; 1174 struct task_struct *p; 1175 bool ret; 1176 1177 p = find_lock_task_mm(task); 1178 if (p) { 1179 task_memcg = get_mem_cgroup_from_mm(p->mm); 1180 task_unlock(p); 1181 } else { 1182 /* 1183 * All threads may have already detached their mm's, but the oom 1184 * killer still needs to detect if they have already been oom 1185 * killed to prevent needlessly killing additional tasks. 1186 */ 1187 rcu_read_lock(); 1188 task_memcg = mem_cgroup_from_task(task); 1189 css_get(&task_memcg->css); 1190 rcu_read_unlock(); 1191 } 1192 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1193 css_put(&task_memcg->css); 1194 return ret; 1195 } 1196 1197 /** 1198 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1199 * @memcg: the memory cgroup 1200 * 1201 * Returns the maximum amount of memory @mem can be charged with, in 1202 * pages. 1203 */ 1204 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1205 { 1206 unsigned long margin = 0; 1207 unsigned long count; 1208 unsigned long limit; 1209 1210 count = page_counter_read(&memcg->memory); 1211 limit = READ_ONCE(memcg->memory.max); 1212 if (count < limit) 1213 margin = limit - count; 1214 1215 if (do_memsw_account()) { 1216 count = page_counter_read(&memcg->memsw); 1217 limit = READ_ONCE(memcg->memsw.max); 1218 if (count <= limit) 1219 margin = min(margin, limit - count); 1220 else 1221 margin = 0; 1222 } 1223 1224 return margin; 1225 } 1226 1227 /* 1228 * A routine for checking "mem" is under move_account() or not. 1229 * 1230 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1231 * moving cgroups. This is for waiting at high-memory pressure 1232 * caused by "move". 
1233 */ 1234 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1235 { 1236 struct mem_cgroup *from; 1237 struct mem_cgroup *to; 1238 bool ret = false; 1239 /* 1240 * Unlike task_move routines, we access mc.to, mc.from not under 1241 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1242 */ 1243 spin_lock(&mc.lock); 1244 from = mc.from; 1245 to = mc.to; 1246 if (!from) 1247 goto unlock; 1248 1249 ret = mem_cgroup_is_descendant(from, memcg) || 1250 mem_cgroup_is_descendant(to, memcg); 1251 unlock: 1252 spin_unlock(&mc.lock); 1253 return ret; 1254 } 1255 1256 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1257 { 1258 if (mc.moving_task && current != mc.moving_task) { 1259 if (mem_cgroup_under_move(memcg)) { 1260 DEFINE_WAIT(wait); 1261 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1262 /* moving charge context might have finished. */ 1263 if (mc.moving_task) 1264 schedule(); 1265 finish_wait(&mc.waitq, &wait); 1266 return true; 1267 } 1268 } 1269 return false; 1270 } 1271 1272 static const unsigned int memcg1_stats[] = { 1273 MEMCG_CACHE, 1274 MEMCG_RSS, 1275 MEMCG_RSS_HUGE, 1276 NR_SHMEM, 1277 NR_FILE_MAPPED, 1278 NR_FILE_DIRTY, 1279 NR_WRITEBACK, 1280 MEMCG_SWAP, 1281 }; 1282 1283 static const char *const memcg1_stat_names[] = { 1284 "cache", 1285 "rss", 1286 "rss_huge", 1287 "shmem", 1288 "mapped_file", 1289 "dirty", 1290 "writeback", 1291 "swap", 1292 }; 1293 1294 #define K(x) ((x) << (PAGE_SHIFT-10)) 1295 /** 1296 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 1297 * @memcg: The memory cgroup that went over limit 1298 * @p: Task that is going to be killed 1299 * 1300 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1301 * enabled 1302 */ 1303 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1304 { 1305 struct mem_cgroup *iter; 1306 unsigned int i; 1307 1308 rcu_read_lock(); 1309 1310 if (p) { 1311 pr_info("Task in "); 1312 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1313 pr_cont(" killed as a result of limit of "); 1314 } else { 1315 pr_info("Memory limit reached of cgroup "); 1316 } 1317 1318 pr_cont_cgroup_path(memcg->css.cgroup); 1319 pr_cont("\n"); 1320 1321 rcu_read_unlock(); 1322 1323 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1324 K((u64)page_counter_read(&memcg->memory)), 1325 K((u64)memcg->memory.max), memcg->memory.failcnt); 1326 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1327 K((u64)page_counter_read(&memcg->memsw)), 1328 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1329 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1330 K((u64)page_counter_read(&memcg->kmem)), 1331 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1332 1333 for_each_mem_cgroup_tree(iter, memcg) { 1334 pr_info("Memory cgroup stats for "); 1335 pr_cont_cgroup_path(iter->css.cgroup); 1336 pr_cont(":"); 1337 1338 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 1339 if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account) 1340 continue; 1341 pr_cont(" %s:%luKB", memcg1_stat_names[i], 1342 K(memcg_page_state(iter, memcg1_stats[i]))); 1343 } 1344 1345 for (i = 0; i < NR_LRU_LISTS; i++) 1346 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1347 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1348 1349 pr_cont("\n"); 1350 } 1351 } 1352 1353 /* 1354 * Return the memory (and swap, if configured) limit for a memcg. 
1355 */ 1356 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1357 { 1358 unsigned long max; 1359 1360 max = memcg->memory.max; 1361 if (mem_cgroup_swappiness(memcg)) { 1362 unsigned long memsw_max; 1363 unsigned long swap_max; 1364 1365 memsw_max = memcg->memsw.max; 1366 swap_max = memcg->swap.max; 1367 swap_max = min(swap_max, (unsigned long)total_swap_pages); 1368 max = min(max + swap_max, memsw_max); 1369 } 1370 return max; 1371 } 1372 1373 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1374 int order) 1375 { 1376 struct oom_control oc = { 1377 .zonelist = NULL, 1378 .nodemask = NULL, 1379 .memcg = memcg, 1380 .gfp_mask = gfp_mask, 1381 .order = order, 1382 }; 1383 bool ret; 1384 1385 mutex_lock(&oom_lock); 1386 ret = out_of_memory(&oc); 1387 mutex_unlock(&oom_lock); 1388 return ret; 1389 } 1390 1391 #if MAX_NUMNODES > 1 1392 1393 /** 1394 * test_mem_cgroup_node_reclaimable 1395 * @memcg: the target memcg 1396 * @nid: the node ID to be checked. 1397 * @noswap : specify true here if the user wants flle only information. 1398 * 1399 * This function returns whether the specified memcg contains any 1400 * reclaimable pages on a node. Returns true if there are any reclaimable 1401 * pages in the node. 1402 */ 1403 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, 1404 int nid, bool noswap) 1405 { 1406 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) 1407 return true; 1408 if (noswap || !total_swap_pages) 1409 return false; 1410 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) 1411 return true; 1412 return false; 1413 1414 } 1415 1416 /* 1417 * Always updating the nodemask is not very good - even if we have an empty 1418 * list or the wrong list here, we can start from some node and traverse all 1419 * nodes based on the zonelist. So update the list loosely once per 10 secs. 1420 * 1421 */ 1422 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) 1423 { 1424 int nid; 1425 /* 1426 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET 1427 * pagein/pageout changes since the last update. 1428 */ 1429 if (!atomic_read(&memcg->numainfo_events)) 1430 return; 1431 if (atomic_inc_return(&memcg->numainfo_updating) > 1) 1432 return; 1433 1434 /* make a nodemask where this memcg uses memory from */ 1435 memcg->scan_nodes = node_states[N_MEMORY]; 1436 1437 for_each_node_mask(nid, node_states[N_MEMORY]) { 1438 1439 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) 1440 node_clear(nid, memcg->scan_nodes); 1441 } 1442 1443 atomic_set(&memcg->numainfo_events, 0); 1444 atomic_set(&memcg->numainfo_updating, 0); 1445 } 1446 1447 /* 1448 * Selecting a node where we start reclaim from. Because what we need is just 1449 * reducing usage counter, start from anywhere is O,K. Considering 1450 * memory reclaim from current node, there are pros. and cons. 1451 * 1452 * Freeing memory from current node means freeing memory from a node which 1453 * we'll use or we've used. So, it may make LRU bad. And if several threads 1454 * hit limits, it will see a contention on a node. But freeing from remote 1455 * node means more costs for memory reclaim because of memory latency. 1456 * 1457 * Now, we use round-robin. Better algorithm is welcomed. 
1458 */ 1459 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1460 { 1461 int node; 1462 1463 mem_cgroup_may_update_nodemask(memcg); 1464 node = memcg->last_scanned_node; 1465 1466 node = next_node_in(node, memcg->scan_nodes); 1467 /* 1468 * mem_cgroup_may_update_nodemask might have seen no reclaimmable pages 1469 * last time it really checked all the LRUs due to rate limiting. 1470 * Fallback to the current node in that case for simplicity. 1471 */ 1472 if (unlikely(node == MAX_NUMNODES)) 1473 node = numa_node_id(); 1474 1475 memcg->last_scanned_node = node; 1476 return node; 1477 } 1478 #else 1479 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1480 { 1481 return 0; 1482 } 1483 #endif 1484 1485 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1486 pg_data_t *pgdat, 1487 gfp_t gfp_mask, 1488 unsigned long *total_scanned) 1489 { 1490 struct mem_cgroup *victim = NULL; 1491 int total = 0; 1492 int loop = 0; 1493 unsigned long excess; 1494 unsigned long nr_scanned; 1495 struct mem_cgroup_reclaim_cookie reclaim = { 1496 .pgdat = pgdat, 1497 .priority = 0, 1498 }; 1499 1500 excess = soft_limit_excess(root_memcg); 1501 1502 while (1) { 1503 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1504 if (!victim) { 1505 loop++; 1506 if (loop >= 2) { 1507 /* 1508 * If we have not been able to reclaim 1509 * anything, it might because there are 1510 * no reclaimable pages under this hierarchy 1511 */ 1512 if (!total) 1513 break; 1514 /* 1515 * We want to do more targeted reclaim. 1516 * excess >> 2 is not to excessive so as to 1517 * reclaim too much, nor too less that we keep 1518 * coming back to reclaim from this cgroup 1519 */ 1520 if (total >= (excess >> 2) || 1521 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1522 break; 1523 } 1524 continue; 1525 } 1526 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1527 pgdat, &nr_scanned); 1528 *total_scanned += nr_scanned; 1529 if (!soft_limit_excess(root_memcg)) 1530 break; 1531 } 1532 mem_cgroup_iter_break(root_memcg, victim); 1533 return total; 1534 } 1535 1536 #ifdef CONFIG_LOCKDEP 1537 static struct lockdep_map memcg_oom_lock_dep_map = { 1538 .name = "memcg_oom_lock", 1539 }; 1540 #endif 1541 1542 static DEFINE_SPINLOCK(memcg_oom_lock); 1543 1544 /* 1545 * Check OOM-Killer is already running under our hierarchy. 1546 * If someone is running, return false. 1547 */ 1548 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1549 { 1550 struct mem_cgroup *iter, *failed = NULL; 1551 1552 spin_lock(&memcg_oom_lock); 1553 1554 for_each_mem_cgroup_tree(iter, memcg) { 1555 if (iter->oom_lock) { 1556 /* 1557 * this subtree of our hierarchy is already locked 1558 * so we cannot give a lock. 
1559 */ 1560 failed = iter; 1561 mem_cgroup_iter_break(memcg, iter); 1562 break; 1563 } else 1564 iter->oom_lock = true; 1565 } 1566 1567 if (failed) { 1568 /* 1569 * OK, we failed to lock the whole subtree so we have 1570 * to clean up what we set up to the failing subtree 1571 */ 1572 for_each_mem_cgroup_tree(iter, memcg) { 1573 if (iter == failed) { 1574 mem_cgroup_iter_break(memcg, iter); 1575 break; 1576 } 1577 iter->oom_lock = false; 1578 } 1579 } else 1580 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1581 1582 spin_unlock(&memcg_oom_lock); 1583 1584 return !failed; 1585 } 1586 1587 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1588 { 1589 struct mem_cgroup *iter; 1590 1591 spin_lock(&memcg_oom_lock); 1592 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1593 for_each_mem_cgroup_tree(iter, memcg) 1594 iter->oom_lock = false; 1595 spin_unlock(&memcg_oom_lock); 1596 } 1597 1598 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1599 { 1600 struct mem_cgroup *iter; 1601 1602 spin_lock(&memcg_oom_lock); 1603 for_each_mem_cgroup_tree(iter, memcg) 1604 iter->under_oom++; 1605 spin_unlock(&memcg_oom_lock); 1606 } 1607 1608 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1609 { 1610 struct mem_cgroup *iter; 1611 1612 /* 1613 * When a new child is created while the hierarchy is under oom, 1614 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1615 */ 1616 spin_lock(&memcg_oom_lock); 1617 for_each_mem_cgroup_tree(iter, memcg) 1618 if (iter->under_oom > 0) 1619 iter->under_oom--; 1620 spin_unlock(&memcg_oom_lock); 1621 } 1622 1623 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1624 1625 struct oom_wait_info { 1626 struct mem_cgroup *memcg; 1627 wait_queue_entry_t wait; 1628 }; 1629 1630 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1631 unsigned mode, int sync, void *arg) 1632 { 1633 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1634 struct mem_cgroup *oom_wait_memcg; 1635 struct oom_wait_info *oom_wait_info; 1636 1637 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1638 oom_wait_memcg = oom_wait_info->memcg; 1639 1640 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1641 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1642 return 0; 1643 return autoremove_wake_function(wait, mode, sync, arg); 1644 } 1645 1646 static void memcg_oom_recover(struct mem_cgroup *memcg) 1647 { 1648 /* 1649 * For the following lockless ->under_oom test, the only required 1650 * guarantee is that it must see the state asserted by an OOM when 1651 * this function is called as a result of userland actions 1652 * triggered by the notification of the OOM. This is trivially 1653 * achieved by invoking mem_cgroup_mark_under_oom() before 1654 * triggering notification. 1655 */ 1656 if (memcg && memcg->under_oom) 1657 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1658 } 1659 1660 enum oom_status { 1661 OOM_SUCCESS, 1662 OOM_FAILED, 1663 OOM_ASYNC, 1664 OOM_SKIPPED 1665 }; 1666 1667 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1668 { 1669 if (order > PAGE_ALLOC_COSTLY_ORDER) 1670 return OOM_SKIPPED; 1671 1672 /* 1673 * We are in the middle of the charge context here, so we 1674 * don't want to block when potentially sitting on a callstack 1675 * that holds all kinds of filesystem and mm locks. 
1676 * 1677 * cgroup1 allows disabling the OOM killer and waiting for outside 1678 * handling until the charge can succeed; remember the context and put 1679 * the task to sleep at the end of the page fault when all locks are 1680 * released. 1681 * 1682 * On the other hand, in-kernel OOM killer allows for an async victim 1683 * memory reclaim (oom_reaper) and that means that we are not solely 1684 * relying on the oom victim to make a forward progress and we can 1685 * invoke the oom killer here. 1686 * 1687 * Please note that mem_cgroup_out_of_memory might fail to find a 1688 * victim and then we have to bail out from the charge path. 1689 */ 1690 if (memcg->oom_kill_disable) { 1691 if (!current->in_user_fault) 1692 return OOM_SKIPPED; 1693 css_get(&memcg->css); 1694 current->memcg_in_oom = memcg; 1695 current->memcg_oom_gfp_mask = mask; 1696 current->memcg_oom_order = order; 1697 1698 return OOM_ASYNC; 1699 } 1700 1701 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1702 return OOM_SUCCESS; 1703 1704 WARN(1,"Memory cgroup charge failed because of no reclaimable memory! " 1705 "This looks like a misconfiguration or a kernel bug."); 1706 return OOM_FAILED; 1707 } 1708 1709 /** 1710 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1711 * @handle: actually kill/wait or just clean up the OOM state 1712 * 1713 * This has to be called at the end of a page fault if the memcg OOM 1714 * handler was enabled. 1715 * 1716 * Memcg supports userspace OOM handling where failed allocations must 1717 * sleep on a waitqueue until the userspace task resolves the 1718 * situation. Sleeping directly in the charge context with all kinds 1719 * of locks held is not a good idea, instead we remember an OOM state 1720 * in the task and mem_cgroup_oom_synchronize() has to be called at 1721 * the end of the page fault to complete the OOM handling. 1722 * 1723 * Returns %true if an ongoing memcg OOM situation was detected and 1724 * completed, %false otherwise. 1725 */ 1726 bool mem_cgroup_oom_synchronize(bool handle) 1727 { 1728 struct mem_cgroup *memcg = current->memcg_in_oom; 1729 struct oom_wait_info owait; 1730 bool locked; 1731 1732 /* OOM is global, do not handle */ 1733 if (!memcg) 1734 return false; 1735 1736 if (!handle) 1737 goto cleanup; 1738 1739 owait.memcg = memcg; 1740 owait.wait.flags = 0; 1741 owait.wait.func = memcg_oom_wake_function; 1742 owait.wait.private = current; 1743 INIT_LIST_HEAD(&owait.wait.entry); 1744 1745 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1746 mem_cgroup_mark_under_oom(memcg); 1747 1748 locked = mem_cgroup_oom_trylock(memcg); 1749 1750 if (locked) 1751 mem_cgroup_oom_notify(memcg); 1752 1753 if (locked && !memcg->oom_kill_disable) { 1754 mem_cgroup_unmark_under_oom(memcg); 1755 finish_wait(&memcg_oom_waitq, &owait.wait); 1756 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1757 current->memcg_oom_order); 1758 } else { 1759 schedule(); 1760 mem_cgroup_unmark_under_oom(memcg); 1761 finish_wait(&memcg_oom_waitq, &owait.wait); 1762 } 1763 1764 if (locked) { 1765 mem_cgroup_oom_unlock(memcg); 1766 /* 1767 * There is no guarantee that an OOM-lock contender 1768 * sees the wakeups triggered by the OOM kill 1769 * uncharges. Wake any sleepers explicitely. 
1770 */ 1771 memcg_oom_recover(memcg); 1772 } 1773 cleanup: 1774 current->memcg_in_oom = NULL; 1775 css_put(&memcg->css); 1776 return true; 1777 } 1778 1779 /** 1780 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1781 * @victim: task to be killed by the OOM killer 1782 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1783 * 1784 * Returns a pointer to a memory cgroup, which has to be cleaned up 1785 * by killing all belonging OOM-killable tasks. 1786 * 1787 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 1788 */ 1789 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1790 struct mem_cgroup *oom_domain) 1791 { 1792 struct mem_cgroup *oom_group = NULL; 1793 struct mem_cgroup *memcg; 1794 1795 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1796 return NULL; 1797 1798 if (!oom_domain) 1799 oom_domain = root_mem_cgroup; 1800 1801 rcu_read_lock(); 1802 1803 memcg = mem_cgroup_from_task(victim); 1804 if (memcg == root_mem_cgroup) 1805 goto out; 1806 1807 /* 1808 * Traverse the memory cgroup hierarchy from the victim task's 1809 * cgroup up to the OOMing cgroup (or root) to find the 1810 * highest-level memory cgroup with oom.group set. 1811 */ 1812 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1813 if (memcg->oom_group) 1814 oom_group = memcg; 1815 1816 if (memcg == oom_domain) 1817 break; 1818 } 1819 1820 if (oom_group) 1821 css_get(&oom_group->css); 1822 out: 1823 rcu_read_unlock(); 1824 1825 return oom_group; 1826 } 1827 1828 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1829 { 1830 pr_info("Tasks in "); 1831 pr_cont_cgroup_path(memcg->css.cgroup); 1832 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1833 } 1834 1835 /** 1836 * lock_page_memcg - lock a page->mem_cgroup binding 1837 * @page: the page 1838 * 1839 * This function protects unlocked LRU pages from being moved to 1840 * another cgroup. 1841 * 1842 * It ensures lifetime of the returned memcg. Caller is responsible 1843 * for the lifetime of the page; __unlock_page_memcg() is available 1844 * when @page might get freed inside the locked section. 1845 */ 1846 struct mem_cgroup *lock_page_memcg(struct page *page) 1847 { 1848 struct mem_cgroup *memcg; 1849 unsigned long flags; 1850 1851 /* 1852 * The RCU lock is held throughout the transaction. The fast 1853 * path can get away without acquiring the memcg->move_lock 1854 * because page moving starts with an RCU grace period. 1855 * 1856 * The RCU lock also protects the memcg from being freed when 1857 * the page state that is going to change is the only thing 1858 * preventing the page itself from being freed. E.g. writeback 1859 * doesn't hold a page reference and relies on PG_writeback to 1860 * keep off truncation, migration and so forth. 1861 */ 1862 rcu_read_lock(); 1863 1864 if (mem_cgroup_disabled()) 1865 return NULL; 1866 again: 1867 memcg = page->mem_cgroup; 1868 if (unlikely(!memcg)) 1869 return NULL; 1870 1871 if (atomic_read(&memcg->moving_account) <= 0) 1872 return memcg; 1873 1874 spin_lock_irqsave(&memcg->move_lock, flags); 1875 if (memcg != page->mem_cgroup) { 1876 spin_unlock_irqrestore(&memcg->move_lock, flags); 1877 goto again; 1878 } 1879 1880 /* 1881 * When charge migration first begins, we can have locked and 1882 * unlocked page stat updates happening concurrently. Track 1883 * the task who has the lock for unlock_page_memcg(). 
1884 */ 1885 memcg->move_lock_task = current; 1886 memcg->move_lock_flags = flags; 1887 1888 return memcg; 1889 } 1890 EXPORT_SYMBOL(lock_page_memcg); 1891 1892 /** 1893 * __unlock_page_memcg - unlock and unpin a memcg 1894 * @memcg: the memcg 1895 * 1896 * Unlock and unpin a memcg returned by lock_page_memcg(). 1897 */ 1898 void __unlock_page_memcg(struct mem_cgroup *memcg) 1899 { 1900 if (memcg && memcg->move_lock_task == current) { 1901 unsigned long flags = memcg->move_lock_flags; 1902 1903 memcg->move_lock_task = NULL; 1904 memcg->move_lock_flags = 0; 1905 1906 spin_unlock_irqrestore(&memcg->move_lock, flags); 1907 } 1908 1909 rcu_read_unlock(); 1910 } 1911 1912 /** 1913 * unlock_page_memcg - unlock a page->mem_cgroup binding 1914 * @page: the page 1915 */ 1916 void unlock_page_memcg(struct page *page) 1917 { 1918 __unlock_page_memcg(page->mem_cgroup); 1919 } 1920 EXPORT_SYMBOL(unlock_page_memcg); 1921 1922 struct memcg_stock_pcp { 1923 struct mem_cgroup *cached; /* this never be root cgroup */ 1924 unsigned int nr_pages; 1925 struct work_struct work; 1926 unsigned long flags; 1927 #define FLUSHING_CACHED_CHARGE 0 1928 }; 1929 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1930 static DEFINE_MUTEX(percpu_charge_mutex); 1931 1932 /** 1933 * consume_stock: Try to consume stocked charge on this cpu. 1934 * @memcg: memcg to consume from. 1935 * @nr_pages: how many pages to charge. 1936 * 1937 * The charges will only happen if @memcg matches the current cpu's memcg 1938 * stock, and at least @nr_pages are available in that stock. Failure to 1939 * service an allocation will refill the stock. 1940 * 1941 * returns true if successful, false otherwise. 1942 */ 1943 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1944 { 1945 struct memcg_stock_pcp *stock; 1946 unsigned long flags; 1947 bool ret = false; 1948 1949 if (nr_pages > MEMCG_CHARGE_BATCH) 1950 return ret; 1951 1952 local_irq_save(flags); 1953 1954 stock = this_cpu_ptr(&memcg_stock); 1955 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 1956 stock->nr_pages -= nr_pages; 1957 ret = true; 1958 } 1959 1960 local_irq_restore(flags); 1961 1962 return ret; 1963 } 1964 1965 /* 1966 * Returns stocks cached in percpu and reset cached information. 1967 */ 1968 static void drain_stock(struct memcg_stock_pcp *stock) 1969 { 1970 struct mem_cgroup *old = stock->cached; 1971 1972 if (stock->nr_pages) { 1973 page_counter_uncharge(&old->memory, stock->nr_pages); 1974 if (do_memsw_account()) 1975 page_counter_uncharge(&old->memsw, stock->nr_pages); 1976 css_put_many(&old->css, stock->nr_pages); 1977 stock->nr_pages = 0; 1978 } 1979 stock->cached = NULL; 1980 } 1981 1982 static void drain_local_stock(struct work_struct *dummy) 1983 { 1984 struct memcg_stock_pcp *stock; 1985 unsigned long flags; 1986 1987 /* 1988 * The only protection from memory hotplug vs. drain_stock races is 1989 * that we always operate on local CPU stock here with IRQ disabled 1990 */ 1991 local_irq_save(flags); 1992 1993 stock = this_cpu_ptr(&memcg_stock); 1994 drain_stock(stock); 1995 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1996 1997 local_irq_restore(flags); 1998 } 1999 2000 /* 2001 * Cache charges(val) to local per_cpu area. 2002 * This will be consumed by consume_stock() function, later. 
2003	 */
2004 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2005 {
2006 	struct memcg_stock_pcp *stock;
2007 	unsigned long flags;
2008
2009 	local_irq_save(flags);
2010
2011 	stock = this_cpu_ptr(&memcg_stock);
2012 	if (stock->cached != memcg) { /* reset if necessary */
2013 		drain_stock(stock);
2014 		stock->cached = memcg;
2015 	}
2016 	stock->nr_pages += nr_pages;
2017
2018 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2019 		drain_stock(stock);
2020
2021 	local_irq_restore(flags);
2022 }
2023
2024 /*
2025  * Drain all per-CPU charge caches for the given root_memcg and the
2026  * subtree of the hierarchy under it.
2027  */
2028 static void drain_all_stock(struct mem_cgroup *root_memcg)
2029 {
2030 	int cpu, curcpu;
2031
2032 	/* If someone's already draining, avoid running more workers. */
2033 	if (!mutex_trylock(&percpu_charge_mutex))
2034 		return;
2035 	/*
2036 	 * Notify other cpus that the system-wide "drain" is running.
2037 	 * We do not care about races with cpu hotplug because cpu down
2038 	 * as well as workers from this path always operate on the local
2039 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2040 	 */
2041 	curcpu = get_cpu();
2042 	for_each_online_cpu(cpu) {
2043 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2044 		struct mem_cgroup *memcg;
2045
2046 		memcg = stock->cached;
2047 		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
2048 			continue;
2049 		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
2050 			css_put(&memcg->css);
2051 			continue;
2052 		}
2053 		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2054 			if (cpu == curcpu)
2055 				drain_local_stock(&stock->work);
2056 			else
2057 				schedule_work_on(cpu, &stock->work);
2058 		}
2059 		css_put(&memcg->css);
2060 	}
2061 	put_cpu();
2062 	mutex_unlock(&percpu_charge_mutex);
2063 }
2064
2065 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2066 {
2067 	struct memcg_stock_pcp *stock;
2068 	struct mem_cgroup *memcg;
2069
2070 	stock = &per_cpu(memcg_stock, cpu);
2071 	drain_stock(stock);
2072
2073 	for_each_mem_cgroup(memcg) {
2074 		int i;
2075
2076 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2077 			int nid;
2078 			long x;
2079
2080 			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
2081 			if (x)
2082 				atomic_long_add(x, &memcg->stat[i]);
2083
2084 			if (i >= NR_VM_NODE_STAT_ITEMS)
2085 				continue;
2086
2087 			for_each_node(nid) {
2088 				struct mem_cgroup_per_node *pn;
2089
2090 				pn = mem_cgroup_nodeinfo(memcg, nid);
2091 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2092 				if (x)
2093 					atomic_long_add(x, &pn->lruvec_stat[i]);
2094 			}
2095 		}
2096
2097 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2098 			long x;
2099
2100 			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
2101 			if (x)
2102 				atomic_long_add(x, &memcg->events[i]);
2103 		}
2104 	}
2105
2106 	return 0;
2107 }
2108
2109 static void reclaim_high(struct mem_cgroup *memcg,
2110 			 unsigned int nr_pages,
2111 			 gfp_t gfp_mask)
2112 {
2113 	do {
2114 		if (page_counter_read(&memcg->memory) <= memcg->high)
2115 			continue;
2116 		memcg_memory_event(memcg, MEMCG_HIGH);
2117 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2118 	} while ((memcg = parent_mem_cgroup(memcg)));
2119 }
2120
2121 static void high_work_func(struct work_struct *work)
2122 {
2123 	struct mem_cgroup *memcg;
2124
2125 	memcg = container_of(work, struct mem_cgroup, high_work);
2126 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2127 }
2128
2129 /*
2130  * Scheduled by try_charge() to be executed from the userland return path
2131  * and reclaims memory over the high limit.
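 *
 * Editorial sketch of the sequence (derived from try_charge() and the
 * code below, not part of the original comment):
 *
 *	try_charge():
 *		current->memcg_nr_pages_over_high += batch;
 *		set_notify_resume(current);
 *	// ... on the way back to userspace:
 *	mem_cgroup_handle_over_high():
 *		reclaim_high(memcg, nr_pages, GFP_KERNEL);
 *		current->memcg_nr_pages_over_high = 0;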
2132 */ 2133 void mem_cgroup_handle_over_high(void) 2134 { 2135 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2136 struct mem_cgroup *memcg; 2137 2138 if (likely(!nr_pages)) 2139 return; 2140 2141 memcg = get_mem_cgroup_from_mm(current->mm); 2142 reclaim_high(memcg, nr_pages, GFP_KERNEL); 2143 css_put(&memcg->css); 2144 current->memcg_nr_pages_over_high = 0; 2145 } 2146 2147 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2148 unsigned int nr_pages) 2149 { 2150 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2151 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2152 struct mem_cgroup *mem_over_limit; 2153 struct page_counter *counter; 2154 unsigned long nr_reclaimed; 2155 bool may_swap = true; 2156 bool drained = false; 2157 bool oomed = false; 2158 enum oom_status oom_status; 2159 2160 if (mem_cgroup_is_root(memcg)) 2161 return 0; 2162 retry: 2163 if (consume_stock(memcg, nr_pages)) 2164 return 0; 2165 2166 if (!do_memsw_account() || 2167 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2168 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2169 goto done_restock; 2170 if (do_memsw_account()) 2171 page_counter_uncharge(&memcg->memsw, batch); 2172 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2173 } else { 2174 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2175 may_swap = false; 2176 } 2177 2178 if (batch > nr_pages) { 2179 batch = nr_pages; 2180 goto retry; 2181 } 2182 2183 /* 2184 * Unlike in global OOM situations, memcg is not in a physical 2185 * memory shortage. Allow dying and OOM-killed tasks to 2186 * bypass the last charges so that they can exit quickly and 2187 * free their memory. 2188 */ 2189 if (unlikely(tsk_is_oom_victim(current) || 2190 fatal_signal_pending(current) || 2191 current->flags & PF_EXITING)) 2192 goto force; 2193 2194 /* 2195 * Prevent unbounded recursion when reclaim operations need to 2196 * allocate memory. This might exceed the limits temporarily, 2197 * but we prefer facilitating memory reclaim and getting back 2198 * under the limit over triggering OOM kills in these cases. 2199 */ 2200 if (unlikely(current->flags & PF_MEMALLOC)) 2201 goto force; 2202 2203 if (unlikely(task_in_memcg_oom(current))) 2204 goto nomem; 2205 2206 if (!gfpflags_allow_blocking(gfp_mask)) 2207 goto nomem; 2208 2209 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2210 2211 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2212 gfp_mask, may_swap); 2213 2214 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2215 goto retry; 2216 2217 if (!drained) { 2218 drain_all_stock(mem_over_limit); 2219 drained = true; 2220 goto retry; 2221 } 2222 2223 if (gfp_mask & __GFP_NORETRY) 2224 goto nomem; 2225 /* 2226 * Even though the limit is exceeded at this point, reclaim 2227 * may have been able to free some pages. Retry the charge 2228 * before killing the task. 2229 * 2230 * Only for regular pages, though: huge pages are rather 2231 * unlikely to succeed so close to the limit, and we fall back 2232 * to regular pages anyway in case of failure. 2233 */ 2234 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2235 goto retry; 2236 /* 2237 * At task move, charge accounts can be doubly counted. So, it's 2238 * better to wait until the end of task_move if something is going on. 
2239	 */
2240 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2241 		goto retry;
2242
2243 	if (nr_retries--)
2244 		goto retry;
2245
2246 	if (gfp_mask & __GFP_RETRY_MAYFAIL && oomed)
2247 		goto nomem;
2248
2249 	if (gfp_mask & __GFP_NOFAIL)
2250 		goto force;
2251
2252 	if (fatal_signal_pending(current))
2253 		goto force;
2254
2255 	memcg_memory_event(mem_over_limit, MEMCG_OOM);
2256
2257 	/*
2258 	 * Keep retrying as long as the memcg OOM killer is able to make
2259 	 * forward progress, or bypass the charge if the OOM killer
2260 	 * couldn't make any progress.
2261 	 */
2262 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2263 		       get_order(nr_pages * PAGE_SIZE));
2264 	switch (oom_status) {
2265 	case OOM_SUCCESS:
2266 		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2267 		oomed = true;
2268 		goto retry;
2269 	case OOM_FAILED:
2270 		goto force;
2271 	default:
2272 		goto nomem;
2273 	}
2274 nomem:
2275 	if (!(gfp_mask & __GFP_NOFAIL))
2276 		return -ENOMEM;
2277 force:
2278 	/*
2279 	 * The allocation either can't fail or will lead to more memory
2280 	 * being freed very soon. Allow memory usage to go over the limit
2281 	 * temporarily by force-charging it.
2282 	 */
2283 	page_counter_charge(&memcg->memory, nr_pages);
2284 	if (do_memsw_account())
2285 		page_counter_charge(&memcg->memsw, nr_pages);
2286 	css_get_many(&memcg->css, nr_pages);
2287
2288 	return 0;
2289
2290 done_restock:
2291 	css_get_many(&memcg->css, batch);
2292 	if (batch > nr_pages)
2293 		refill_stock(memcg, batch - nr_pages);
2294
2295 	/*
2296 	 * If the hierarchy is above the normal consumption range, schedule
2297 	 * reclaim on returning to userland. We could perform reclaim here
2298 	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and
2299 	 * so that GFP_KERNEL can consistently be used during reclaim. @memcg
2300 	 * is not recorded as it most likely matches current's and won't
2301 	 * change in the meantime. As the high limit is checked again before
2302 	 * reclaim, the cost of a mismatch is negligible.
2303 */ 2304 do { 2305 if (page_counter_read(&memcg->memory) > memcg->high) { 2306 /* Don't bother a random interrupted task */ 2307 if (in_interrupt()) { 2308 schedule_work(&memcg->high_work); 2309 break; 2310 } 2311 current->memcg_nr_pages_over_high += batch; 2312 set_notify_resume(current); 2313 break; 2314 } 2315 } while ((memcg = parent_mem_cgroup(memcg))); 2316 2317 return 0; 2318 } 2319 2320 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2321 { 2322 if (mem_cgroup_is_root(memcg)) 2323 return; 2324 2325 page_counter_uncharge(&memcg->memory, nr_pages); 2326 if (do_memsw_account()) 2327 page_counter_uncharge(&memcg->memsw, nr_pages); 2328 2329 css_put_many(&memcg->css, nr_pages); 2330 } 2331 2332 static void lock_page_lru(struct page *page, int *isolated) 2333 { 2334 struct zone *zone = page_zone(page); 2335 2336 spin_lock_irq(zone_lru_lock(zone)); 2337 if (PageLRU(page)) { 2338 struct lruvec *lruvec; 2339 2340 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2341 ClearPageLRU(page); 2342 del_page_from_lru_list(page, lruvec, page_lru(page)); 2343 *isolated = 1; 2344 } else 2345 *isolated = 0; 2346 } 2347 2348 static void unlock_page_lru(struct page *page, int isolated) 2349 { 2350 struct zone *zone = page_zone(page); 2351 2352 if (isolated) { 2353 struct lruvec *lruvec; 2354 2355 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2356 VM_BUG_ON_PAGE(PageLRU(page), page); 2357 SetPageLRU(page); 2358 add_page_to_lru_list(page, lruvec, page_lru(page)); 2359 } 2360 spin_unlock_irq(zone_lru_lock(zone)); 2361 } 2362 2363 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2364 bool lrucare) 2365 { 2366 int isolated; 2367 2368 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2369 2370 /* 2371 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2372 * may already be on some other mem_cgroup's LRU. Take care of it. 2373 */ 2374 if (lrucare) 2375 lock_page_lru(page, &isolated); 2376 2377 /* 2378 * Nobody should be changing or seriously looking at 2379 * page->mem_cgroup at this point: 2380 * 2381 * - the page is uncharged 2382 * 2383 * - the page is off-LRU 2384 * 2385 * - an anonymous fault has exclusive page access, except for 2386 * a locked page table 2387 * 2388 * - a page cache insertion, a swapin fault, or a migration 2389 * have the page locked 2390 */ 2391 page->mem_cgroup = memcg; 2392 2393 if (lrucare) 2394 unlock_page_lru(page, isolated); 2395 } 2396 2397 #ifdef CONFIG_MEMCG_KMEM 2398 static int memcg_alloc_cache_id(void) 2399 { 2400 int id, size; 2401 int err; 2402 2403 id = ida_simple_get(&memcg_cache_ida, 2404 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2405 if (id < 0) 2406 return id; 2407 2408 if (id < memcg_nr_cache_ids) 2409 return id; 2410 2411 /* 2412 * There's no space for the new id in memcg_caches arrays, 2413 * so we have to grow them. 
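	 *
	 * Editorial example (derived from the code below, not part of the
	 * original comment): the new size is 2 * (id + 1), clamped to
	 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE], so a freshly
	 * allocated id of 32, one past a 32-entry array, grows the
	 * arrays to 66 entries.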
2414 */ 2415 down_write(&memcg_cache_ids_sem); 2416 2417 size = 2 * (id + 1); 2418 if (size < MEMCG_CACHES_MIN_SIZE) 2419 size = MEMCG_CACHES_MIN_SIZE; 2420 else if (size > MEMCG_CACHES_MAX_SIZE) 2421 size = MEMCG_CACHES_MAX_SIZE; 2422 2423 err = memcg_update_all_caches(size); 2424 if (!err) 2425 err = memcg_update_all_list_lrus(size); 2426 if (!err) 2427 memcg_nr_cache_ids = size; 2428 2429 up_write(&memcg_cache_ids_sem); 2430 2431 if (err) { 2432 ida_simple_remove(&memcg_cache_ida, id); 2433 return err; 2434 } 2435 return id; 2436 } 2437 2438 static void memcg_free_cache_id(int id) 2439 { 2440 ida_simple_remove(&memcg_cache_ida, id); 2441 } 2442 2443 struct memcg_kmem_cache_create_work { 2444 struct mem_cgroup *memcg; 2445 struct kmem_cache *cachep; 2446 struct work_struct work; 2447 }; 2448 2449 static void memcg_kmem_cache_create_func(struct work_struct *w) 2450 { 2451 struct memcg_kmem_cache_create_work *cw = 2452 container_of(w, struct memcg_kmem_cache_create_work, work); 2453 struct mem_cgroup *memcg = cw->memcg; 2454 struct kmem_cache *cachep = cw->cachep; 2455 2456 memcg_create_kmem_cache(memcg, cachep); 2457 2458 css_put(&memcg->css); 2459 kfree(cw); 2460 } 2461 2462 /* 2463 * Enqueue the creation of a per-memcg kmem_cache. 2464 */ 2465 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2466 struct kmem_cache *cachep) 2467 { 2468 struct memcg_kmem_cache_create_work *cw; 2469 2470 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2471 if (!cw) 2472 return; 2473 2474 css_get(&memcg->css); 2475 2476 cw->memcg = memcg; 2477 cw->cachep = cachep; 2478 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2479 2480 queue_work(memcg_kmem_cache_wq, &cw->work); 2481 } 2482 2483 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2484 struct kmem_cache *cachep) 2485 { 2486 /* 2487 * We need to stop accounting when we kmalloc, because if the 2488 * corresponding kmalloc cache is not yet created, the first allocation 2489 * in __memcg_schedule_kmem_cache_create will recurse. 2490 * 2491 * However, it is better to enclose the whole function. Depending on 2492 * the debugging options enabled, INIT_WORK(), for instance, can 2493 * trigger an allocation. This too, will make us recurse. Because at 2494 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2495 * the safest choice is to do it like this, wrapping the whole function. 2496 */ 2497 current->memcg_kmem_skip_account = 1; 2498 __memcg_schedule_kmem_cache_create(memcg, cachep); 2499 current->memcg_kmem_skip_account = 0; 2500 } 2501 2502 static inline bool memcg_kmem_bypass(void) 2503 { 2504 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2505 return true; 2506 return false; 2507 } 2508 2509 /** 2510 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2511 * @cachep: the original global kmem cache 2512 * 2513 * Return the kmem_cache we're supposed to use for a slab allocation. 2514 * We try to use the current memcg's version of the cache. 2515 * 2516 * If the cache does not exist yet, if we are the first user of it, we 2517 * create it asynchronously in a workqueue and let the current allocation 2518 * go through with the original cache. 2519 * 2520 * This function takes a reference to the cache it returns to assure it 2521 * won't get destroyed while we are working with it. Once the caller is 2522 * done with it, memcg_kmem_put_cache() must be called to release the 2523 * reference. 
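 *
 * Illustrative usage (an editorial sketch, not from the original file):
 *
 *	struct kmem_cache *s = memcg_kmem_get_cache(cachep);
 *	void *obj = kmem_cache_alloc(s, gfp);
 *	memcg_kmem_put_cache(s);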
2524	 */
2525 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2526 {
2527 	struct mem_cgroup *memcg;
2528 	struct kmem_cache *memcg_cachep;
2529 	int kmemcg_id;
2530
2531 	VM_BUG_ON(!is_root_cache(cachep));
2532
2533 	if (memcg_kmem_bypass())
2534 		return cachep;
2535
2536 	if (current->memcg_kmem_skip_account)
2537 		return cachep;
2538
2539 	memcg = get_mem_cgroup_from_current();
2540 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2541 	if (kmemcg_id < 0)
2542 		goto out;
2543
2544 	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2545 	if (likely(memcg_cachep))
2546 		return memcg_cachep;
2547
2548 	/*
2549 	 * If we are in a safe context (can wait, and not in interrupt
2550 	 * context), we could be predictable and return right away.
2551 	 * This would guarantee that the allocation being performed
2552 	 * already belongs in the new cache.
2553 	 *
2554 	 * However, some clashes can arise from locking.
2555 	 * For instance, because we acquire the slab_mutex while doing
2556 	 * memcg_create_kmem_cache, no further allocation
2557 	 * could happen with the slab_mutex held. So it's better to
2558 	 * defer everything.
2559 	 */
2560 	memcg_schedule_kmem_cache_create(memcg, cachep);
2561 out:
2562 	css_put(&memcg->css);
2563 	return cachep;
2564 }
2565
2566 /**
2567  * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2568  * @cachep: the cache returned by memcg_kmem_get_cache
2569  */
2570 void memcg_kmem_put_cache(struct kmem_cache *cachep)
2571 {
2572 	if (!is_root_cache(cachep))
2573 		css_put(&cachep->memcg_params.memcg->css);
2574 }
2575
2576 /**
2577  * memcg_kmem_charge_memcg: charge a kmem page
2578  * @page: page to charge
2579  * @gfp: reclaim mode
2580  * @order: allocation order
2581  * @memcg: memory cgroup to charge
2582  *
2583  * Returns 0 on success, an error code on failure.
2584  */
2585 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2586 			    struct mem_cgroup *memcg)
2587 {
2588 	unsigned int nr_pages = 1 << order;
2589 	struct page_counter *counter;
2590 	int ret;
2591
2592 	ret = try_charge(memcg, gfp, nr_pages);
2593 	if (ret)
2594 		return ret;
2595
2596 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2597 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2598 		cancel_charge(memcg, nr_pages);
2599 		return -ENOMEM;
2600 	}
2601
2602 	page->mem_cgroup = memcg;
2603
2604 	return 0;
2605 }
2606
2607 /**
2608  * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2609  * @page: page to charge
2610  * @gfp: reclaim mode
2611  * @order: allocation order
2612  *
2613  * Returns 0 on success, an error code on failure.
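 *
 * Illustrative pairing (an editorial sketch, roughly what the page
 * allocator does for __GFP_ACCOUNT allocations):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	// ...
 *	memcg_kmem_uncharge(page, order);	// when the page is freed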
2614 */ 2615 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2616 { 2617 struct mem_cgroup *memcg; 2618 int ret = 0; 2619 2620 if (memcg_kmem_bypass()) 2621 return 0; 2622 2623 memcg = get_mem_cgroup_from_current(); 2624 if (!mem_cgroup_is_root(memcg)) { 2625 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2626 if (!ret) 2627 __SetPageKmemcg(page); 2628 } 2629 css_put(&memcg->css); 2630 return ret; 2631 } 2632 /** 2633 * memcg_kmem_uncharge: uncharge a kmem page 2634 * @page: page to uncharge 2635 * @order: allocation order 2636 */ 2637 void memcg_kmem_uncharge(struct page *page, int order) 2638 { 2639 struct mem_cgroup *memcg = page->mem_cgroup; 2640 unsigned int nr_pages = 1 << order; 2641 2642 if (!memcg) 2643 return; 2644 2645 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2646 2647 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2648 page_counter_uncharge(&memcg->kmem, nr_pages); 2649 2650 page_counter_uncharge(&memcg->memory, nr_pages); 2651 if (do_memsw_account()) 2652 page_counter_uncharge(&memcg->memsw, nr_pages); 2653 2654 page->mem_cgroup = NULL; 2655 2656 /* slab pages do not have PageKmemcg flag set */ 2657 if (PageKmemcg(page)) 2658 __ClearPageKmemcg(page); 2659 2660 css_put_many(&memcg->css, nr_pages); 2661 } 2662 #endif /* CONFIG_MEMCG_KMEM */ 2663 2664 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2665 2666 /* 2667 * Because tail pages are not marked as "used", set it. We're under 2668 * zone_lru_lock and migration entries setup in all page mappings. 2669 */ 2670 void mem_cgroup_split_huge_fixup(struct page *head) 2671 { 2672 int i; 2673 2674 if (mem_cgroup_disabled()) 2675 return; 2676 2677 for (i = 1; i < HPAGE_PMD_NR; i++) 2678 head[i].mem_cgroup = head->mem_cgroup; 2679 2680 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); 2681 } 2682 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2683 2684 #ifdef CONFIG_MEMCG_SWAP 2685 /** 2686 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2687 * @entry: swap entry to be moved 2688 * @from: mem_cgroup which the entry is moved from 2689 * @to: mem_cgroup which the entry is moved to 2690 * 2691 * It succeeds only when the swap_cgroup's record for this entry is the same 2692 * as the mem_cgroup's id of @from. 2693 * 2694 * Returns 0 on success, -EINVAL on failure. 2695 * 2696 * The caller must have charged to @to, IOW, called page_counter_charge() about 2697 * both res and memsw, and called css_get(). 2698 */ 2699 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2700 struct mem_cgroup *from, struct mem_cgroup *to) 2701 { 2702 unsigned short old_id, new_id; 2703 2704 old_id = mem_cgroup_id(from); 2705 new_id = mem_cgroup_id(to); 2706 2707 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2708 mod_memcg_state(from, MEMCG_SWAP, -1); 2709 mod_memcg_state(to, MEMCG_SWAP, 1); 2710 return 0; 2711 } 2712 return -EINVAL; 2713 } 2714 #else 2715 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2716 struct mem_cgroup *from, struct mem_cgroup *to) 2717 { 2718 return -EINVAL; 2719 } 2720 #endif 2721 2722 static DEFINE_MUTEX(memcg_max_mutex); 2723 2724 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 2725 unsigned long max, bool memsw) 2726 { 2727 bool enlarge = false; 2728 bool drained = false; 2729 int ret; 2730 bool limits_invariant; 2731 struct page_counter *counter = memsw ? 
						&memcg->memsw : &memcg->memory;
2732
2733 	do {
2734 		if (signal_pending(current)) {
2735 			ret = -EINTR;
2736 			break;
2737 		}
2738
2739 		mutex_lock(&memcg_max_mutex);
2740 		/*
2741 		 * Make sure that the new limit (memsw or memory limit) doesn't
2742 		 * break our basic invariant rule memory.max <= memsw.max.
2743 		 */
2744 		limits_invariant = memsw ? max >= memcg->memory.max :
2745 					   max <= memcg->memsw.max;
2746 		if (!limits_invariant) {
2747 			mutex_unlock(&memcg_max_mutex);
2748 			ret = -EINVAL;
2749 			break;
2750 		}
2751 		if (max > counter->max)
2752 			enlarge = true;
2753 		ret = page_counter_set_max(counter, max);
2754 		mutex_unlock(&memcg_max_mutex);
2755
2756 		if (!ret)
2757 			break;
2758
2759 		if (!drained) {
2760 			drain_all_stock(memcg);
2761 			drained = true;
2762 			continue;
2763 		}
2764
2765 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
2766 					GFP_KERNEL, !memsw)) {
2767 			ret = -EBUSY;
2768 			break;
2769 		}
2770 	} while (true);
2771
2772 	if (!ret && enlarge)
2773 		memcg_oom_recover(memcg);
2774
2775 	return ret;
2776 }
2777
2778 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2779 					    gfp_t gfp_mask,
2780 					    unsigned long *total_scanned)
2781 {
2782 	unsigned long nr_reclaimed = 0;
2783 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
2784 	unsigned long reclaimed;
2785 	int loop = 0;
2786 	struct mem_cgroup_tree_per_node *mctz;
2787 	unsigned long excess;
2788 	unsigned long nr_scanned;
2789
2790 	if (order > 0)
2791 		return 0;
2792
2793 	mctz = soft_limit_tree_node(pgdat->node_id);
2794
2795 	/*
2796 	 * Do not even bother to check the largest node if the root
2797 	 * is empty. Do it locklessly to prevent lock bouncing. Races
2798 	 * are acceptable as the soft limit is best effort anyway.
2799 	 */
2800 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2801 		return 0;
2802
2803 	/*
2804 	 * This loop can run for a while, especially if mem_cgroups
2805 	 * continuously keep exceeding their soft limit and putting the
2806 	 * system under pressure.
2807 	 */
2808 	do {
2809 		if (next_mz)
2810 			mz = next_mz;
2811 		else
2812 			mz = mem_cgroup_largest_soft_limit_node(mctz);
2813 		if (!mz)
2814 			break;
2815
2816 		nr_scanned = 0;
2817 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2818 						    gfp_mask, &nr_scanned);
2819 		nr_reclaimed += reclaimed;
2820 		*total_scanned += nr_scanned;
2821 		spin_lock_irq(&mctz->lock);
2822 		__mem_cgroup_remove_exceeded(mz, mctz);
2823
2824 		/*
2825 		 * If we failed to reclaim anything from this memory cgroup
2826 		 * it is time to move on to the next cgroup.
2827 		 */
2828 		next_mz = NULL;
2829 		if (!reclaimed)
2830 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2831
2832 		excess = soft_limit_excess(mz->memcg);
2833 		/*
2834 		 * One school of thought says that we should not add
2835 		 * back the node to the tree if reclaim returns 0.
2836 		 * But our reclaim could return 0 simply because the
2837 		 * current priority exposes only a smaller subset of
2838 		 * memory to reclaim from. Consider this a longer-term
2839 		 * TODO.
2840 		 */
2841 		/* If excess == 0, no tree ops */
2842 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2843 		spin_unlock_irq(&mctz->lock);
2844 		css_put(&mz->memcg->css);
2845 		loop++;
2846 		/*
2847 		 * Could not reclaim anything and there are no more
2848 		 * mem cgroups to try, or we seem to be looping without
2849 		 * reclaiming anything.
2850	 */
2851 		if (!nr_reclaimed &&
2852 		    (next_mz == NULL ||
2853 		     loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2854 			break;
2855 	} while (!nr_reclaimed);
2856 	if (next_mz)
2857 		css_put(&next_mz->memcg->css);
2858 	return nr_reclaimed;
2859 }
2860
2861 /*
2862  * Test whether @memcg has children, dead or alive. Note that this
2863  * function doesn't care whether @memcg has use_hierarchy enabled and
2864  * returns %true if there are child csses according to the cgroup
2865  * hierarchy. Testing use_hierarchy is the caller's responsibility.
2866  */
2867 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2868 {
2869 	bool ret;
2870
2871 	rcu_read_lock();
2872 	ret = css_next_child(NULL, &memcg->css);
2873 	rcu_read_unlock();
2874 	return ret;
2875 }
2876
2877 /*
2878  * Reclaims as many pages from the given memcg as possible.
2879  *
2880  * Caller is responsible for holding css reference for memcg.
2881  */
2882 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2883 {
2884 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2885
2886 	/* we call try-to-free pages to make this cgroup empty */
2887 	lru_add_drain_all();
2888
2889 	drain_all_stock(memcg);
2890
2891 	/* try to free all pages in this cgroup */
2892 	while (nr_retries && page_counter_read(&memcg->memory)) {
2893 		int progress;
2894
2895 		if (signal_pending(current))
2896 			return -EINTR;
2897
2898 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
2899 							GFP_KERNEL, true);
2900 		if (!progress) {
2901 			nr_retries--;
2902 			/* maybe some writeback is necessary */
2903 			congestion_wait(BLK_RW_ASYNC, HZ/10);
2904 		}
2905
2906 	}
2907
2908 	return 0;
2909 }
2910
2911 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2912 					    char *buf, size_t nbytes,
2913 					    loff_t off)
2914 {
2915 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2916
2917 	if (mem_cgroup_is_root(memcg))
2918 		return -EINVAL;
2919 	return mem_cgroup_force_empty(memcg) ?: nbytes;
2920 }
2921
2922 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2923 				     struct cftype *cft)
2924 {
2925 	return mem_cgroup_from_css(css)->use_hierarchy;
2926 }
2927
2928 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2929 				      struct cftype *cft, u64 val)
2930 {
2931 	int retval = 0;
2932 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2933 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2934
2935 	if (memcg->use_hierarchy == val)
2936 		return 0;
2937
2938 	/*
2939 	 * If the parent's use_hierarchy is set, we can't make any
2940 	 * modifications in the child subtrees. If it is unset, then
2941 	 * the change can occur, provided the current cgroup has no children.
2942 	 *
2943 	 * For the root cgroup, parent_memcg is NULL; we allow the value to
2944 	 * be set if there are no children.
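	 *
	 * Editorial summary of the cases handled below: if this is the
	 * root cgroup or the parent has use_hierarchy unset, writing 0
	 * or 1 succeeds when this cgroup has no children and fails with
	 * -EBUSY when it does; any other combination fails with -EINVAL.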
2945 */ 2946 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2947 (val == 1 || val == 0)) { 2948 if (!memcg_has_children(memcg)) 2949 memcg->use_hierarchy = val; 2950 else 2951 retval = -EBUSY; 2952 } else 2953 retval = -EINVAL; 2954 2955 return retval; 2956 } 2957 2958 struct accumulated_stats { 2959 unsigned long stat[MEMCG_NR_STAT]; 2960 unsigned long events[NR_VM_EVENT_ITEMS]; 2961 unsigned long lru_pages[NR_LRU_LISTS]; 2962 const unsigned int *stats_array; 2963 const unsigned int *events_array; 2964 int stats_size; 2965 int events_size; 2966 }; 2967 2968 static void accumulate_memcg_tree(struct mem_cgroup *memcg, 2969 struct accumulated_stats *acc) 2970 { 2971 struct mem_cgroup *mi; 2972 int i; 2973 2974 for_each_mem_cgroup_tree(mi, memcg) { 2975 for (i = 0; i < acc->stats_size; i++) 2976 acc->stat[i] += memcg_page_state(mi, 2977 acc->stats_array ? acc->stats_array[i] : i); 2978 2979 for (i = 0; i < acc->events_size; i++) 2980 acc->events[i] += memcg_sum_events(mi, 2981 acc->events_array ? acc->events_array[i] : i); 2982 2983 for (i = 0; i < NR_LRU_LISTS; i++) 2984 acc->lru_pages[i] += 2985 mem_cgroup_nr_lru_pages(mi, BIT(i)); 2986 } 2987 } 2988 2989 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2990 { 2991 unsigned long val = 0; 2992 2993 if (mem_cgroup_is_root(memcg)) { 2994 struct mem_cgroup *iter; 2995 2996 for_each_mem_cgroup_tree(iter, memcg) { 2997 val += memcg_page_state(iter, MEMCG_CACHE); 2998 val += memcg_page_state(iter, MEMCG_RSS); 2999 if (swap) 3000 val += memcg_page_state(iter, MEMCG_SWAP); 3001 } 3002 } else { 3003 if (!swap) 3004 val = page_counter_read(&memcg->memory); 3005 else 3006 val = page_counter_read(&memcg->memsw); 3007 } 3008 return val; 3009 } 3010 3011 enum { 3012 RES_USAGE, 3013 RES_LIMIT, 3014 RES_MAX_USAGE, 3015 RES_FAILCNT, 3016 RES_SOFT_LIMIT, 3017 }; 3018 3019 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3020 struct cftype *cft) 3021 { 3022 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3023 struct page_counter *counter; 3024 3025 switch (MEMFILE_TYPE(cft->private)) { 3026 case _MEM: 3027 counter = &memcg->memory; 3028 break; 3029 case _MEMSWAP: 3030 counter = &memcg->memsw; 3031 break; 3032 case _KMEM: 3033 counter = &memcg->kmem; 3034 break; 3035 case _TCP: 3036 counter = &memcg->tcpmem; 3037 break; 3038 default: 3039 BUG(); 3040 } 3041 3042 switch (MEMFILE_ATTR(cft->private)) { 3043 case RES_USAGE: 3044 if (counter == &memcg->memory) 3045 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3046 if (counter == &memcg->memsw) 3047 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3048 return (u64)page_counter_read(counter) * PAGE_SIZE; 3049 case RES_LIMIT: 3050 return (u64)counter->max * PAGE_SIZE; 3051 case RES_MAX_USAGE: 3052 return (u64)counter->watermark * PAGE_SIZE; 3053 case RES_FAILCNT: 3054 return counter->failcnt; 3055 case RES_SOFT_LIMIT: 3056 return (u64)memcg->soft_limit * PAGE_SIZE; 3057 default: 3058 BUG(); 3059 } 3060 } 3061 3062 #ifdef CONFIG_MEMCG_KMEM 3063 static int memcg_online_kmem(struct mem_cgroup *memcg) 3064 { 3065 int memcg_id; 3066 3067 if (cgroup_memory_nokmem) 3068 return 0; 3069 3070 BUG_ON(memcg->kmemcg_id >= 0); 3071 BUG_ON(memcg->kmem_state); 3072 3073 memcg_id = memcg_alloc_cache_id(); 3074 if (memcg_id < 0) 3075 return memcg_id; 3076 3077 static_branch_inc(&memcg_kmem_enabled_key); 3078 /* 3079 * A memory cgroup is considered kmem-online as soon as it gets 3080 * kmemcg_id. 
Setting the id after enabling static branching will 3081 * guarantee no one starts accounting before all call sites are 3082 * patched. 3083 */ 3084 memcg->kmemcg_id = memcg_id; 3085 memcg->kmem_state = KMEM_ONLINE; 3086 INIT_LIST_HEAD(&memcg->kmem_caches); 3087 3088 return 0; 3089 } 3090 3091 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3092 { 3093 struct cgroup_subsys_state *css; 3094 struct mem_cgroup *parent, *child; 3095 int kmemcg_id; 3096 3097 if (memcg->kmem_state != KMEM_ONLINE) 3098 return; 3099 /* 3100 * Clear the online state before clearing memcg_caches array 3101 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 3102 * guarantees that no cache will be created for this cgroup 3103 * after we are done (see memcg_create_kmem_cache()). 3104 */ 3105 memcg->kmem_state = KMEM_ALLOCATED; 3106 3107 memcg_deactivate_kmem_caches(memcg); 3108 3109 kmemcg_id = memcg->kmemcg_id; 3110 BUG_ON(kmemcg_id < 0); 3111 3112 parent = parent_mem_cgroup(memcg); 3113 if (!parent) 3114 parent = root_mem_cgroup; 3115 3116 /* 3117 * Change kmemcg_id of this cgroup and all its descendants to the 3118 * parent's id, and then move all entries from this cgroup's list_lrus 3119 * to ones of the parent. After we have finished, all list_lrus 3120 * corresponding to this cgroup are guaranteed to remain empty. The 3121 * ordering is imposed by list_lru_node->lock taken by 3122 * memcg_drain_all_list_lrus(). 3123 */ 3124 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3125 css_for_each_descendant_pre(css, &memcg->css) { 3126 child = mem_cgroup_from_css(css); 3127 BUG_ON(child->kmemcg_id != kmemcg_id); 3128 child->kmemcg_id = parent->kmemcg_id; 3129 if (!memcg->use_hierarchy) 3130 break; 3131 } 3132 rcu_read_unlock(); 3133 3134 memcg_drain_all_list_lrus(kmemcg_id, parent); 3135 3136 memcg_free_cache_id(kmemcg_id); 3137 } 3138 3139 static void memcg_free_kmem(struct mem_cgroup *memcg) 3140 { 3141 /* css_alloc() failed, offlining didn't happen */ 3142 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3143 memcg_offline_kmem(memcg); 3144 3145 if (memcg->kmem_state == KMEM_ALLOCATED) { 3146 memcg_destroy_kmem_caches(memcg); 3147 static_branch_dec(&memcg_kmem_enabled_key); 3148 WARN_ON(page_counter_read(&memcg->kmem)); 3149 } 3150 } 3151 #else 3152 static int memcg_online_kmem(struct mem_cgroup *memcg) 3153 { 3154 return 0; 3155 } 3156 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3157 { 3158 } 3159 static void memcg_free_kmem(struct mem_cgroup *memcg) 3160 { 3161 } 3162 #endif /* CONFIG_MEMCG_KMEM */ 3163 3164 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3165 unsigned long max) 3166 { 3167 int ret; 3168 3169 mutex_lock(&memcg_max_mutex); 3170 ret = page_counter_set_max(&memcg->kmem, max); 3171 mutex_unlock(&memcg_max_mutex); 3172 return ret; 3173 } 3174 3175 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3176 { 3177 int ret; 3178 3179 mutex_lock(&memcg_max_mutex); 3180 3181 ret = page_counter_set_max(&memcg->tcpmem, max); 3182 if (ret) 3183 goto out; 3184 3185 if (!memcg->tcpmem_active) { 3186 /* 3187 * The active flag needs to be written after the static_key 3188 * update. This is what guarantees that the socket activation 3189 * function is the last one to run. See mem_cgroup_sk_alloc() 3190 * for details, and note that we don't mark any socket as 3191 * belonging to this memcg until that flag is up. 3192 * 3193 * We need to do this, because static_keys will span multiple 3194 * sites, but we can't control their order. 
If we mark a socket 3195 * as accounted, but the accounting functions are not patched in 3196 * yet, we'll lose accounting. 3197 * 3198 * We never race with the readers in mem_cgroup_sk_alloc(), 3199 * because when this value change, the code to process it is not 3200 * patched in yet. 3201 */ 3202 static_branch_inc(&memcg_sockets_enabled_key); 3203 memcg->tcpmem_active = true; 3204 } 3205 out: 3206 mutex_unlock(&memcg_max_mutex); 3207 return ret; 3208 } 3209 3210 /* 3211 * The user of this function is... 3212 * RES_LIMIT. 3213 */ 3214 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3215 char *buf, size_t nbytes, loff_t off) 3216 { 3217 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3218 unsigned long nr_pages; 3219 int ret; 3220 3221 buf = strstrip(buf); 3222 ret = page_counter_memparse(buf, "-1", &nr_pages); 3223 if (ret) 3224 return ret; 3225 3226 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3227 case RES_LIMIT: 3228 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3229 ret = -EINVAL; 3230 break; 3231 } 3232 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3233 case _MEM: 3234 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3235 break; 3236 case _MEMSWAP: 3237 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3238 break; 3239 case _KMEM: 3240 ret = memcg_update_kmem_max(memcg, nr_pages); 3241 break; 3242 case _TCP: 3243 ret = memcg_update_tcp_max(memcg, nr_pages); 3244 break; 3245 } 3246 break; 3247 case RES_SOFT_LIMIT: 3248 memcg->soft_limit = nr_pages; 3249 ret = 0; 3250 break; 3251 } 3252 return ret ?: nbytes; 3253 } 3254 3255 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3256 size_t nbytes, loff_t off) 3257 { 3258 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3259 struct page_counter *counter; 3260 3261 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3262 case _MEM: 3263 counter = &memcg->memory; 3264 break; 3265 case _MEMSWAP: 3266 counter = &memcg->memsw; 3267 break; 3268 case _KMEM: 3269 counter = &memcg->kmem; 3270 break; 3271 case _TCP: 3272 counter = &memcg->tcpmem; 3273 break; 3274 default: 3275 BUG(); 3276 } 3277 3278 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3279 case RES_MAX_USAGE: 3280 page_counter_reset_watermark(counter); 3281 break; 3282 case RES_FAILCNT: 3283 counter->failcnt = 0; 3284 break; 3285 default: 3286 BUG(); 3287 } 3288 3289 return nbytes; 3290 } 3291 3292 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3293 struct cftype *cft) 3294 { 3295 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3296 } 3297 3298 #ifdef CONFIG_MMU 3299 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3300 struct cftype *cft, u64 val) 3301 { 3302 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3303 3304 if (val & ~MOVE_MASK) 3305 return -EINVAL; 3306 3307 /* 3308 * No kind of locking is needed in here, because ->can_attach() will 3309 * check this value once in the beginning of the process, and then carry 3310 * on with stale data. This means that changes to this value will only 3311 * affect task migrations starting after the change. 
3312 */ 3313 memcg->move_charge_at_immigrate = val; 3314 return 0; 3315 } 3316 #else 3317 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3318 struct cftype *cft, u64 val) 3319 { 3320 return -ENOSYS; 3321 } 3322 #endif 3323 3324 #ifdef CONFIG_NUMA 3325 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3326 { 3327 struct numa_stat { 3328 const char *name; 3329 unsigned int lru_mask; 3330 }; 3331 3332 static const struct numa_stat stats[] = { 3333 { "total", LRU_ALL }, 3334 { "file", LRU_ALL_FILE }, 3335 { "anon", LRU_ALL_ANON }, 3336 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3337 }; 3338 const struct numa_stat *stat; 3339 int nid; 3340 unsigned long nr; 3341 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3342 3343 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3344 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3345 seq_printf(m, "%s=%lu", stat->name, nr); 3346 for_each_node_state(nid, N_MEMORY) { 3347 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3348 stat->lru_mask); 3349 seq_printf(m, " N%d=%lu", nid, nr); 3350 } 3351 seq_putc(m, '\n'); 3352 } 3353 3354 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3355 struct mem_cgroup *iter; 3356 3357 nr = 0; 3358 for_each_mem_cgroup_tree(iter, memcg) 3359 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3360 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3361 for_each_node_state(nid, N_MEMORY) { 3362 nr = 0; 3363 for_each_mem_cgroup_tree(iter, memcg) 3364 nr += mem_cgroup_node_nr_lru_pages( 3365 iter, nid, stat->lru_mask); 3366 seq_printf(m, " N%d=%lu", nid, nr); 3367 } 3368 seq_putc(m, '\n'); 3369 } 3370 3371 return 0; 3372 } 3373 #endif /* CONFIG_NUMA */ 3374 3375 /* Universal VM events cgroup1 shows, original sort order */ 3376 static const unsigned int memcg1_events[] = { 3377 PGPGIN, 3378 PGPGOUT, 3379 PGFAULT, 3380 PGMAJFAULT, 3381 }; 3382 3383 static const char *const memcg1_event_names[] = { 3384 "pgpgin", 3385 "pgpgout", 3386 "pgfault", 3387 "pgmajfault", 3388 }; 3389 3390 static int memcg_stat_show(struct seq_file *m, void *v) 3391 { 3392 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3393 unsigned long memory, memsw; 3394 struct mem_cgroup *mi; 3395 unsigned int i; 3396 struct accumulated_stats acc; 3397 3398 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3399 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3400 3401 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3402 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3403 continue; 3404 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 3405 memcg_page_state(memcg, memcg1_stats[i]) * 3406 PAGE_SIZE); 3407 } 3408 3409 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3410 seq_printf(m, "%s %lu\n", memcg1_event_names[i], 3411 memcg_sum_events(memcg, memcg1_events[i])); 3412 3413 for (i = 0; i < NR_LRU_LISTS; i++) 3414 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3415 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3416 3417 /* Hierarchical information */ 3418 memory = memsw = PAGE_COUNTER_MAX; 3419 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3420 memory = min(memory, mi->memory.max); 3421 memsw = min(memsw, mi->memsw.max); 3422 } 3423 seq_printf(m, "hierarchical_memory_limit %llu\n", 3424 (u64)memory * PAGE_SIZE); 3425 if (do_memsw_account()) 3426 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3427 (u64)memsw * PAGE_SIZE); 3428 3429 memset(&acc, 0, sizeof(acc)); 3430 acc.stats_size = ARRAY_SIZE(memcg1_stats); 3431 
acc.stats_array = memcg1_stats; 3432 acc.events_size = ARRAY_SIZE(memcg1_events); 3433 acc.events_array = memcg1_events; 3434 accumulate_memcg_tree(memcg, &acc); 3435 3436 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3437 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3438 continue; 3439 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 3440 (u64)acc.stat[i] * PAGE_SIZE); 3441 } 3442 3443 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3444 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], 3445 (u64)acc.events[i]); 3446 3447 for (i = 0; i < NR_LRU_LISTS; i++) 3448 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], 3449 (u64)acc.lru_pages[i] * PAGE_SIZE); 3450 3451 #ifdef CONFIG_DEBUG_VM 3452 { 3453 pg_data_t *pgdat; 3454 struct mem_cgroup_per_node *mz; 3455 struct zone_reclaim_stat *rstat; 3456 unsigned long recent_rotated[2] = {0, 0}; 3457 unsigned long recent_scanned[2] = {0, 0}; 3458 3459 for_each_online_pgdat(pgdat) { 3460 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3461 rstat = &mz->lruvec.reclaim_stat; 3462 3463 recent_rotated[0] += rstat->recent_rotated[0]; 3464 recent_rotated[1] += rstat->recent_rotated[1]; 3465 recent_scanned[0] += rstat->recent_scanned[0]; 3466 recent_scanned[1] += rstat->recent_scanned[1]; 3467 } 3468 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3469 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3470 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3471 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3472 } 3473 #endif 3474 3475 return 0; 3476 } 3477 3478 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3479 struct cftype *cft) 3480 { 3481 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3482 3483 return mem_cgroup_swappiness(memcg); 3484 } 3485 3486 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3487 struct cftype *cft, u64 val) 3488 { 3489 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3490 3491 if (val > 100) 3492 return -EINVAL; 3493 3494 if (css->parent) 3495 memcg->swappiness = val; 3496 else 3497 vm_swappiness = val; 3498 3499 return 0; 3500 } 3501 3502 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3503 { 3504 struct mem_cgroup_threshold_ary *t; 3505 unsigned long usage; 3506 int i; 3507 3508 rcu_read_lock(); 3509 if (!swap) 3510 t = rcu_dereference(memcg->thresholds.primary); 3511 else 3512 t = rcu_dereference(memcg->memsw_thresholds.primary); 3513 3514 if (!t) 3515 goto unlock; 3516 3517 usage = mem_cgroup_usage(memcg, swap); 3518 3519 /* 3520 * current_threshold points to threshold just below or equal to usage. 3521 * If it's not true, a threshold was crossed after last 3522 * call of __mem_cgroup_threshold(). 3523 */ 3524 i = t->current_threshold; 3525 3526 /* 3527 * Iterate backward over array of thresholds starting from 3528 * current_threshold and check if a threshold is crossed. 3529 * If none of thresholds below usage is crossed, we read 3530 * only one element of the array here. 3531 */ 3532 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3533 eventfd_signal(t->entries[i].eventfd, 1); 3534 3535 /* i = current_threshold + 1 */ 3536 i++; 3537 3538 /* 3539 * Iterate forward over array of thresholds starting from 3540 * current_threshold+1 and check if a threshold is crossed. 3541 * If none of thresholds above usage is crossed, we read 3542 * only one element of the array here. 
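	 *
	 * Editorial worked example (not part of the original comment):
	 * with thresholds {4M, 8M, 16M}, current_threshold at the 4M
	 * entry and usage rising to 10M, the backward scan above signals
	 * nothing, this forward scan signals the 8M eventfd and stops at
	 * 16M, and current_threshold ends up at the 8M entry.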
3543 */ 3544 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3545 eventfd_signal(t->entries[i].eventfd, 1); 3546 3547 /* Update current_threshold */ 3548 t->current_threshold = i - 1; 3549 unlock: 3550 rcu_read_unlock(); 3551 } 3552 3553 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3554 { 3555 while (memcg) { 3556 __mem_cgroup_threshold(memcg, false); 3557 if (do_memsw_account()) 3558 __mem_cgroup_threshold(memcg, true); 3559 3560 memcg = parent_mem_cgroup(memcg); 3561 } 3562 } 3563 3564 static int compare_thresholds(const void *a, const void *b) 3565 { 3566 const struct mem_cgroup_threshold *_a = a; 3567 const struct mem_cgroup_threshold *_b = b; 3568 3569 if (_a->threshold > _b->threshold) 3570 return 1; 3571 3572 if (_a->threshold < _b->threshold) 3573 return -1; 3574 3575 return 0; 3576 } 3577 3578 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3579 { 3580 struct mem_cgroup_eventfd_list *ev; 3581 3582 spin_lock(&memcg_oom_lock); 3583 3584 list_for_each_entry(ev, &memcg->oom_notify, list) 3585 eventfd_signal(ev->eventfd, 1); 3586 3587 spin_unlock(&memcg_oom_lock); 3588 return 0; 3589 } 3590 3591 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3592 { 3593 struct mem_cgroup *iter; 3594 3595 for_each_mem_cgroup_tree(iter, memcg) 3596 mem_cgroup_oom_notify_cb(iter); 3597 } 3598 3599 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3600 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3601 { 3602 struct mem_cgroup_thresholds *thresholds; 3603 struct mem_cgroup_threshold_ary *new; 3604 unsigned long threshold; 3605 unsigned long usage; 3606 int i, size, ret; 3607 3608 ret = page_counter_memparse(args, "-1", &threshold); 3609 if (ret) 3610 return ret; 3611 3612 mutex_lock(&memcg->thresholds_lock); 3613 3614 if (type == _MEM) { 3615 thresholds = &memcg->thresholds; 3616 usage = mem_cgroup_usage(memcg, false); 3617 } else if (type == _MEMSWAP) { 3618 thresholds = &memcg->memsw_thresholds; 3619 usage = mem_cgroup_usage(memcg, true); 3620 } else 3621 BUG(); 3622 3623 /* Check if a threshold crossed before adding a new one */ 3624 if (thresholds->primary) 3625 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3626 3627 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3628 3629 /* Allocate memory for new array of thresholds */ 3630 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3631 GFP_KERNEL); 3632 if (!new) { 3633 ret = -ENOMEM; 3634 goto unlock; 3635 } 3636 new->size = size; 3637 3638 /* Copy thresholds (if any) to new array */ 3639 if (thresholds->primary) { 3640 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3641 sizeof(struct mem_cgroup_threshold)); 3642 } 3643 3644 /* Add new threshold */ 3645 new->entries[size - 1].eventfd = eventfd; 3646 new->entries[size - 1].threshold = threshold; 3647 3648 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3649 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3650 compare_thresholds, NULL); 3651 3652 /* Find current threshold */ 3653 new->current_threshold = -1; 3654 for (i = 0; i < size; i++) { 3655 if (new->entries[i].threshold <= usage) { 3656 /* 3657 * new->current_threshold will not be used until 3658 * rcu_assign_pointer(), so it's safe to increment 3659 * it here. 
3660	 */
3661 			++new->current_threshold;
3662 		} else
3663 			break;
3664 	}
3665
3666 	/* Free old spare buffer and save old primary buffer as spare */
3667 	kfree(thresholds->spare);
3668 	thresholds->spare = thresholds->primary;
3669
3670 	rcu_assign_pointer(thresholds->primary, new);
3671
3672 	/* To be sure that nobody uses thresholds */
3673 	synchronize_rcu();
3674
3675 unlock:
3676 	mutex_unlock(&memcg->thresholds_lock);
3677
3678 	return ret;
3679 }
3680
3681 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3682 	struct eventfd_ctx *eventfd, const char *args)
3683 {
3684 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3685 }
3686
3687 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3688 	struct eventfd_ctx *eventfd, const char *args)
3689 {
3690 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3691 }
3692
3693 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3694 	struct eventfd_ctx *eventfd, enum res_type type)
3695 {
3696 	struct mem_cgroup_thresholds *thresholds;
3697 	struct mem_cgroup_threshold_ary *new;
3698 	unsigned long usage;
3699 	int i, j, size;
3700
3701 	mutex_lock(&memcg->thresholds_lock);
3702
3703 	if (type == _MEM) {
3704 		thresholds = &memcg->thresholds;
3705 		usage = mem_cgroup_usage(memcg, false);
3706 	} else if (type == _MEMSWAP) {
3707 		thresholds = &memcg->memsw_thresholds;
3708 		usage = mem_cgroup_usage(memcg, true);
3709 	} else
3710 		BUG();
3711
3712 	if (!thresholds->primary)
3713 		goto unlock;
3714
3715 	/* Check if a threshold was crossed before removing one */
3716 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3717
3718 	/* Calculate the new number of thresholds */
3719 	size = 0;
3720 	for (i = 0; i < thresholds->primary->size; i++) {
3721 		if (thresholds->primary->entries[i].eventfd != eventfd)
3722 			size++;
3723 	}
3724
3725 	new = thresholds->spare;
3726
3727 	/* Set thresholds array to NULL if we don't have thresholds */
3728 	if (!size) {
3729 		kfree(new);
3730 		new = NULL;
3731 		goto swap_buffers;
3732 	}
3733
3734 	new->size = size;
3735
3736 	/* Copy thresholds and find current threshold */
3737 	new->current_threshold = -1;
3738 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3739 		if (thresholds->primary->entries[i].eventfd == eventfd)
3740 			continue;
3741
3742 		new->entries[j] = thresholds->primary->entries[i];
3743 		if (new->entries[j].threshold <= usage) {
3744 			/*
3745 			 * new->current_threshold will not be used
3746 			 * until rcu_assign_pointer(), so it's safe to increment
3747 			 * it here.
3748 */ 3749 ++new->current_threshold; 3750 } 3751 j++; 3752 } 3753 3754 swap_buffers: 3755 /* Swap primary and spare array */ 3756 thresholds->spare = thresholds->primary; 3757 3758 rcu_assign_pointer(thresholds->primary, new); 3759 3760 /* To be sure that nobody uses thresholds */ 3761 synchronize_rcu(); 3762 3763 /* If all events are unregistered, free the spare array */ 3764 if (!new) { 3765 kfree(thresholds->spare); 3766 thresholds->spare = NULL; 3767 } 3768 unlock: 3769 mutex_unlock(&memcg->thresholds_lock); 3770 } 3771 3772 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3773 struct eventfd_ctx *eventfd) 3774 { 3775 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3776 } 3777 3778 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3779 struct eventfd_ctx *eventfd) 3780 { 3781 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3782 } 3783 3784 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3785 struct eventfd_ctx *eventfd, const char *args) 3786 { 3787 struct mem_cgroup_eventfd_list *event; 3788 3789 event = kmalloc(sizeof(*event), GFP_KERNEL); 3790 if (!event) 3791 return -ENOMEM; 3792 3793 spin_lock(&memcg_oom_lock); 3794 3795 event->eventfd = eventfd; 3796 list_add(&event->list, &memcg->oom_notify); 3797 3798 /* already in OOM ? */ 3799 if (memcg->under_oom) 3800 eventfd_signal(eventfd, 1); 3801 spin_unlock(&memcg_oom_lock); 3802 3803 return 0; 3804 } 3805 3806 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3807 struct eventfd_ctx *eventfd) 3808 { 3809 struct mem_cgroup_eventfd_list *ev, *tmp; 3810 3811 spin_lock(&memcg_oom_lock); 3812 3813 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3814 if (ev->eventfd == eventfd) { 3815 list_del(&ev->list); 3816 kfree(ev); 3817 } 3818 } 3819 3820 spin_unlock(&memcg_oom_lock); 3821 } 3822 3823 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3824 { 3825 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3826 3827 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3828 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3829 seq_printf(sf, "oom_kill %lu\n", 3830 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 3831 return 0; 3832 } 3833 3834 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3835 struct cftype *cft, u64 val) 3836 { 3837 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3838 3839 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3840 if (!css->parent || !((val == 0) || (val == 1))) 3841 return -EINVAL; 3842 3843 memcg->oom_kill_disable = val; 3844 if (!val) 3845 memcg_oom_recover(memcg); 3846 3847 return 0; 3848 } 3849 3850 #ifdef CONFIG_CGROUP_WRITEBACK 3851 3852 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3853 { 3854 return wb_domain_init(&memcg->cgwb_domain, gfp); 3855 } 3856 3857 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3858 { 3859 wb_domain_exit(&memcg->cgwb_domain); 3860 } 3861 3862 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3863 { 3864 wb_domain_size_changed(&memcg->cgwb_domain); 3865 } 3866 3867 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3868 { 3869 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3870 3871 if (!memcg->css.parent) 3872 return NULL; 3873 3874 return &memcg->cgwb_domain; 3875 } 3876 3877 /** 3878 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3879 
* @wb: bdi_writeback in question 3880 * @pfilepages: out parameter for number of file pages 3881 * @pheadroom: out parameter for number of allocatable pages according to memcg 3882 * @pdirty: out parameter for number of dirty pages 3883 * @pwriteback: out parameter for number of pages under writeback 3884 * 3885 * Determine the numbers of file, headroom, dirty, and writeback pages in 3886 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3887 * is a bit more involved. 3888 * 3889 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3890 * headroom is calculated as the lowest headroom of itself and the 3891 * ancestors. Note that this doesn't consider the actual amount of 3892 * available memory in the system. The caller should further cap 3893 * *@pheadroom accordingly. 3894 */ 3895 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3896 unsigned long *pheadroom, unsigned long *pdirty, 3897 unsigned long *pwriteback) 3898 { 3899 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3900 struct mem_cgroup *parent; 3901 3902 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3903 3904 /* this should eventually include NR_UNSTABLE_NFS */ 3905 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3906 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3907 (1 << LRU_ACTIVE_FILE)); 3908 *pheadroom = PAGE_COUNTER_MAX; 3909 3910 while ((parent = parent_mem_cgroup(memcg))) { 3911 unsigned long ceiling = min(memcg->memory.max, memcg->high); 3912 unsigned long used = page_counter_read(&memcg->memory); 3913 3914 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3915 memcg = parent; 3916 } 3917 } 3918 3919 #else /* CONFIG_CGROUP_WRITEBACK */ 3920 3921 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3922 { 3923 return 0; 3924 } 3925 3926 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3927 { 3928 } 3929 3930 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3931 { 3932 } 3933 3934 #endif /* CONFIG_CGROUP_WRITEBACK */ 3935 3936 /* 3937 * DO NOT USE IN NEW FILES. 3938 * 3939 * "cgroup.event_control" implementation. 3940 * 3941 * This is way over-engineered. It tries to support fully configurable 3942 * events for each user. Such level of flexibility is completely 3943 * unnecessary especially in the light of the planned unified hierarchy. 3944 * 3945 * Please deprecate this and replace with something simpler if at all 3946 * possible. 3947 */ 3948 3949 /* 3950 * Unregister event and free resources. 3951 * 3952 * Gets called from workqueue. 3953 */ 3954 static void memcg_event_remove(struct work_struct *work) 3955 { 3956 struct mem_cgroup_event *event = 3957 container_of(work, struct mem_cgroup_event, remove); 3958 struct mem_cgroup *memcg = event->memcg; 3959 3960 remove_wait_queue(event->wqh, &event->wait); 3961 3962 event->unregister_event(memcg, event->eventfd); 3963 3964 /* Notify userspace the event is going away. */ 3965 eventfd_signal(event->eventfd, 1); 3966 3967 eventfd_ctx_put(event->eventfd); 3968 kfree(event); 3969 css_put(&memcg->css); 3970 } 3971 3972 /* 3973 * Gets called on EPOLLHUP on eventfd when user closes it. 3974 * 3975 * Called with wqh->lock held and interrupts disabled. 
3976 */ 3977 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 3978 int sync, void *key) 3979 { 3980 struct mem_cgroup_event *event = 3981 container_of(wait, struct mem_cgroup_event, wait); 3982 struct mem_cgroup *memcg = event->memcg; 3983 __poll_t flags = key_to_poll(key); 3984 3985 if (flags & EPOLLHUP) { 3986 /* 3987 * If the event has been detached at cgroup removal, we 3988 * can simply return knowing the other side will cleanup 3989 * for us. 3990 * 3991 * We can't race against event freeing since the other 3992 * side will require wqh->lock via remove_wait_queue(), 3993 * which we hold. 3994 */ 3995 spin_lock(&memcg->event_list_lock); 3996 if (!list_empty(&event->list)) { 3997 list_del_init(&event->list); 3998 /* 3999 * We are in atomic context, but cgroup_event_remove() 4000 * may sleep, so we have to call it in workqueue. 4001 */ 4002 schedule_work(&event->remove); 4003 } 4004 spin_unlock(&memcg->event_list_lock); 4005 } 4006 4007 return 0; 4008 } 4009 4010 static void memcg_event_ptable_queue_proc(struct file *file, 4011 wait_queue_head_t *wqh, poll_table *pt) 4012 { 4013 struct mem_cgroup_event *event = 4014 container_of(pt, struct mem_cgroup_event, pt); 4015 4016 event->wqh = wqh; 4017 add_wait_queue(wqh, &event->wait); 4018 } 4019 4020 /* 4021 * DO NOT USE IN NEW FILES. 4022 * 4023 * Parse input and register new cgroup event handler. 4024 * 4025 * Input must be in format '<event_fd> <control_fd> <args>'. 4026 * Interpretation of args is defined by control file implementation. 4027 */ 4028 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4029 char *buf, size_t nbytes, loff_t off) 4030 { 4031 struct cgroup_subsys_state *css = of_css(of); 4032 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4033 struct mem_cgroup_event *event; 4034 struct cgroup_subsys_state *cfile_css; 4035 unsigned int efd, cfd; 4036 struct fd efile; 4037 struct fd cfile; 4038 const char *name; 4039 char *endp; 4040 int ret; 4041 4042 buf = strstrip(buf); 4043 4044 efd = simple_strtoul(buf, &endp, 10); 4045 if (*endp != ' ') 4046 return -EINVAL; 4047 buf = endp + 1; 4048 4049 cfd = simple_strtoul(buf, &endp, 10); 4050 if ((*endp != ' ') && (*endp != '\0')) 4051 return -EINVAL; 4052 buf = endp + 1; 4053 4054 event = kzalloc(sizeof(*event), GFP_KERNEL); 4055 if (!event) 4056 return -ENOMEM; 4057 4058 event->memcg = memcg; 4059 INIT_LIST_HEAD(&event->list); 4060 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4061 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4062 INIT_WORK(&event->remove, memcg_event_remove); 4063 4064 efile = fdget(efd); 4065 if (!efile.file) { 4066 ret = -EBADF; 4067 goto out_kfree; 4068 } 4069 4070 event->eventfd = eventfd_ctx_fileget(efile.file); 4071 if (IS_ERR(event->eventfd)) { 4072 ret = PTR_ERR(event->eventfd); 4073 goto out_put_efile; 4074 } 4075 4076 cfile = fdget(cfd); 4077 if (!cfile.file) { 4078 ret = -EBADF; 4079 goto out_put_eventfd; 4080 } 4081 4082 /* the process need read permission on control file */ 4083 /* AV: shouldn't we check that it's been opened for read instead? */ 4084 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4085 if (ret < 0) 4086 goto out_put_cfile; 4087 4088 /* 4089 * Determine the event callbacks and set them in @event. This used 4090 * to be done via struct cftype but cgroup core no longer knows 4091 * about these events. The following is crude but the whole thing 4092 * is for compatibility anyway. 4093 * 4094 * DO NOT ADD NEW FILES. 
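	 *
	 * For reference, the userspace side of this interface looks
	 * roughly like this (a sketch; the cgroup path and the 100M
	 * threshold are purely illustrative):
	 *
	 *	int efd = eventfd(0, 0);
	 *	int cfd = open("/memcg/memory.usage_in_bytes", O_RDONLY);
	 *	int ecfd = open("/memcg/cgroup.event_control", O_WRONLY);
	 *	uint64_t cnt;
	 *	char buf[32];
	 *
	 *	snprintf(buf, sizeof(buf), "%d %d 104857600", efd, cfd);
	 *	write(ecfd, buf, strlen(buf));
	 *	read(efd, &cnt, sizeof(cnt));	// blocks until threshold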
4095 */ 4096 name = cfile.file->f_path.dentry->d_name.name; 4097 4098 if (!strcmp(name, "memory.usage_in_bytes")) { 4099 event->register_event = mem_cgroup_usage_register_event; 4100 event->unregister_event = mem_cgroup_usage_unregister_event; 4101 } else if (!strcmp(name, "memory.oom_control")) { 4102 event->register_event = mem_cgroup_oom_register_event; 4103 event->unregister_event = mem_cgroup_oom_unregister_event; 4104 } else if (!strcmp(name, "memory.pressure_level")) { 4105 event->register_event = vmpressure_register_event; 4106 event->unregister_event = vmpressure_unregister_event; 4107 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4108 event->register_event = memsw_cgroup_usage_register_event; 4109 event->unregister_event = memsw_cgroup_usage_unregister_event; 4110 } else { 4111 ret = -EINVAL; 4112 goto out_put_cfile; 4113 } 4114 4115 /* 4116 * Verify @cfile should belong to @css. Also, remaining events are 4117 * automatically removed on cgroup destruction but the removal is 4118 * asynchronous, so take an extra ref on @css. 4119 */ 4120 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4121 &memory_cgrp_subsys); 4122 ret = -EINVAL; 4123 if (IS_ERR(cfile_css)) 4124 goto out_put_cfile; 4125 if (cfile_css != css) { 4126 css_put(cfile_css); 4127 goto out_put_cfile; 4128 } 4129 4130 ret = event->register_event(memcg, event->eventfd, buf); 4131 if (ret) 4132 goto out_put_css; 4133 4134 vfs_poll(efile.file, &event->pt); 4135 4136 spin_lock(&memcg->event_list_lock); 4137 list_add(&event->list, &memcg->event_list); 4138 spin_unlock(&memcg->event_list_lock); 4139 4140 fdput(cfile); 4141 fdput(efile); 4142 4143 return nbytes; 4144 4145 out_put_css: 4146 css_put(css); 4147 out_put_cfile: 4148 fdput(cfile); 4149 out_put_eventfd: 4150 eventfd_ctx_put(event->eventfd); 4151 out_put_efile: 4152 fdput(efile); 4153 out_kfree: 4154 kfree(event); 4155 4156 return ret; 4157 } 4158 4159 static struct cftype mem_cgroup_legacy_files[] = { 4160 { 4161 .name = "usage_in_bytes", 4162 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4163 .read_u64 = mem_cgroup_read_u64, 4164 }, 4165 { 4166 .name = "max_usage_in_bytes", 4167 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4168 .write = mem_cgroup_reset, 4169 .read_u64 = mem_cgroup_read_u64, 4170 }, 4171 { 4172 .name = "limit_in_bytes", 4173 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4174 .write = mem_cgroup_write, 4175 .read_u64 = mem_cgroup_read_u64, 4176 }, 4177 { 4178 .name = "soft_limit_in_bytes", 4179 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4180 .write = mem_cgroup_write, 4181 .read_u64 = mem_cgroup_read_u64, 4182 }, 4183 { 4184 .name = "failcnt", 4185 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4186 .write = mem_cgroup_reset, 4187 .read_u64 = mem_cgroup_read_u64, 4188 }, 4189 { 4190 .name = "stat", 4191 .seq_show = memcg_stat_show, 4192 }, 4193 { 4194 .name = "force_empty", 4195 .write = mem_cgroup_force_empty_write, 4196 }, 4197 { 4198 .name = "use_hierarchy", 4199 .write_u64 = mem_cgroup_hierarchy_write, 4200 .read_u64 = mem_cgroup_hierarchy_read, 4201 }, 4202 { 4203 .name = "cgroup.event_control", /* XXX: for compat */ 4204 .write = memcg_write_event_control, 4205 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4206 }, 4207 { 4208 .name = "swappiness", 4209 .read_u64 = mem_cgroup_swappiness_read, 4210 .write_u64 = mem_cgroup_swappiness_write, 4211 }, 4212 { 4213 .name = "move_charge_at_immigrate", 4214 .read_u64 = mem_cgroup_move_charge_read, 4215 .write_u64 = mem_cgroup_move_charge_write, 4216 
},
{
	.name = "oom_control",
	.seq_show = mem_cgroup_oom_control_read,
	.write_u64 = mem_cgroup_oom_control_write,
	.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
},
{
	.name = "pressure_level",
},
#ifdef CONFIG_NUMA
{
	.name = "numa_stat",
	.seq_show = memcg_numa_stat_show,
},
#endif
{
	.name = "kmem.limit_in_bytes",
	.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
	.write = mem_cgroup_write,
	.read_u64 = mem_cgroup_read_u64,
},
{
	.name = "kmem.usage_in_bytes",
	.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
	.read_u64 = mem_cgroup_read_u64,
},
{
	.name = "kmem.failcnt",
	.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
	.write = mem_cgroup_reset,
	.read_u64 = mem_cgroup_read_u64,
},
{
	.name = "kmem.max_usage_in_bytes",
	.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
	.write = mem_cgroup_reset,
	.read_u64 = mem_cgroup_read_u64,
},
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
{
	.name = "kmem.slabinfo",
	.seq_start = memcg_slab_start,
	.seq_next = memcg_slab_next,
	.seq_stop = memcg_slab_stop,
	.seq_show = memcg_slab_show,
},
#endif
{
	.name = "kmem.tcp.limit_in_bytes",
	.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
	.write = mem_cgroup_write,
	.read_u64 = mem_cgroup_read_u64,
},
{
	.name = "kmem.tcp.usage_in_bytes",
	.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
	.read_u64 = mem_cgroup_read_u64,
},
{
	.name = "kmem.tcp.failcnt",
	.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
	.write = mem_cgroup_reset,
	.read_u64 = mem_cgroup_read_u64,
},
{
	.name = "kmem.tcp.max_usage_in_bytes",
	.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
	.write = mem_cgroup_reset,
	.read_u64 = mem_cgroup_read_u64,
},
{ },	/* terminate */
};

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are far fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
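 *
 * A sketch of the pairing this results in for such records (not a
 * verbatim call site): swap-out takes ID references so that the ID,
 * and through it the CSS, stays alive across offlining until swap-in:
 *
 *	mem_cgroup_id_get_many(memcg, nr_entries);	// at swap-out
 *	...
 *	mem_cgroup_id_put_many(memcg, nr_entries);	// at swap-in/free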
4312 */ 4313 4314 static DEFINE_IDR(mem_cgroup_idr); 4315 4316 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 4317 { 4318 if (memcg->id.id > 0) { 4319 idr_remove(&mem_cgroup_idr, memcg->id.id); 4320 memcg->id.id = 0; 4321 } 4322 } 4323 4324 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4325 { 4326 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); 4327 atomic_add(n, &memcg->id.ref); 4328 } 4329 4330 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4331 { 4332 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); 4333 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4334 mem_cgroup_id_remove(memcg); 4335 4336 /* Memcg ID pins CSS */ 4337 css_put(&memcg->css); 4338 } 4339 } 4340 4341 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4342 { 4343 mem_cgroup_id_get_many(memcg, 1); 4344 } 4345 4346 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4347 { 4348 mem_cgroup_id_put_many(memcg, 1); 4349 } 4350 4351 /** 4352 * mem_cgroup_from_id - look up a memcg from a memcg id 4353 * @id: the memcg id to look up 4354 * 4355 * Caller must hold rcu_read_lock(). 4356 */ 4357 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4358 { 4359 WARN_ON_ONCE(!rcu_read_lock_held()); 4360 return idr_find(&mem_cgroup_idr, id); 4361 } 4362 4363 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4364 { 4365 struct mem_cgroup_per_node *pn; 4366 int tmp = node; 4367 /* 4368 * This routine is called against possible nodes. 4369 * But it's BUG to call kmalloc() against offline node. 4370 * 4371 * TODO: this routine can waste much memory for nodes which will 4372 * never be onlined. It's better to use memory hotplug callback 4373 * function. 4374 */ 4375 if (!node_state(node, N_NORMAL_MEMORY)) 4376 tmp = -1; 4377 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4378 if (!pn) 4379 return 1; 4380 4381 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); 4382 if (!pn->lruvec_stat_cpu) { 4383 kfree(pn); 4384 return 1; 4385 } 4386 4387 lruvec_init(&pn->lruvec); 4388 pn->usage_in_excess = 0; 4389 pn->on_tree = false; 4390 pn->memcg = memcg; 4391 4392 memcg->nodeinfo[node] = pn; 4393 return 0; 4394 } 4395 4396 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4397 { 4398 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 4399 4400 if (!pn) 4401 return; 4402 4403 free_percpu(pn->lruvec_stat_cpu); 4404 kfree(pn); 4405 } 4406 4407 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4408 { 4409 int node; 4410 4411 for_each_node(node) 4412 free_mem_cgroup_per_node_info(memcg, node); 4413 free_percpu(memcg->stat_cpu); 4414 kfree(memcg); 4415 } 4416 4417 static void mem_cgroup_free(struct mem_cgroup *memcg) 4418 { 4419 memcg_wb_domain_exit(memcg); 4420 __mem_cgroup_free(memcg); 4421 } 4422 4423 static struct mem_cgroup *mem_cgroup_alloc(void) 4424 { 4425 struct mem_cgroup *memcg; 4426 size_t size; 4427 int node; 4428 4429 size = sizeof(struct mem_cgroup); 4430 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4431 4432 memcg = kzalloc(size, GFP_KERNEL); 4433 if (!memcg) 4434 return NULL; 4435 4436 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4437 1, MEM_CGROUP_ID_MAX, 4438 GFP_KERNEL); 4439 if (memcg->id.id < 0) 4440 goto fail; 4441 4442 memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu); 4443 if (!memcg->stat_cpu) 4444 goto fail; 4445 4446 for_each_node(node) 4447 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4448 goto fail; 4449 4450 if 
(memcg_wb_domain_init(memcg, GFP_KERNEL)) 4451 goto fail; 4452 4453 INIT_WORK(&memcg->high_work, high_work_func); 4454 memcg->last_scanned_node = MAX_NUMNODES; 4455 INIT_LIST_HEAD(&memcg->oom_notify); 4456 mutex_init(&memcg->thresholds_lock); 4457 spin_lock_init(&memcg->move_lock); 4458 vmpressure_init(&memcg->vmpressure); 4459 INIT_LIST_HEAD(&memcg->event_list); 4460 spin_lock_init(&memcg->event_list_lock); 4461 memcg->socket_pressure = jiffies; 4462 #ifdef CONFIG_MEMCG_KMEM 4463 memcg->kmemcg_id = -1; 4464 #endif 4465 #ifdef CONFIG_CGROUP_WRITEBACK 4466 INIT_LIST_HEAD(&memcg->cgwb_list); 4467 #endif 4468 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4469 return memcg; 4470 fail: 4471 mem_cgroup_id_remove(memcg); 4472 __mem_cgroup_free(memcg); 4473 return NULL; 4474 } 4475 4476 static struct cgroup_subsys_state * __ref 4477 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4478 { 4479 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4480 struct mem_cgroup *memcg; 4481 long error = -ENOMEM; 4482 4483 memcg = mem_cgroup_alloc(); 4484 if (!memcg) 4485 return ERR_PTR(error); 4486 4487 memcg->high = PAGE_COUNTER_MAX; 4488 memcg->soft_limit = PAGE_COUNTER_MAX; 4489 if (parent) { 4490 memcg->swappiness = mem_cgroup_swappiness(parent); 4491 memcg->oom_kill_disable = parent->oom_kill_disable; 4492 } 4493 if (parent && parent->use_hierarchy) { 4494 memcg->use_hierarchy = true; 4495 page_counter_init(&memcg->memory, &parent->memory); 4496 page_counter_init(&memcg->swap, &parent->swap); 4497 page_counter_init(&memcg->memsw, &parent->memsw); 4498 page_counter_init(&memcg->kmem, &parent->kmem); 4499 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4500 } else { 4501 page_counter_init(&memcg->memory, NULL); 4502 page_counter_init(&memcg->swap, NULL); 4503 page_counter_init(&memcg->memsw, NULL); 4504 page_counter_init(&memcg->kmem, NULL); 4505 page_counter_init(&memcg->tcpmem, NULL); 4506 /* 4507 * Deeper hierachy with use_hierarchy == false doesn't make 4508 * much sense so let cgroup subsystem know about this 4509 * unfortunate state in our controller. 4510 */ 4511 if (parent != root_mem_cgroup) 4512 memory_cgrp_subsys.broken_hierarchy = true; 4513 } 4514 4515 /* The following stuff does not apply to the root */ 4516 if (!parent) { 4517 root_mem_cgroup = memcg; 4518 return &memcg->css; 4519 } 4520 4521 error = memcg_online_kmem(memcg); 4522 if (error) 4523 goto fail; 4524 4525 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4526 static_branch_inc(&memcg_sockets_enabled_key); 4527 4528 return &memcg->css; 4529 fail: 4530 mem_cgroup_id_remove(memcg); 4531 mem_cgroup_free(memcg); 4532 return ERR_PTR(-ENOMEM); 4533 } 4534 4535 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4536 { 4537 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4538 4539 /* 4540 * A memcg must be visible for memcg_expand_shrinker_maps() 4541 * by the time the maps are allocated. So, we allocate maps 4542 * here, when for_each_mem_cgroup() can't skip it. 4543 */ 4544 if (memcg_alloc_shrinker_maps(memcg)) { 4545 mem_cgroup_id_remove(memcg); 4546 return -ENOMEM; 4547 } 4548 4549 /* Online state pins memcg ID, memcg ID pins CSS */ 4550 atomic_set(&memcg->id.ref, 1); 4551 css_get(css); 4552 return 0; 4553 } 4554 4555 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4556 { 4557 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4558 struct mem_cgroup_event *event, *tmp; 4559 4560 /* 4561 * Unregister events and notify userspace. 
4562 * Notify userspace about cgroup removing only after rmdir of cgroup 4563 * directory to avoid race between userspace and kernelspace. 4564 */ 4565 spin_lock(&memcg->event_list_lock); 4566 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4567 list_del_init(&event->list); 4568 schedule_work(&event->remove); 4569 } 4570 spin_unlock(&memcg->event_list_lock); 4571 4572 page_counter_set_min(&memcg->memory, 0); 4573 page_counter_set_low(&memcg->memory, 0); 4574 4575 memcg_offline_kmem(memcg); 4576 wb_memcg_offline(memcg); 4577 4578 mem_cgroup_id_put(memcg); 4579 } 4580 4581 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4582 { 4583 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4584 4585 invalidate_reclaim_iterators(memcg); 4586 } 4587 4588 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4589 { 4590 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4591 4592 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4593 static_branch_dec(&memcg_sockets_enabled_key); 4594 4595 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4596 static_branch_dec(&memcg_sockets_enabled_key); 4597 4598 vmpressure_cleanup(&memcg->vmpressure); 4599 cancel_work_sync(&memcg->high_work); 4600 mem_cgroup_remove_from_trees(memcg); 4601 memcg_free_shrinker_maps(memcg); 4602 memcg_free_kmem(memcg); 4603 mem_cgroup_free(memcg); 4604 } 4605 4606 /** 4607 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4608 * @css: the target css 4609 * 4610 * Reset the states of the mem_cgroup associated with @css. This is 4611 * invoked when the userland requests disabling on the default hierarchy 4612 * but the memcg is pinned through dependency. The memcg should stop 4613 * applying policies and should revert to the vanilla state as it may be 4614 * made visible again. 4615 * 4616 * The current implementation only resets the essential configurations. 4617 * This needs to be expanded to cover all the visible parts. 4618 */ 4619 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4620 { 4621 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4622 4623 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 4624 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 4625 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); 4626 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 4627 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 4628 page_counter_set_min(&memcg->memory, 0); 4629 page_counter_set_low(&memcg->memory, 0); 4630 memcg->high = PAGE_COUNTER_MAX; 4631 memcg->soft_limit = PAGE_COUNTER_MAX; 4632 memcg_wb_domain_size_changed(memcg); 4633 } 4634 4635 #ifdef CONFIG_MMU 4636 /* Handlers for move charge at task migration. 
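 *
 * Legacy-hierarchy usage sketch (the mount point is an illustrative
 * assumption): writing MOVE_ANON | MOVE_FILE, i.e. 3, to the
 * destination group moves both anonymous and file charges along with
 * the migrating task:
 *
 *	echo 3 > /cgroups/memory/dst/memory.move_charge_at_immigrate
 *	echo $PID > /cgroups/memory/dst/tasks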
 */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* Try charges one by one with reclaim, but do not retry */
	while (count--) {
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}

union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
	MC_TARGET_DEVICE,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = _vm_normal_page(vma, addr, ptent, true);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		if (!(mc.flags & MOVE_ANON))
			return NULL;
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
		return NULL;

	/*
	 * Handle MEMORY_DEVICE_PRIVATE, i.e. ZONE_DEVICE pages belonging
	 * to a device. Because they are not accessible by the CPU, they
	 * are stored as special swap entries in the CPU page table.
	 */
	if (is_device_private_entry(ent)) {
		page = device_private_entry_to_page(ent);
		/*
		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which
		 * has a refcount of 1 when free (unlike a normal page).
		 */
		if (!page_ref_add_unless(page, 1, 1))
			return NULL;
		return page;
	}

	/*
	 * Because lookup_swap_cache() updates some statistics counters,
	 * we call find_get_page() on the swap address space directly.
	 */
	page = find_get_page(swap_address_space(ent), swp_offset(ent));
	if (do_memsw_account())
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	mapping = vma->vm_file->f_mapping;
	pgoff = linear_page_index(vma, addr);

	/* page is moved even if it's not RSS of this task (page-faulted). */
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too.
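	 * A find_get_entry() lookup may then return an exceptional radix
	 * tree entry encoding the swap slot instead of a page pointer;
	 * radix_to_swp_entry() converts it back below.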
*/ 4753 if (shmem_mapping(mapping)) { 4754 page = find_get_entry(mapping, pgoff); 4755 if (radix_tree_exceptional_entry(page)) { 4756 swp_entry_t swp = radix_to_swp_entry(page); 4757 if (do_memsw_account()) 4758 *entry = swp; 4759 page = find_get_page(swap_address_space(swp), 4760 swp_offset(swp)); 4761 } 4762 } else 4763 page = find_get_page(mapping, pgoff); 4764 #else 4765 page = find_get_page(mapping, pgoff); 4766 #endif 4767 return page; 4768 } 4769 4770 /** 4771 * mem_cgroup_move_account - move account of the page 4772 * @page: the page 4773 * @compound: charge the page as compound or small page 4774 * @from: mem_cgroup which the page is moved from. 4775 * @to: mem_cgroup which the page is moved to. @from != @to. 4776 * 4777 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 4778 * 4779 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4780 * from old cgroup. 4781 */ 4782 static int mem_cgroup_move_account(struct page *page, 4783 bool compound, 4784 struct mem_cgroup *from, 4785 struct mem_cgroup *to) 4786 { 4787 unsigned long flags; 4788 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4789 int ret; 4790 bool anon; 4791 4792 VM_BUG_ON(from == to); 4793 VM_BUG_ON_PAGE(PageLRU(page), page); 4794 VM_BUG_ON(compound && !PageTransHuge(page)); 4795 4796 /* 4797 * Prevent mem_cgroup_migrate() from looking at 4798 * page->mem_cgroup of its source page while we change it. 4799 */ 4800 ret = -EBUSY; 4801 if (!trylock_page(page)) 4802 goto out; 4803 4804 ret = -EINVAL; 4805 if (page->mem_cgroup != from) 4806 goto out_unlock; 4807 4808 anon = PageAnon(page); 4809 4810 spin_lock_irqsave(&from->move_lock, flags); 4811 4812 if (!anon && page_mapped(page)) { 4813 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages); 4814 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages); 4815 } 4816 4817 /* 4818 * move_lock grabbed above and caller set from->moving_account, so 4819 * mod_memcg_page_state will serialize updates to PageDirty. 4820 * So mapping should be stable for dirty pages. 4821 */ 4822 if (!anon && PageDirty(page)) { 4823 struct address_space *mapping = page_mapping(page); 4824 4825 if (mapping_cap_account_dirty(mapping)) { 4826 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages); 4827 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages); 4828 } 4829 } 4830 4831 if (PageWriteback(page)) { 4832 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages); 4833 __mod_memcg_state(to, NR_WRITEBACK, nr_pages); 4834 } 4835 4836 /* 4837 * It is safe to change page->mem_cgroup here because the page 4838 * is referenced, charged, and isolated - we can't race with 4839 * uncharging, charging, migration, or LRU putback. 
	 */

	/* caller should have done css_get */
	page->mem_cgroup = to;
	spin_unlock_irqrestore(&from->move_lock, flags);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry is stored
 *	(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target
 *	for move charge. If @target is not NULL, the page is stored in
 *	target->page with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *	target for charge migration. If @target is not NULL, the entry is
 *	stored in target->ent.
 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is
 *	MEMORY_DEVICE_PUBLIC or MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE
 *	page and thus not on the LRU). For now such a page is charged
 *	like a regular page would be, as for all intents and purposes it
 *	is just special memory taking the place of a regular page.
 *
 * See Documentation/vm/hmm.txt and include/linux/hmm.h
 *
 * Called with pte lock held.
 */

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only a loose check, without serialization.
		 * mem_cgroup_move_account() checks whether the page is
		 * valid under LRU exclusion.
		 */
		if (page->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (is_device_private_page(page) ||
			    is_device_public_page(page))
				ret = MC_TARGET_DEVICE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/*
	 * There is a swap entry and the page doesn't exist or isn't
	 * charged. But we cannot move a tail page of a THP.
	 */
	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider PMD mapped swapping or file mapped pages because THP
 * does not support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
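 *
 * A typical caller pattern, mirroring the page table walkers below
 * (sketch):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
 *		spin_unlock(ptl);
 *	}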
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	if (unlikely(is_swap_pmd(pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmd));
		return ret;
	}
	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (page->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/*
		 * Note there can not be MC_TARGET_DEVICE for now, as we
		 * do not support transparent huge pages with
		 * MEMORY_DEVICE_PUBLIC or MEMORY_DEVICE_PRIVATE, but this
		 * might change.
		 */
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct mm_walk mem_cgroup_count_precharge_walk = {
		.pmd_entry = mem_cgroup_count_precharge_pte_range,
		.mm = mm,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(0, mm->highest_vm_end,
			&mem_cgroup_count_precharge_walk);
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

		/*
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
		 */
		if (!mem_cgroup_is_root(mc.to))
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
		css_put_many(&mc.to->css, mc.moved_swap);

		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mm_struct *mm = mc.mm;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	mc.mm = NULL;
	spin_unlock(&mc.lock);

	mmput(mm);
}

static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
	struct mem_cgroup *from;
	struct task_struct *leader, *p;
	struct mm_struct *mm;
	unsigned long move_flags;
	int ret = 0;

	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used. Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
	cgroup_taskset_for_each_leader(leader, css, tset) {
		WARN_ON_ONCE(p);
		p = leader;
		memcg = mem_cgroup_from_css(css);
	}
	if (!p)
		return 0;

	/*
	 * We are now committed to this value, whatever it is. Changes in
	 * this tunable will only affect upcoming migrations, not the
	 * current one. So we need to save it, and keep it going.
5127 */ 5128 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5129 if (!move_flags) 5130 return 0; 5131 5132 from = mem_cgroup_from_task(p); 5133 5134 VM_BUG_ON(from == memcg); 5135 5136 mm = get_task_mm(p); 5137 if (!mm) 5138 return 0; 5139 /* We move charges only when we move a owner of the mm */ 5140 if (mm->owner == p) { 5141 VM_BUG_ON(mc.from); 5142 VM_BUG_ON(mc.to); 5143 VM_BUG_ON(mc.precharge); 5144 VM_BUG_ON(mc.moved_charge); 5145 VM_BUG_ON(mc.moved_swap); 5146 5147 spin_lock(&mc.lock); 5148 mc.mm = mm; 5149 mc.from = from; 5150 mc.to = memcg; 5151 mc.flags = move_flags; 5152 spin_unlock(&mc.lock); 5153 /* We set mc.moving_task later */ 5154 5155 ret = mem_cgroup_precharge_mc(mm); 5156 if (ret) 5157 mem_cgroup_clear_mc(); 5158 } else { 5159 mmput(mm); 5160 } 5161 return ret; 5162 } 5163 5164 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5165 { 5166 if (mc.to) 5167 mem_cgroup_clear_mc(); 5168 } 5169 5170 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5171 unsigned long addr, unsigned long end, 5172 struct mm_walk *walk) 5173 { 5174 int ret = 0; 5175 struct vm_area_struct *vma = walk->vma; 5176 pte_t *pte; 5177 spinlock_t *ptl; 5178 enum mc_target_type target_type; 5179 union mc_target target; 5180 struct page *page; 5181 5182 ptl = pmd_trans_huge_lock(pmd, vma); 5183 if (ptl) { 5184 if (mc.precharge < HPAGE_PMD_NR) { 5185 spin_unlock(ptl); 5186 return 0; 5187 } 5188 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5189 if (target_type == MC_TARGET_PAGE) { 5190 page = target.page; 5191 if (!isolate_lru_page(page)) { 5192 if (!mem_cgroup_move_account(page, true, 5193 mc.from, mc.to)) { 5194 mc.precharge -= HPAGE_PMD_NR; 5195 mc.moved_charge += HPAGE_PMD_NR; 5196 } 5197 putback_lru_page(page); 5198 } 5199 put_page(page); 5200 } else if (target_type == MC_TARGET_DEVICE) { 5201 page = target.page; 5202 if (!mem_cgroup_move_account(page, true, 5203 mc.from, mc.to)) { 5204 mc.precharge -= HPAGE_PMD_NR; 5205 mc.moved_charge += HPAGE_PMD_NR; 5206 } 5207 put_page(page); 5208 } 5209 spin_unlock(ptl); 5210 return 0; 5211 } 5212 5213 if (pmd_trans_unstable(pmd)) 5214 return 0; 5215 retry: 5216 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5217 for (; addr != end; addr += PAGE_SIZE) { 5218 pte_t ptent = *(pte++); 5219 bool device = false; 5220 swp_entry_t ent; 5221 5222 if (!mc.precharge) 5223 break; 5224 5225 switch (get_mctgt_type(vma, addr, ptent, &target)) { 5226 case MC_TARGET_DEVICE: 5227 device = true; 5228 /* fall through */ 5229 case MC_TARGET_PAGE: 5230 page = target.page; 5231 /* 5232 * We can have a part of the split pmd here. Moving it 5233 * can be done but it would be too convoluted so simply 5234 * ignore such a partial THP and keep it in original 5235 * memcg. There should be somebody mapping the head. 5236 */ 5237 if (PageTransCompound(page)) 5238 goto put; 5239 if (!device && isolate_lru_page(page)) 5240 goto put; 5241 if (!mem_cgroup_move_account(page, false, 5242 mc.from, mc.to)) { 5243 mc.precharge--; 5244 /* we uncharge from mc.from later. */ 5245 mc.moved_charge++; 5246 } 5247 if (!device) 5248 putback_lru_page(page); 5249 put: /* get_mctgt_type() gets the page */ 5250 put_page(page); 5251 break; 5252 case MC_TARGET_SWAP: 5253 ent = target.ent; 5254 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5255 mc.precharge--; 5256 /* we fixup refcnts and charges later. 
*/ 5257 mc.moved_swap++; 5258 } 5259 break; 5260 default: 5261 break; 5262 } 5263 } 5264 pte_unmap_unlock(pte - 1, ptl); 5265 cond_resched(); 5266 5267 if (addr != end) { 5268 /* 5269 * We have consumed all precharges we got in can_attach(). 5270 * We try charge one by one, but don't do any additional 5271 * charges to mc.to if we have failed in charge once in attach() 5272 * phase. 5273 */ 5274 ret = mem_cgroup_do_precharge(1); 5275 if (!ret) 5276 goto retry; 5277 } 5278 5279 return ret; 5280 } 5281 5282 static void mem_cgroup_move_charge(void) 5283 { 5284 struct mm_walk mem_cgroup_move_charge_walk = { 5285 .pmd_entry = mem_cgroup_move_charge_pte_range, 5286 .mm = mc.mm, 5287 }; 5288 5289 lru_add_drain_all(); 5290 /* 5291 * Signal lock_page_memcg() to take the memcg's move_lock 5292 * while we're moving its pages to another memcg. Then wait 5293 * for already started RCU-only updates to finish. 5294 */ 5295 atomic_inc(&mc.from->moving_account); 5296 synchronize_rcu(); 5297 retry: 5298 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 5299 /* 5300 * Someone who are holding the mmap_sem might be waiting in 5301 * waitq. So we cancel all extra charges, wake up all waiters, 5302 * and retry. Because we cancel precharges, we might not be able 5303 * to move enough charges, but moving charge is a best-effort 5304 * feature anyway, so it wouldn't be a big problem. 5305 */ 5306 __mem_cgroup_clear_mc(); 5307 cond_resched(); 5308 goto retry; 5309 } 5310 /* 5311 * When we have consumed all precharges and failed in doing 5312 * additional charge, the page walk just aborts. 5313 */ 5314 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 5315 5316 up_read(&mc.mm->mmap_sem); 5317 atomic_dec(&mc.from->moving_account); 5318 } 5319 5320 static void mem_cgroup_move_task(void) 5321 { 5322 if (mc.to) { 5323 mem_cgroup_move_charge(); 5324 mem_cgroup_clear_mc(); 5325 } 5326 } 5327 #else /* !CONFIG_MMU */ 5328 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5329 { 5330 return 0; 5331 } 5332 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5333 { 5334 } 5335 static void mem_cgroup_move_task(void) 5336 { 5337 } 5338 #endif 5339 5340 /* 5341 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5342 * to verify whether we're attached to the default hierarchy on each mount 5343 * attempt. 5344 */ 5345 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5346 { 5347 /* 5348 * use_hierarchy is forced on the default hierarchy. cgroup core 5349 * guarantees that @root doesn't have any children, so turning it 5350 * on for the root memcg is enough. 
5351 */ 5352 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5353 root_mem_cgroup->use_hierarchy = true; 5354 else 5355 root_mem_cgroup->use_hierarchy = false; 5356 } 5357 5358 static u64 memory_current_read(struct cgroup_subsys_state *css, 5359 struct cftype *cft) 5360 { 5361 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5362 5363 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5364 } 5365 5366 static int memory_min_show(struct seq_file *m, void *v) 5367 { 5368 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5369 unsigned long min = READ_ONCE(memcg->memory.min); 5370 5371 if (min == PAGE_COUNTER_MAX) 5372 seq_puts(m, "max\n"); 5373 else 5374 seq_printf(m, "%llu\n", (u64)min * PAGE_SIZE); 5375 5376 return 0; 5377 } 5378 5379 static ssize_t memory_min_write(struct kernfs_open_file *of, 5380 char *buf, size_t nbytes, loff_t off) 5381 { 5382 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5383 unsigned long min; 5384 int err; 5385 5386 buf = strstrip(buf); 5387 err = page_counter_memparse(buf, "max", &min); 5388 if (err) 5389 return err; 5390 5391 page_counter_set_min(&memcg->memory, min); 5392 5393 return nbytes; 5394 } 5395 5396 static int memory_low_show(struct seq_file *m, void *v) 5397 { 5398 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5399 unsigned long low = READ_ONCE(memcg->memory.low); 5400 5401 if (low == PAGE_COUNTER_MAX) 5402 seq_puts(m, "max\n"); 5403 else 5404 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5405 5406 return 0; 5407 } 5408 5409 static ssize_t memory_low_write(struct kernfs_open_file *of, 5410 char *buf, size_t nbytes, loff_t off) 5411 { 5412 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5413 unsigned long low; 5414 int err; 5415 5416 buf = strstrip(buf); 5417 err = page_counter_memparse(buf, "max", &low); 5418 if (err) 5419 return err; 5420 5421 page_counter_set_low(&memcg->memory, low); 5422 5423 return nbytes; 5424 } 5425 5426 static int memory_high_show(struct seq_file *m, void *v) 5427 { 5428 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5429 unsigned long high = READ_ONCE(memcg->high); 5430 5431 if (high == PAGE_COUNTER_MAX) 5432 seq_puts(m, "max\n"); 5433 else 5434 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5435 5436 return 0; 5437 } 5438 5439 static ssize_t memory_high_write(struct kernfs_open_file *of, 5440 char *buf, size_t nbytes, loff_t off) 5441 { 5442 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5443 unsigned long nr_pages; 5444 unsigned long high; 5445 int err; 5446 5447 buf = strstrip(buf); 5448 err = page_counter_memparse(buf, "max", &high); 5449 if (err) 5450 return err; 5451 5452 memcg->high = high; 5453 5454 nr_pages = page_counter_read(&memcg->memory); 5455 if (nr_pages > high) 5456 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5457 GFP_KERNEL, true); 5458 5459 memcg_wb_domain_size_changed(memcg); 5460 return nbytes; 5461 } 5462 5463 static int memory_max_show(struct seq_file *m, void *v) 5464 { 5465 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5466 unsigned long max = READ_ONCE(memcg->memory.max); 5467 5468 if (max == PAGE_COUNTER_MAX) 5469 seq_puts(m, "max\n"); 5470 else 5471 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5472 5473 return 0; 5474 } 5475 5476 static ssize_t memory_max_write(struct kernfs_open_file *of, 5477 char *buf, size_t nbytes, loff_t off) 5478 { 5479 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5480 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5481 bool drained = false; 
5482 unsigned long max; 5483 int err; 5484 5485 buf = strstrip(buf); 5486 err = page_counter_memparse(buf, "max", &max); 5487 if (err) 5488 return err; 5489 5490 xchg(&memcg->memory.max, max); 5491 5492 for (;;) { 5493 unsigned long nr_pages = page_counter_read(&memcg->memory); 5494 5495 if (nr_pages <= max) 5496 break; 5497 5498 if (signal_pending(current)) { 5499 err = -EINTR; 5500 break; 5501 } 5502 5503 if (!drained) { 5504 drain_all_stock(memcg); 5505 drained = true; 5506 continue; 5507 } 5508 5509 if (nr_reclaims) { 5510 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5511 GFP_KERNEL, true)) 5512 nr_reclaims--; 5513 continue; 5514 } 5515 5516 memcg_memory_event(memcg, MEMCG_OOM); 5517 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5518 break; 5519 } 5520 5521 memcg_wb_domain_size_changed(memcg); 5522 return nbytes; 5523 } 5524 5525 static int memory_events_show(struct seq_file *m, void *v) 5526 { 5527 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5528 5529 seq_printf(m, "low %lu\n", 5530 atomic_long_read(&memcg->memory_events[MEMCG_LOW])); 5531 seq_printf(m, "high %lu\n", 5532 atomic_long_read(&memcg->memory_events[MEMCG_HIGH])); 5533 seq_printf(m, "max %lu\n", 5534 atomic_long_read(&memcg->memory_events[MEMCG_MAX])); 5535 seq_printf(m, "oom %lu\n", 5536 atomic_long_read(&memcg->memory_events[MEMCG_OOM])); 5537 seq_printf(m, "oom_kill %lu\n", 5538 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 5539 5540 return 0; 5541 } 5542 5543 static int memory_stat_show(struct seq_file *m, void *v) 5544 { 5545 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5546 struct accumulated_stats acc; 5547 int i; 5548 5549 /* 5550 * Provide statistics on the state of the memory subsystem as 5551 * well as cumulative event counters that show past behavior. 
5552 * 5553 * This list is ordered following a combination of these gradients: 5554 * 1) generic big picture -> specifics and details 5555 * 2) reflecting userspace activity -> reflecting kernel heuristics 5556 * 5557 * Current memory state: 5558 */ 5559 5560 memset(&acc, 0, sizeof(acc)); 5561 acc.stats_size = MEMCG_NR_STAT; 5562 acc.events_size = NR_VM_EVENT_ITEMS; 5563 accumulate_memcg_tree(memcg, &acc); 5564 5565 seq_printf(m, "anon %llu\n", 5566 (u64)acc.stat[MEMCG_RSS] * PAGE_SIZE); 5567 seq_printf(m, "file %llu\n", 5568 (u64)acc.stat[MEMCG_CACHE] * PAGE_SIZE); 5569 seq_printf(m, "kernel_stack %llu\n", 5570 (u64)acc.stat[MEMCG_KERNEL_STACK_KB] * 1024); 5571 seq_printf(m, "slab %llu\n", 5572 (u64)(acc.stat[NR_SLAB_RECLAIMABLE] + 5573 acc.stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5574 seq_printf(m, "sock %llu\n", 5575 (u64)acc.stat[MEMCG_SOCK] * PAGE_SIZE); 5576 5577 seq_printf(m, "shmem %llu\n", 5578 (u64)acc.stat[NR_SHMEM] * PAGE_SIZE); 5579 seq_printf(m, "file_mapped %llu\n", 5580 (u64)acc.stat[NR_FILE_MAPPED] * PAGE_SIZE); 5581 seq_printf(m, "file_dirty %llu\n", 5582 (u64)acc.stat[NR_FILE_DIRTY] * PAGE_SIZE); 5583 seq_printf(m, "file_writeback %llu\n", 5584 (u64)acc.stat[NR_WRITEBACK] * PAGE_SIZE); 5585 5586 for (i = 0; i < NR_LRU_LISTS; i++) 5587 seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i], 5588 (u64)acc.lru_pages[i] * PAGE_SIZE); 5589 5590 seq_printf(m, "slab_reclaimable %llu\n", 5591 (u64)acc.stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE); 5592 seq_printf(m, "slab_unreclaimable %llu\n", 5593 (u64)acc.stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5594 5595 /* Accumulated memory events */ 5596 5597 seq_printf(m, "pgfault %lu\n", acc.events[PGFAULT]); 5598 seq_printf(m, "pgmajfault %lu\n", acc.events[PGMAJFAULT]); 5599 5600 seq_printf(m, "pgrefill %lu\n", acc.events[PGREFILL]); 5601 seq_printf(m, "pgscan %lu\n", acc.events[PGSCAN_KSWAPD] + 5602 acc.events[PGSCAN_DIRECT]); 5603 seq_printf(m, "pgsteal %lu\n", acc.events[PGSTEAL_KSWAPD] + 5604 acc.events[PGSTEAL_DIRECT]); 5605 seq_printf(m, "pgactivate %lu\n", acc.events[PGACTIVATE]); 5606 seq_printf(m, "pgdeactivate %lu\n", acc.events[PGDEACTIVATE]); 5607 seq_printf(m, "pglazyfree %lu\n", acc.events[PGLAZYFREE]); 5608 seq_printf(m, "pglazyfreed %lu\n", acc.events[PGLAZYFREED]); 5609 5610 seq_printf(m, "workingset_refault %lu\n", 5611 acc.stat[WORKINGSET_REFAULT]); 5612 seq_printf(m, "workingset_activate %lu\n", 5613 acc.stat[WORKINGSET_ACTIVATE]); 5614 seq_printf(m, "workingset_nodereclaim %lu\n", 5615 acc.stat[WORKINGSET_NODERECLAIM]); 5616 5617 return 0; 5618 } 5619 5620 static int memory_oom_group_show(struct seq_file *m, void *v) 5621 { 5622 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5623 5624 seq_printf(m, "%d\n", memcg->oom_group); 5625 5626 return 0; 5627 } 5628 5629 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 5630 char *buf, size_t nbytes, loff_t off) 5631 { 5632 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5633 int ret, oom_group; 5634 5635 buf = strstrip(buf); 5636 if (!buf) 5637 return -EINVAL; 5638 5639 ret = kstrtoint(buf, 0, &oom_group); 5640 if (ret) 5641 return ret; 5642 5643 if (oom_group != 0 && oom_group != 1) 5644 return -EINVAL; 5645 5646 memcg->oom_group = oom_group; 5647 5648 return nbytes; 5649 } 5650 5651 static struct cftype memory_files[] = { 5652 { 5653 .name = "current", 5654 .flags = CFTYPE_NOT_ON_ROOT, 5655 .read_u64 = memory_current_read, 5656 }, 5657 { 5658 .name = "min", 5659 .flags = CFTYPE_NOT_ON_ROOT, 5660 .seq_show = memory_min_show, 5661 .write = 
memory_min_write,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_file),
		.seq_show = memory_events_show,
	},
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_stat_show,
	},
	{
		.name = "oom.group",
		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
		.seq_show = memory_oom_group_show,
		.write = memory_oom_group_write,
	},
	{ }	/* terminate */
};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/**
 * mem_cgroup_protected - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @memcg: the memory cgroup to check
 *
 * WARNING: This function is not stateless! It can only be used as part
 * of a top-down tree iteration, not for isolated queries.
 *
 * Returns one of the following:
 *   MEMCG_PROT_NONE: cgroup memory is not protected
 *   MEMCG_PROT_LOW: cgroup memory is protected as long as there is an
 *     unprotected supply of reclaimable memory from other cgroups.
 *   MEMCG_PROT_MIN: cgroup memory is protected
 *
 * @root is exclusive; it is never protected when looked at directly
 *
 * To provide a proper hierarchical behavior, effective memory.min/low
 * values are used. Below is a description of how effective memory.low
 * is calculated. The effective memory.min value is calculated in the
 * same way.
 *
 * Effective memory.low is always equal to or less than the original
 * memory.low. If there is no memory.low overcommitment (which is always
 * true for top-level memory cgroups), these two values are equal.
 * Otherwise, it's a part of the parent's effective memory.low,
 * calculated as the cgroup's memory.low usage divided by the sum of the
 * siblings' memory.low usages, where memory.low usage is the size of
 * actually protected memory.
 *
 *                                             low_usage
 * elow = min( memory.low, parent->elow * ------------------ ),
 *                                        siblings_low_usage
 *
 *             | memory.current, if memory.current < memory.low
 * low_usage = |
 *             | 0, otherwise.
 *
 *
 * Such a definition of effective memory.low provides the expected
 * hierarchical behavior: the parent's memory.low value limits its
 * children, unprotected memory is reclaimed first, and cgroups which
 * are not using their guarantee do not affect the actual memory
 * distribution.
 *
 * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
 *
 *     A      A/memory.low = 2G, A/memory.current = 6G
 *    //\\
 *   BC  DE   B/memory.low = 3G  B/memory.current = 2G
 *            C/memory.low = 1G  C/memory.current = 2G
 *            D/memory.low = 0   D/memory.current = 2G
 *            E/memory.low = 10G E/memory.current = 0
 *
 * and memory pressure is applied, the following memory distribution
 * is expected (approximately):
 *
 *     A/memory.current = 2G
 *
 *     B/memory.current = 1.3G
 *     C/memory.current = 0.6G
 *     D/memory.current = 0
 *     E/memory.current = 0
 *
 * (Working the formula with the low_usage values the code below
 * computes, min(memory.current, memory.low): siblings_low_usage =
 * 2G + 1G + 0 + 0 = 3G, so elow(B) = min(3G, 2G * 2G/3G) ~ 1.3G and
 * elow(C) = min(1G, 2G * 1G/3G) ~ 0.6G, matching the distribution
 * above.)
 *
 * These calculations require constant tracking of the actual low usages
 * (see propagate_protected_usage()), as well as recursive calculation of
 * effective memory.low values. But as we do call mem_cgroup_protected()
 * for each memory cgroup top-down from the reclaim path, it's possible
 * to optimize this part, and save the calculated elow for the next
 * usage. This part is intentionally racy, but that's ok, as memory.low
 * is a best-effort mechanism.
 */
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent;
	unsigned long emin, parent_emin;
	unsigned long elow, parent_elow;
	unsigned long usage;

	if (mem_cgroup_disabled())
		return MEMCG_PROT_NONE;

	if (!root)
		root = root_mem_cgroup;
	if (memcg == root)
		return MEMCG_PROT_NONE;

	usage = page_counter_read(&memcg->memory);
	if (!usage)
		return MEMCG_PROT_NONE;

	emin = memcg->memory.min;
	elow = memcg->memory.low;

	parent = parent_mem_cgroup(memcg);
	/* No parent means a non-hierarchical mode on v1 memcg */
	if (!parent)
		return MEMCG_PROT_NONE;

	if (parent == root)
		goto exit;

	parent_emin = READ_ONCE(parent->memory.emin);
	emin = min(emin, parent_emin);
	if (emin && parent_emin) {
		unsigned long min_usage, siblings_min_usage;

		min_usage = min(usage, memcg->memory.min);
		siblings_min_usage = atomic_long_read(
			&parent->memory.children_min_usage);

		if (min_usage && siblings_min_usage)
			emin = min(emin, parent_emin * min_usage /
				   siblings_min_usage);
	}

	parent_elow = READ_ONCE(parent->memory.elow);
	elow = min(elow, parent_elow);
	if (elow && parent_elow) {
		unsigned long low_usage, siblings_low_usage;

		low_usage = min(usage, memcg->memory.low);
		siblings_low_usage = atomic_long_read(
			&parent->memory.children_low_usage);

		if (low_usage && siblings_low_usage)
			elow = min(elow, parent_elow * low_usage /
				   siblings_low_usage);
	}

exit:
	memcg->memory.emin = emin;
	memcg->memory.elow = elow;

	if (usage <= emin)
		return MEMCG_PROT_MIN;
	else if (usage <= elow)
		return MEMCG_PROT_LOW;
	else
		return MEMCG_PROT_NONE;
}

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
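 *
 * A minimal sketch of the intended transaction (the error label is
 * illustrative):
 *
 *	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
 *	if (ret)
 *		goto fail;
 *	... instantiate the page, set up page->mapping ...
 *	mem_cgroup_commit_charge(page, memcg, false, false);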
5871 * 5872 * After page->mapping has been set up, the caller must finalize the 5873 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5874 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5875 */ 5876 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5877 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5878 bool compound) 5879 { 5880 struct mem_cgroup *memcg = NULL; 5881 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5882 int ret = 0; 5883 5884 if (mem_cgroup_disabled()) 5885 goto out; 5886 5887 if (PageSwapCache(page)) { 5888 /* 5889 * Every swap fault against a single page tries to charge the 5890 * page, bail as early as possible. shmem_unuse() encounters 5891 * already charged pages, too. The USED bit is protected by 5892 * the page lock, which serializes swap cache removal, which 5893 * in turn serializes uncharging. 5894 */ 5895 VM_BUG_ON_PAGE(!PageLocked(page), page); 5896 if (compound_head(page)->mem_cgroup) 5897 goto out; 5898 5899 if (do_swap_account) { 5900 swp_entry_t ent = { .val = page_private(page), }; 5901 unsigned short id = lookup_swap_cgroup_id(ent); 5902 5903 rcu_read_lock(); 5904 memcg = mem_cgroup_from_id(id); 5905 if (memcg && !css_tryget_online(&memcg->css)) 5906 memcg = NULL; 5907 rcu_read_unlock(); 5908 } 5909 } 5910 5911 if (!memcg) 5912 memcg = get_mem_cgroup_from_mm(mm); 5913 5914 ret = try_charge(memcg, gfp_mask, nr_pages); 5915 5916 css_put(&memcg->css); 5917 out: 5918 *memcgp = memcg; 5919 return ret; 5920 } 5921 5922 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, 5923 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5924 bool compound) 5925 { 5926 struct mem_cgroup *memcg; 5927 int ret; 5928 5929 ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound); 5930 memcg = *memcgp; 5931 mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask); 5932 return ret; 5933 } 5934 5935 /** 5936 * mem_cgroup_commit_charge - commit a page charge 5937 * @page: page to charge 5938 * @memcg: memcg to charge the page to 5939 * @lrucare: page might be on LRU already 5940 * @compound: charge the page as compound or small page 5941 * 5942 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5943 * after page->mapping has been set up. This must happen atomically 5944 * as part of the page instantiation, i.e. under the page table lock 5945 * for anonymous pages, under the page lock for page and swap cache. 5946 * 5947 * In addition, the page must not be on the LRU during the commit, to 5948 * prevent racing with task migration. If it might be, use @lrucare. 5949 * 5950 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5951 */ 5952 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5953 bool lrucare, bool compound) 5954 { 5955 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5956 5957 VM_BUG_ON_PAGE(!page->mapping, page); 5958 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5959 5960 if (mem_cgroup_disabled()) 5961 return; 5962 /* 5963 * Swap faults will attempt to charge the same page multiple 5964 * times. But reuse_swap_page() might have removed the page 5965 * from swapcache already, so we can't check PageSwapCache(). 
struct uncharge_gather {
	struct mem_cgroup *memcg;
	unsigned long pgpgout;
	unsigned long nr_anon;
	unsigned long nr_file;
	unsigned long nr_kmem;
	unsigned long nr_huge;
	unsigned long nr_shmem;
	struct page *dummy_page;
};

static inline void uncharge_gather_clear(struct uncharge_gather *ug)
{
	memset(ug, 0, sizeof(*ug));
}

static void uncharge_batch(const struct uncharge_gather *ug)
{
	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
	unsigned long flags;

	if (!mem_cgroup_is_root(ug->memcg)) {
		page_counter_uncharge(&ug->memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
		memcg_oom_recover(ug->memcg);
	}

	local_irq_save(flags);
	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
	memcg_check_events(ug->memcg, ug->dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(ug->memcg))
		css_put_many(&ug->memcg->css, nr_pages);
}

static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
			!PageHWPoison(page), page);

	if (!page->mem_cgroup)
		return;

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point; we have fully
	 * exclusive access to the page.
	 */

	if (ug->memcg != page->mem_cgroup) {
		if (ug->memcg) {
			uncharge_batch(ug);
			uncharge_gather_clear(ug);
		}
		ug->memcg = page->mem_cgroup;
	}

	if (!PageKmemcg(page)) {
		unsigned int nr_pages = 1;

		if (PageTransHuge(page)) {
			nr_pages <<= compound_order(page);
			ug->nr_huge += nr_pages;
		}
		if (PageAnon(page))
			ug->nr_anon += nr_pages;
		else {
			ug->nr_file += nr_pages;
			if (PageSwapBacked(page))
				ug->nr_shmem += nr_pages;
		}
		ug->pgpgout++;
	} else {
		ug->nr_kmem += 1 << compound_order(page);
		__ClearPageKmemcg(page);
	}

	ug->dummy_page = page;
	page->mem_cgroup = NULL;
}

static void uncharge_list(struct list_head *page_list)
{
	struct uncharge_gather ug;
	struct list_head *next;

	uncharge_gather_clear(&ug);

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		struct page *page;

		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		uncharge_page(page, &ug);
	} while (next != page_list);

	if (ug.memcg)
		uncharge_batch(&ug);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	struct uncharge_gather ug;

	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	uncharge_gather_clear(&ug);
	uncharge_page(page, &ug);
	uncharge_batch(&ug);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
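/*
 * Illustrative sketch (not part of the kernel, compiled out): a bulk
 * free path handing a batch of pages to mem_cgroup_uncharge_list().
 * Consecutive pages belonging to the same memcg are coalesced into a
 * single counter update by the gather logic above.
 * example_uncharge_page_batch() is hypothetical; the pages are assumed
 * to already satisfy uncharge_page()'s preconditions (off the LRU,
 * no remaining references).
 */
#if 0
static void example_uncharge_page_batch(struct page **pages, int nr)
{
	LIST_HEAD(page_list);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &page_list);

	/* One uncharge_batch() per run of same-memcg pages. */
	mem_cgroup_uncharge_list(&page_list);
}
#endif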
/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage.  @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	bool compound;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/* Swapcache readahead pages can get replaced before being charged */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	compound = PageTransHuge(newpage);
	nr_pages = compound ? hpage_nr_pages(newpage) : 1;

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	commit_charge(newpage, memcg, false);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}
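/*
 * Illustrative sketch (not part of the kernel, compiled out): the
 * calling convention documented above, as a page-replacement path
 * would follow it.  example_replace_page() is hypothetical and the
 * mapping setup is heavily simplified; only the locking requirement
 * and call ordering are taken from the kernel-doc.
 */
#if 0
static void example_replace_page(struct page *oldpage, struct page *newpage)
{
	/* Both pages locked, as mem_cgroup_migrate() requires. */
	lock_page(oldpage);
	lock_page(newpage);

	/* @newpage->mapping must be set up before the call. */
	newpage->mapping = oldpage->mapping;
	newpage->index = oldpage->index;

	mem_cgroup_migrate(oldpage, newpage);

	unlock_page(newpage);
	unlock_page(oldpage);
	/* oldpage is uncharged when its last reference is dropped. */
}
#endif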
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/*
	 * Socket cloning can throw us here with sk_memcg already
	 * filled.  It won't, however, necessarily happen from
	 * process context.  So the test for root memcg given
	 * the current task's memcg won't help us in this case.
	 *
	 * Respecting the original socket's memcg is a better
	 * decision in this case.
	 */
	if (sk->sk_memcg) {
		css_get(&sk->sk_memcg->css);
		return;
	}

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg.  Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);

	refill_stock(memcg, nr_pages);
}
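/*
 * Illustrative sketch (not part of the kernel, compiled out): how a
 * networking path would consume the return value of
 * mem_cgroup_charge_skmem().  A %false return means the charge was
 * forced over the limit, so the caller should treat it as memory
 * pressure.  example_sk_reserve() is hypothetical; the real callers
 * live in the core socket code.
 */
#if 0
static bool example_sk_reserve(struct sock *sk, unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return true;

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;

	/* Charge was forced: back off and signal pressure. */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}
#endif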
static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from
 * this context because of lock dependencies (cgroup_lock -> cpu hotplug),
 * but basically everything that doesn't depend on a specific mem_cgroup
 * structure should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

#ifdef CONFIG_MEMCG_KMEM
	/*
	 * Kmem cache creation is mostly done with the slab_mutex held,
	 * so use a workqueue with limited concurrency to avoid stalling
	 * all worker threads in case lots of cgroups are created and
	 * destroyed simultaneously.
	 */
	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
	BUG_ON(!memcg_kmem_cache_wq);
#endif

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);
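/*
 * Illustrative note (not part of the kernel): the cgroup_memory()
 * tokens above are cumulative, so booting with, for instance,
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * sets both cgroup_memory_nosocket and cgroup_memory_nokmem, disabling
 * socket and kernel memory accounting at once.
 */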
#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!atomic_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = hpage_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off.  It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
				     -nr_entries);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_entries);
}
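/*
 * Illustrative note (not part of the kernel): suppose a page charged to
 * an offlined memcg C (hierarchy A -> B -> C, with B still online) is
 * swapped out.  mem_cgroup_id_get_online() walks up to B, the swap
 * entry is recorded against B's ID, and B's MEMCG_SWAP count grows by
 * the page's size.  The memsw charge then moves from C to B: the
 * explicit page_counter_charge() on B and page_counter_uncharge() on C
 * cancel out at B and above, so the ancestors' memsw totals stay
 * unchanged while C's now-stale charge is released.
 */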
/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = hpage_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
			return true;

	return false;
}

/* Remember the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
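/*
 * Illustrative note (not part of the kernel): the walk in
 * mem_cgroup_get_nr_swap_pages() returns the tightest swap headroom
 * along the hierarchy.  With 10G of free swap system-wide, a parent at
 * swap.max = 512M / usage = 400M, and a child at swap.max = 1G /
 * usage = 768M, the child's walk yields
 *
 *	min(10G, 1G - 768M, 512M - 400M) = 112M.
 */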
static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.max);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */