// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;
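
/*
 * Editorial note (assumption, not part of the original file): the nosocket
 * and nokmem switches here, like cgroup_memory_noswap below, are expected
 * to be set once at boot from the "cgroup.memory=" kernel command line,
 * e.g.:
 *
 *	cgroup.memory=nosocket,nokmem
 */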

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task; /* a task moving charges */
	wait_queue_head_t waitq; /* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
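
/*
 * Editorial aside (assumption): MOVE_ANON and MOVE_FILE correspond to the
 * bits userspace writes to the cgroup1 memory.move_charge_at_immigrate
 * file, e.g. "echo 3 > memory.move_charge_at_immigrate" requests that both
 * anonymous and file charges follow a task when it is migrated.
 */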

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we do release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	spin_lock_irqsave(&css_set_lock, flags);
	memcg = obj_cgroup_memcg(objcg);
	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);
	list_del(&objcg->list);
	mem_cgroup_put(memcg);
	spin_unlock_irqrestore(&css_set_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&css_set_lock);

	/* Move active objcg to the parent's list */
	xchg(&objcg->memcg, parent);
	css_get(&parent->css);
	list_add(&objcg->list, &parent->objcg_list);

	/* Move already reparented objcgs to the parent's list */
	list_for_each_entry(iter, &memcg->objcg_list, list) {
		css_get(&parent->css);
		xchg(&iter->memcg, parent);
		css_put(&memcg->css);
	}
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&css_set_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few kmem-limited. Also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler.
 * Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	struct mem_cgroup_per_node *pn;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		old = rcu_dereference_protected(pn->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(pn->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		map = rcu_dereference_protected(pn->shrinker_map, true);
		kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}
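
/*
 * Illustrative sketch (editorial, not part of the original file): the
 * per-node shrinker bitmap above is sized in whole longs, so with 64-bit
 * longs a newly registered shrinker id of, say, 70 makes
 * memcg_expand_shrinker_maps() grow every map to
 * DIV_ROUND_UP(71, 64) * sizeof(unsigned long) == 16 bytes, i.e. room for
 * 128 shrinker bits per node.
 */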

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page_memcg_check(page);

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}
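
/*
 * Editorial aside (derived from the insert/remove helpers above, not part
 * of the original file): each node's soft-limit tree is keyed by
 * usage_in_excess, and rb_rightmost caches the memcg with the largest
 * excess so soft limit reclaim can pick the worst offender without
 * walking the tree.
 */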

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return parent->nodeinfo[nid];
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x, threshold = MEMCG_CHARGE_BATCH;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	if (vmstat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > threshold)) {
		pg_data_t *pgdat = lruvec_pgdat(lruvec);
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
			     int val)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = page_memcg(head);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return READ_ONCE(memcg->vmstats.events[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
	return x;
}
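
/*
 * Illustrative note (editorial, not from the original source):
 * nr_page_events below is a plain per-cpu count of pages charged and
 * uncharged; mem_cgroup_event_ratelimit() compares it against per-target
 * thresholds, so with the defaults defined earlier a threshold check fires
 * roughly every 128 charged/uncharged pages and a soft-limit tree update
 * roughly every 1024.
 */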

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = root->nodeinfo[reclaim->pgdat->node_id];
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = page_memcg(page);

	if (!memcg)
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
	else
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
}
#endif
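
/*
 * Illustrative usage sketch (editorial, not part of the original file; the
 * matching unlock helpers are assumed to live in memcontrol.h):
 *
 *	struct lruvec *lruvec;
 *
 *	lruvec = lock_page_lruvec_irq(page);
 *	... manipulate the page's LRU state ...
 *	unlock_page_lruvec_irq(lruvec);
 *
 * The _irqsave variant below is for callers that must preserve the
 * interrupt state they were entered with.
 */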

/**
 * lock_page_lruvec - lock and return lruvec for a given page.
 * @page: the page
 *
 * These functions are safe to use under any of the following conditions:
 * - page locked
 * - PageLRU cleared
 * - lock_page_memcg()
 * - page->_refcount is zero
 */
struct lruvec *lock_page_lruvec(struct page *page)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = page_pgdat(page);

	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	spin_lock(&lruvec->lru_lock);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = page_pgdat(page);

	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	spin_lock_irq(&lruvec->lru_lock);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = page_pgdat(page);

	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	spin_lock_irqsave(&lruvec->lru_lock, *flags);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
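
/*
 * Worked example (editorial, numbers invented for illustration): with a
 * memory limit of 1000 pages, usage of 900 pages, a memsw limit of 1050
 * pages and memsw usage of 1020 pages, mem_cgroup_margin() above returns
 * min(1000 - 900, 1050 - 1020) == 30 pages of remaining chargeable space.
 */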

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "shmem", NR_SHMEM },
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
};

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
						    int item)
{
	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	cgroup_rstat_flush(memcg->css.cgroup);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(&s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}
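
/*
 * Illustrative note (editorial): K() converts a page count to kilobytes,
 * e.g. with 4 KiB pages K(3) == 3 << (12 - 10) == 12, so the usage/limit
 * figures printed below are reported in kB.
 */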

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so small that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
						pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
				   unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation. Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}
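
/*
 * Editorial note (assumption about the callers, which live outside this
 * file): mem_cgroup_oom_synchronize(true) is expected to be invoked from
 * the page fault exit path (e.g. pagefault_out_of_memory()) once all mm
 * locks have been dropped, matching the OOM_ASYNC state recorded above.
 */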
2030 */ 2031 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2032 struct mem_cgroup *oom_domain) 2033 { 2034 struct mem_cgroup *oom_group = NULL; 2035 struct mem_cgroup *memcg; 2036 2037 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2038 return NULL; 2039 2040 if (!oom_domain) 2041 oom_domain = root_mem_cgroup; 2042 2043 rcu_read_lock(); 2044 2045 memcg = mem_cgroup_from_task(victim); 2046 if (memcg == root_mem_cgroup) 2047 goto out; 2048 2049 /* 2050 * If the victim task has been asynchronously moved to a different 2051 * memory cgroup, we might end up killing tasks outside oom_domain. 2052 * In this case it's better to ignore memory.group.oom. 2053 */ 2054 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2055 goto out; 2056 2057 /* 2058 * Traverse the memory cgroup hierarchy from the victim task's 2059 * cgroup up to the OOMing cgroup (or root) to find the 2060 * highest-level memory cgroup with oom.group set. 2061 */ 2062 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2063 if (memcg->oom_group) 2064 oom_group = memcg; 2065 2066 if (memcg == oom_domain) 2067 break; 2068 } 2069 2070 if (oom_group) 2071 css_get(&oom_group->css); 2072 out: 2073 rcu_read_unlock(); 2074 2075 return oom_group; 2076 } 2077 2078 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2079 { 2080 pr_info("Tasks in "); 2081 pr_cont_cgroup_path(memcg->css.cgroup); 2082 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2083 } 2084 2085 /** 2086 * lock_page_memcg - lock a page and memcg binding 2087 * @page: the page 2088 * 2089 * This function protects unlocked LRU pages from being moved to 2090 * another cgroup. 2091 * 2092 * It ensures lifetime of the locked memcg. Caller is responsible 2093 * for the lifetime of the page. 2094 */ 2095 void lock_page_memcg(struct page *page) 2096 { 2097 struct page *head = compound_head(page); /* rmap on tail pages */ 2098 struct mem_cgroup *memcg; 2099 unsigned long flags; 2100 2101 /* 2102 * The RCU lock is held throughout the transaction. The fast 2103 * path can get away without acquiring the memcg->move_lock 2104 * because page moving starts with an RCU grace period. 2105 */ 2106 rcu_read_lock(); 2107 2108 if (mem_cgroup_disabled()) 2109 return; 2110 again: 2111 memcg = page_memcg(head); 2112 if (unlikely(!memcg)) 2113 return; 2114 2115 #ifdef CONFIG_PROVE_LOCKING 2116 local_irq_save(flags); 2117 might_lock(&memcg->move_lock); 2118 local_irq_restore(flags); 2119 #endif 2120 2121 if (atomic_read(&memcg->moving_account) <= 0) 2122 return; 2123 2124 spin_lock_irqsave(&memcg->move_lock, flags); 2125 if (memcg != page_memcg(head)) { 2126 spin_unlock_irqrestore(&memcg->move_lock, flags); 2127 goto again; 2128 } 2129 2130 /* 2131 * When charge migration first begins, we can have multiple 2132 * critical sections holding the fast-path RCU lock and one 2133 * holding the slowpath move_lock. Track the task who has the 2134 * move_lock for unlock_page_memcg(). 
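 *
 * A typical caller pattern (illustrative; the actual call sites live
 * outside this function):
 *
 *	lock_page_memcg(page);
 *	... update page state and the matching memcg statistics ...
 *	unlock_page_memcg(page);
 *
 * Between the two calls the page cannot be moved to another memcg, so the
 * statistics are charged to the right group.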
2135 */ 2136 memcg->move_lock_task = current; 2137 memcg->move_lock_flags = flags; 2138 } 2139 EXPORT_SYMBOL(lock_page_memcg); 2140 2141 static void __unlock_page_memcg(struct mem_cgroup *memcg) 2142 { 2143 if (memcg && memcg->move_lock_task == current) { 2144 unsigned long flags = memcg->move_lock_flags; 2145 2146 memcg->move_lock_task = NULL; 2147 memcg->move_lock_flags = 0; 2148 2149 spin_unlock_irqrestore(&memcg->move_lock, flags); 2150 } 2151 2152 rcu_read_unlock(); 2153 } 2154 2155 /** 2156 * unlock_page_memcg - unlock a page and memcg binding 2157 * @page: the page 2158 */ 2159 void unlock_page_memcg(struct page *page) 2160 { 2161 struct page *head = compound_head(page); 2162 2163 __unlock_page_memcg(page_memcg(head)); 2164 } 2165 EXPORT_SYMBOL(unlock_page_memcg); 2166 2167 struct memcg_stock_pcp { 2168 struct mem_cgroup *cached; /* this never be root cgroup */ 2169 unsigned int nr_pages; 2170 2171 #ifdef CONFIG_MEMCG_KMEM 2172 struct obj_cgroup *cached_objcg; 2173 unsigned int nr_bytes; 2174 #endif 2175 2176 struct work_struct work; 2177 unsigned long flags; 2178 #define FLUSHING_CACHED_CHARGE 0 2179 }; 2180 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2181 static DEFINE_MUTEX(percpu_charge_mutex); 2182 2183 #ifdef CONFIG_MEMCG_KMEM 2184 static void drain_obj_stock(struct memcg_stock_pcp *stock); 2185 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2186 struct mem_cgroup *root_memcg); 2187 2188 #else 2189 static inline void drain_obj_stock(struct memcg_stock_pcp *stock) 2190 { 2191 } 2192 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2193 struct mem_cgroup *root_memcg) 2194 { 2195 return false; 2196 } 2197 #endif 2198 2199 /** 2200 * consume_stock: Try to consume stocked charge on this cpu. 2201 * @memcg: memcg to consume from. 2202 * @nr_pages: how many pages to charge. 2203 * 2204 * The charges will only happen if @memcg matches the current cpu's memcg 2205 * stock, and at least @nr_pages are available in that stock. Failure to 2206 * service an allocation will refill the stock. 2207 * 2208 * returns true if successful, false otherwise. 2209 */ 2210 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2211 { 2212 struct memcg_stock_pcp *stock; 2213 unsigned long flags; 2214 bool ret = false; 2215 2216 if (nr_pages > MEMCG_CHARGE_BATCH) 2217 return ret; 2218 2219 local_irq_save(flags); 2220 2221 stock = this_cpu_ptr(&memcg_stock); 2222 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2223 stock->nr_pages -= nr_pages; 2224 ret = true; 2225 } 2226 2227 local_irq_restore(flags); 2228 2229 return ret; 2230 } 2231 2232 /* 2233 * Returns stocks cached in percpu and reset cached information. 2234 */ 2235 static void drain_stock(struct memcg_stock_pcp *stock) 2236 { 2237 struct mem_cgroup *old = stock->cached; 2238 2239 if (!old) 2240 return; 2241 2242 if (stock->nr_pages) { 2243 page_counter_uncharge(&old->memory, stock->nr_pages); 2244 if (do_memsw_account()) 2245 page_counter_uncharge(&old->memsw, stock->nr_pages); 2246 stock->nr_pages = 0; 2247 } 2248 2249 css_put(&old->css); 2250 stock->cached = NULL; 2251 } 2252 2253 static void drain_local_stock(struct work_struct *dummy) 2254 { 2255 struct memcg_stock_pcp *stock; 2256 unsigned long flags; 2257 2258 /* 2259 * The only protection from memory hotplug vs. 
drain_stock races is 2260 * that we always operate on local CPU stock here with IRQ disabled 2261 */ 2262 local_irq_save(flags); 2263 2264 stock = this_cpu_ptr(&memcg_stock); 2265 drain_obj_stock(stock); 2266 drain_stock(stock); 2267 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2268 2269 local_irq_restore(flags); 2270 } 2271 2272 /* 2273 * Cache charges(val) to local per_cpu area. 2274 * This will be consumed by consume_stock() function, later. 2275 */ 2276 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2277 { 2278 struct memcg_stock_pcp *stock; 2279 unsigned long flags; 2280 2281 local_irq_save(flags); 2282 2283 stock = this_cpu_ptr(&memcg_stock); 2284 if (stock->cached != memcg) { /* reset if necessary */ 2285 drain_stock(stock); 2286 css_get(&memcg->css); 2287 stock->cached = memcg; 2288 } 2289 stock->nr_pages += nr_pages; 2290 2291 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2292 drain_stock(stock); 2293 2294 local_irq_restore(flags); 2295 } 2296 2297 /* 2298 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2299 * of the hierarchy under it. 2300 */ 2301 static void drain_all_stock(struct mem_cgroup *root_memcg) 2302 { 2303 int cpu, curcpu; 2304 2305 /* If someone's already draining, avoid adding running more workers. */ 2306 if (!mutex_trylock(&percpu_charge_mutex)) 2307 return; 2308 /* 2309 * Notify other cpus that system-wide "drain" is running 2310 * We do not care about races with the cpu hotplug because cpu down 2311 * as well as workers from this path always operate on the local 2312 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2313 */ 2314 curcpu = get_cpu(); 2315 for_each_online_cpu(cpu) { 2316 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2317 struct mem_cgroup *memcg; 2318 bool flush = false; 2319 2320 rcu_read_lock(); 2321 memcg = stock->cached; 2322 if (memcg && stock->nr_pages && 2323 mem_cgroup_is_descendant(memcg, root_memcg)) 2324 flush = true; 2325 if (obj_stock_flush_required(stock, root_memcg)) 2326 flush = true; 2327 rcu_read_unlock(); 2328 2329 if (flush && 2330 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2331 if (cpu == curcpu) 2332 drain_local_stock(&stock->work); 2333 else 2334 schedule_work_on(cpu, &stock->work); 2335 } 2336 } 2337 put_cpu(); 2338 mutex_unlock(&percpu_charge_mutex); 2339 } 2340 2341 static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu) 2342 { 2343 int nid; 2344 2345 for_each_node(nid) { 2346 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 2347 unsigned long stat[NR_VM_NODE_STAT_ITEMS]; 2348 struct batched_lruvec_stat *lstatc; 2349 int i; 2350 2351 lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu); 2352 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 2353 stat[i] = lstatc->count[i]; 2354 lstatc->count[i] = 0; 2355 } 2356 2357 do { 2358 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 2359 atomic_long_add(stat[i], &pn->lruvec_stat[i]); 2360 } while ((pn = parent_nodeinfo(pn, nid))); 2361 } 2362 } 2363 2364 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2365 { 2366 struct memcg_stock_pcp *stock; 2367 struct mem_cgroup *memcg; 2368 2369 stock = &per_cpu(memcg_stock, cpu); 2370 drain_stock(stock); 2371 2372 for_each_mem_cgroup(memcg) 2373 memcg_flush_lruvec_page_state(memcg, cpu); 2374 2375 return 0; 2376 } 2377 2378 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2379 unsigned int nr_pages, 2380 gfp_t gfp_mask) 2381 { 2382 unsigned long nr_reclaimed = 0; 2383 2384 do { 2385 unsigned long pflags; 2386 2387 if 
(page_counter_read(&memcg->memory) <= 2388 READ_ONCE(memcg->memory.high)) 2389 continue; 2390 2391 memcg_memory_event(memcg, MEMCG_HIGH); 2392 2393 psi_memstall_enter(&pflags); 2394 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2395 gfp_mask, true); 2396 psi_memstall_leave(&pflags); 2397 } while ((memcg = parent_mem_cgroup(memcg)) && 2398 !mem_cgroup_is_root(memcg)); 2399 2400 return nr_reclaimed; 2401 } 2402 2403 static void high_work_func(struct work_struct *work) 2404 { 2405 struct mem_cgroup *memcg; 2406 2407 memcg = container_of(work, struct mem_cgroup, high_work); 2408 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2409 } 2410 2411 /* 2412 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2413 * enough to still cause a significant slowdown in most cases, while still 2414 * allowing diagnostics and tracing to proceed without becoming stuck. 2415 */ 2416 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2417 2418 /* 2419 * When calculating the delay, we use these either side of the exponentiation to 2420 * maintain precision and scale to a reasonable number of jiffies (see the table 2421 * below). 2422 * 2423 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2424 * overage ratio to a delay. 2425 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2426 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2427 * to produce a reasonable delay curve. 2428 * 2429 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2430 * reasonable delay curve compared to precision-adjusted overage, not 2431 * penalising heavily at first, but still making sure that growth beyond the 2432 * limit penalises misbehaving cgroups by slowing them down exponentially.
For 2433 * example, with a high of 100 megabytes: 2434 * 2435 * +-------+------------------------+ 2436 * | usage | time to allocate in ms | 2437 * +-------+------------------------+ 2438 * | 100M | 0 | 2439 * | 101M | 6 | 2440 * | 102M | 25 | 2441 * | 103M | 57 | 2442 * | 104M | 102 | 2443 * | 105M | 159 | 2444 * | 106M | 230 | 2445 * | 107M | 313 | 2446 * | 108M | 409 | 2447 * | 109M | 518 | 2448 * | 110M | 639 | 2449 * | 111M | 774 | 2450 * | 112M | 921 | 2451 * | 113M | 1081 | 2452 * | 114M | 1254 | 2453 * | 115M | 1439 | 2454 * | 116M | 1638 | 2455 * | 117M | 1849 | 2456 * | 118M | 2000 | 2457 * | 119M | 2000 | 2458 * | 120M | 2000 | 2459 * +-------+------------------------+ 2460 */ 2461 #define MEMCG_DELAY_PRECISION_SHIFT 20 2462 #define MEMCG_DELAY_SCALING_SHIFT 14 2463 2464 static u64 calculate_overage(unsigned long usage, unsigned long high) 2465 { 2466 u64 overage; 2467 2468 if (usage <= high) 2469 return 0; 2470 2471 /* 2472 * Prevent division by 0 in overage calculation by acting as if 2473 * it was a threshold of 1 page 2474 */ 2475 high = max(high, 1UL); 2476 2477 overage = usage - high; 2478 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2479 return div64_u64(overage, high); 2480 } 2481 2482 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2483 { 2484 u64 overage, max_overage = 0; 2485 2486 do { 2487 overage = calculate_overage(page_counter_read(&memcg->memory), 2488 READ_ONCE(memcg->memory.high)); 2489 max_overage = max(overage, max_overage); 2490 } while ((memcg = parent_mem_cgroup(memcg)) && 2491 !mem_cgroup_is_root(memcg)); 2492 2493 return max_overage; 2494 } 2495 2496 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2497 { 2498 u64 overage, max_overage = 0; 2499 2500 do { 2501 overage = calculate_overage(page_counter_read(&memcg->swap), 2502 READ_ONCE(memcg->swap.high)); 2503 if (overage) 2504 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2505 max_overage = max(overage, max_overage); 2506 } while ((memcg = parent_mem_cgroup(memcg)) && 2507 !mem_cgroup_is_root(memcg)); 2508 2509 return max_overage; 2510 } 2511 2512 /* 2513 * Get the number of jiffies that we should penalise a mischievous cgroup which 2514 * is exceeding its memory.high by checking both it and its ancestors. 2515 */ 2516 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2517 unsigned int nr_pages, 2518 u64 max_overage) 2519 { 2520 unsigned long penalty_jiffies; 2521 2522 if (!max_overage) 2523 return 0; 2524 2525 /* 2526 * We use overage compared to memory.high to calculate the number of 2527 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2528 * fairly lenient on small overages, and increasingly harsh when the 2529 * memcg in question makes it clear that it has no intention of stopping 2530 * its crazy behaviour, so we exponentially increase the delay based on 2531 * overage amount. 2532 */ 2533 penalty_jiffies = max_overage * max_overage * HZ; 2534 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2535 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2536 2537 /* 2538 * Factor in the task's own contribution to the overage, such that four 2539 * N-sized allocations are throttled approximately the same as one 2540 * 4N-sized allocation. 2541 * 2542 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2543 * larger the current charge patch is than that. 
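 *
 * Worked example (assuming HZ=1000, to match the table above): a memcg 10%
 * over its high limit has max_overage ~= 0.1 << MEMCG_DELAY_PRECISION_SHIFT
 * ~= 104857, so
 *
 *	penalty_jiffies = 104857 * 104857 * 1000 >> 20 >> 14 ~= 640
 *
 * i.e. roughly the 639 ms shown for the 110M row, before the per-call
 * scaling below and the MEMCG_MAX_HIGH_DELAY_JIFFIES clamp applied by the
 * caller.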
2544 */ 2545 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2546 } 2547 2548 /* 2549 * Scheduled by try_charge() to be executed from the userland return path 2550 * and reclaims memory over the high limit. 2551 */ 2552 void mem_cgroup_handle_over_high(void) 2553 { 2554 unsigned long penalty_jiffies; 2555 unsigned long pflags; 2556 unsigned long nr_reclaimed; 2557 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2558 int nr_retries = MAX_RECLAIM_RETRIES; 2559 struct mem_cgroup *memcg; 2560 bool in_retry = false; 2561 2562 if (likely(!nr_pages)) 2563 return; 2564 2565 memcg = get_mem_cgroup_from_mm(current->mm); 2566 current->memcg_nr_pages_over_high = 0; 2567 2568 retry_reclaim: 2569 /* 2570 * The allocating task should reclaim at least the batch size, but for 2571 * subsequent retries we only want to do what's necessary to prevent oom 2572 * or breaching resource isolation. 2573 * 2574 * This is distinct from memory.max or page allocator behaviour because 2575 * memory.high is currently batched, whereas memory.max and the page 2576 * allocator run every time an allocation is made. 2577 */ 2578 nr_reclaimed = reclaim_high(memcg, 2579 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2580 GFP_KERNEL); 2581 2582 /* 2583 * memory.high is breached and reclaim is unable to keep up. Throttle 2584 * allocators proactively to slow down excessive growth. 2585 */ 2586 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2587 mem_find_max_overage(memcg)); 2588 2589 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2590 swap_find_max_overage(memcg)); 2591 2592 /* 2593 * Clamp the max delay per usermode return so as to still keep the 2594 * application moving forwards and also permit diagnostics, albeit 2595 * extremely slowly. 2596 */ 2597 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2598 2599 /* 2600 * Don't sleep if the amount of jiffies this memcg owes us is so low 2601 * that it's not even worth doing, in an attempt to be nice to those who 2602 * go only a small amount over their memory.high value and maybe haven't 2603 * been aggressively reclaimed enough yet. 2604 */ 2605 if (penalty_jiffies <= HZ / 100) 2606 goto out; 2607 2608 /* 2609 * If reclaim is making forward progress but we're still over 2610 * memory.high, we want to encourage that rather than doing allocator 2611 * throttling. 2612 */ 2613 if (nr_reclaimed || nr_retries--) { 2614 in_retry = true; 2615 goto retry_reclaim; 2616 } 2617 2618 /* 2619 * If we exit early, we're guaranteed to die (since 2620 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2621 * need to account for any ill-begotten jiffies to pay them off later. 
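 *
 * Illustrative cgroup-v2 usage (not code from this file): after
 *
 *	echo 64M > <cgroup>/memory.high
 *
 * allocations beyond 64M are not failed; the allocating tasks are reclaimed
 * against and, when reclaim cannot keep up, throttled right here on their
 * way back to userspace. Each breach is also counted via
 * memcg_memory_event(memcg, MEMCG_HIGH) in reclaim_high() above.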
2622 */ 2623 psi_memstall_enter(&pflags); 2624 schedule_timeout_killable(penalty_jiffies); 2625 psi_memstall_leave(&pflags); 2626 2627 out: 2628 css_put(&memcg->css); 2629 } 2630 2631 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2632 unsigned int nr_pages) 2633 { 2634 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2635 int nr_retries = MAX_RECLAIM_RETRIES; 2636 struct mem_cgroup *mem_over_limit; 2637 struct page_counter *counter; 2638 enum oom_status oom_status; 2639 unsigned long nr_reclaimed; 2640 bool may_swap = true; 2641 bool drained = false; 2642 unsigned long pflags; 2643 2644 if (mem_cgroup_is_root(memcg)) 2645 return 0; 2646 retry: 2647 if (consume_stock(memcg, nr_pages)) 2648 return 0; 2649 2650 if (!do_memsw_account() || 2651 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2652 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2653 goto done_restock; 2654 if (do_memsw_account()) 2655 page_counter_uncharge(&memcg->memsw, batch); 2656 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2657 } else { 2658 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2659 may_swap = false; 2660 } 2661 2662 if (batch > nr_pages) { 2663 batch = nr_pages; 2664 goto retry; 2665 } 2666 2667 /* 2668 * Memcg doesn't have a dedicated reserve for atomic 2669 * allocations. But like the global atomic pool, we need to 2670 * put the burden of reclaim on regular allocation requests 2671 * and let these go through as privileged allocations. 2672 */ 2673 if (gfp_mask & __GFP_ATOMIC) 2674 goto force; 2675 2676 /* 2677 * Unlike in global OOM situations, memcg is not in a physical 2678 * memory shortage. Allow dying and OOM-killed tasks to 2679 * bypass the last charges so that they can exit quickly and 2680 * free their memory. 2681 */ 2682 if (unlikely(should_force_charge())) 2683 goto force; 2684 2685 /* 2686 * Prevent unbounded recursion when reclaim operations need to 2687 * allocate memory. This might exceed the limits temporarily, 2688 * but we prefer facilitating memory reclaim and getting back 2689 * under the limit over triggering OOM kills in these cases. 2690 */ 2691 if (unlikely(current->flags & PF_MEMALLOC)) 2692 goto force; 2693 2694 if (unlikely(task_in_memcg_oom(current))) 2695 goto nomem; 2696 2697 if (!gfpflags_allow_blocking(gfp_mask)) 2698 goto nomem; 2699 2700 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2701 2702 psi_memstall_enter(&pflags); 2703 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2704 gfp_mask, may_swap); 2705 psi_memstall_leave(&pflags); 2706 2707 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2708 goto retry; 2709 2710 if (!drained) { 2711 drain_all_stock(mem_over_limit); 2712 drained = true; 2713 goto retry; 2714 } 2715 2716 if (gfp_mask & __GFP_NORETRY) 2717 goto nomem; 2718 /* 2719 * Even though the limit is exceeded at this point, reclaim 2720 * may have been able to free some pages. Retry the charge 2721 * before killing the task. 2722 * 2723 * Only for regular pages, though: huge pages are rather 2724 * unlikely to succeed so close to the limit, and we fall back 2725 * to regular pages anyway in case of failure. 2726 */ 2727 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2728 goto retry; 2729 /* 2730 * At task move, charge accounts can be doubly counted. So, it's 2731 * better to wait until the end of task_move if something is going on. 
2732 */ 2733 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2734 goto retry; 2735 2736 if (nr_retries--) 2737 goto retry; 2738 2739 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2740 goto nomem; 2741 2742 if (fatal_signal_pending(current)) 2743 goto force; 2744 2745 /* 2746 * keep retrying as long as the memcg oom killer is able to make 2747 * a forward progress or bypass the charge if the oom killer 2748 * couldn't make any progress. 2749 */ 2750 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2751 get_order(nr_pages * PAGE_SIZE)); 2752 switch (oom_status) { 2753 case OOM_SUCCESS: 2754 nr_retries = MAX_RECLAIM_RETRIES; 2755 goto retry; 2756 case OOM_FAILED: 2757 goto force; 2758 default: 2759 goto nomem; 2760 } 2761 nomem: 2762 if (!(gfp_mask & __GFP_NOFAIL)) 2763 return -ENOMEM; 2764 force: 2765 /* 2766 * The allocation either can't fail or will lead to more memory 2767 * being freed very soon. Allow memory usage go over the limit 2768 * temporarily by force charging it. 2769 */ 2770 page_counter_charge(&memcg->memory, nr_pages); 2771 if (do_memsw_account()) 2772 page_counter_charge(&memcg->memsw, nr_pages); 2773 2774 return 0; 2775 2776 done_restock: 2777 if (batch > nr_pages) 2778 refill_stock(memcg, batch - nr_pages); 2779 2780 /* 2781 * If the hierarchy is above the normal consumption range, schedule 2782 * reclaim on returning to userland. We can perform reclaim here 2783 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2784 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2785 * not recorded as it most likely matches current's and won't 2786 * change in the meantime. As high limit is checked again before 2787 * reclaim, the cost of mismatch is negligible. 2788 */ 2789 do { 2790 bool mem_high, swap_high; 2791 2792 mem_high = page_counter_read(&memcg->memory) > 2793 READ_ONCE(memcg->memory.high); 2794 swap_high = page_counter_read(&memcg->swap) > 2795 READ_ONCE(memcg->swap.high); 2796 2797 /* Don't bother a random interrupted task */ 2798 if (in_interrupt()) { 2799 if (mem_high) { 2800 schedule_work(&memcg->high_work); 2801 break; 2802 } 2803 continue; 2804 } 2805 2806 if (mem_high || swap_high) { 2807 /* 2808 * The allocating tasks in this cgroup will need to do 2809 * reclaim or be throttled to prevent further growth 2810 * of the memory or swap footprints. 2811 * 2812 * Target some best-effort fairness between the tasks, 2813 * and distribute reclaim work and delay penalties 2814 * based on how much each task is actually allocating. 
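 *
 * The mechanism, roughly (the resume-path plumbing lives outside this
 * file): the overage is accumulated in current->memcg_nr_pages_over_high
 * and set_notify_resume() is called, so that on the way back to userspace
 * the task runs mem_cgroup_handle_over_high(), which does the actual
 * reclaim and, if needed, the throttling sleep.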
2815 */ 2816 current->memcg_nr_pages_over_high += batch; 2817 set_notify_resume(current); 2818 break; 2819 } 2820 } while ((memcg = parent_mem_cgroup(memcg))); 2821 2822 return 0; 2823 } 2824 2825 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2826 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2827 { 2828 if (mem_cgroup_is_root(memcg)) 2829 return; 2830 2831 page_counter_uncharge(&memcg->memory, nr_pages); 2832 if (do_memsw_account()) 2833 page_counter_uncharge(&memcg->memsw, nr_pages); 2834 } 2835 #endif 2836 2837 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2838 { 2839 VM_BUG_ON_PAGE(page_memcg(page), page); 2840 /* 2841 * Any of the following ensures page's memcg stability: 2842 * 2843 * - the page lock 2844 * - LRU isolation 2845 * - lock_page_memcg() 2846 * - exclusive reference 2847 */ 2848 page->memcg_data = (unsigned long)memcg; 2849 } 2850 2851 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) 2852 { 2853 struct mem_cgroup *memcg; 2854 2855 rcu_read_lock(); 2856 retry: 2857 memcg = obj_cgroup_memcg(objcg); 2858 if (unlikely(!css_tryget(&memcg->css))) 2859 goto retry; 2860 rcu_read_unlock(); 2861 2862 return memcg; 2863 } 2864 2865 #ifdef CONFIG_MEMCG_KMEM 2866 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2867 gfp_t gfp, bool new_page) 2868 { 2869 unsigned int objects = objs_per_slab_page(s, page); 2870 unsigned long memcg_data; 2871 void *vec; 2872 2873 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2874 page_to_nid(page)); 2875 if (!vec) 2876 return -ENOMEM; 2877 2878 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2879 if (new_page) { 2880 /* 2881 * If the slab page is brand new and nobody can yet access 2882 * it's memcg_data, no synchronization is required and 2883 * memcg_data can be simply assigned. 2884 */ 2885 page->memcg_data = memcg_data; 2886 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) { 2887 /* 2888 * If the slab page is already in use, somebody can allocate 2889 * and assign obj_cgroups in parallel. In this case the existing 2890 * objcg vector should be reused. 2891 */ 2892 kfree(vec); 2893 return 0; 2894 } 2895 2896 kmemleak_not_leak(vec); 2897 return 0; 2898 } 2899 2900 /* 2901 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2902 * 2903 * A passed kernel object can be a slab object or a generic kernel page, so 2904 * different mechanisms for getting the memory cgroup pointer should be used. 2905 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2906 * can not know for sure how the kernel object is implemented. 2907 * mem_cgroup_from_obj() can be safely used in such cases. 2908 * 2909 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2910 * cgroup_mutex, etc. 2911 */ 2912 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2913 { 2914 struct page *page; 2915 2916 if (mem_cgroup_disabled()) 2917 return NULL; 2918 2919 page = virt_to_head_page(p); 2920 2921 /* 2922 * Slab objects are accounted individually, not per-page. 2923 * Memcg membership data for each individual object is saved in 2924 * the page->obj_cgroups. 
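 *
 * For a slab page the low bits of page->memcg_data are type flags (for
 * example MEMCG_DATA_OBJCGS, set in memcg_alloc_page_obj_cgroups() above)
 * and the rest points to the per-object obj_cgroup vector; page_objcgs()
 * strips the flags, and obj_to_index() turns the object address into the
 * slot to look up, as done just below.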
2925 */ 2926 if (page_objcgs_check(page)) { 2927 struct obj_cgroup *objcg; 2928 unsigned int off; 2929 2930 off = obj_to_index(page->slab_cache, page, p); 2931 objcg = page_objcgs(page)[off]; 2932 if (objcg) 2933 return obj_cgroup_memcg(objcg); 2934 2935 return NULL; 2936 } 2937 2938 /* 2939 * page_memcg_check() is used here, because page_has_obj_cgroups() 2940 * check above could fail because the object cgroups vector wasn't set 2941 * at that moment, but it can be set concurrently. 2942 * page_memcg_check(page) will guarantee that a proper memory 2943 * cgroup pointer or NULL will be returned. 2944 */ 2945 return page_memcg_check(page); 2946 } 2947 2948 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2949 { 2950 struct obj_cgroup *objcg = NULL; 2951 struct mem_cgroup *memcg; 2952 2953 if (memcg_kmem_bypass()) 2954 return NULL; 2955 2956 rcu_read_lock(); 2957 if (unlikely(active_memcg())) 2958 memcg = active_memcg(); 2959 else 2960 memcg = mem_cgroup_from_task(current); 2961 2962 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2963 objcg = rcu_dereference(memcg->objcg); 2964 if (objcg && obj_cgroup_tryget(objcg)) 2965 break; 2966 objcg = NULL; 2967 } 2968 rcu_read_unlock(); 2969 2970 return objcg; 2971 } 2972 2973 static int memcg_alloc_cache_id(void) 2974 { 2975 int id, size; 2976 int err; 2977 2978 id = ida_simple_get(&memcg_cache_ida, 2979 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2980 if (id < 0) 2981 return id; 2982 2983 if (id < memcg_nr_cache_ids) 2984 return id; 2985 2986 /* 2987 * There's no space for the new id in memcg_caches arrays, 2988 * so we have to grow them. 2989 */ 2990 down_write(&memcg_cache_ids_sem); 2991 2992 size = 2 * (id + 1); 2993 if (size < MEMCG_CACHES_MIN_SIZE) 2994 size = MEMCG_CACHES_MIN_SIZE; 2995 else if (size > MEMCG_CACHES_MAX_SIZE) 2996 size = MEMCG_CACHES_MAX_SIZE; 2997 2998 err = memcg_update_all_list_lrus(size); 2999 if (!err) 3000 memcg_nr_cache_ids = size; 3001 3002 up_write(&memcg_cache_ids_sem); 3003 3004 if (err) { 3005 ida_simple_remove(&memcg_cache_ida, id); 3006 return err; 3007 } 3008 return id; 3009 } 3010 3011 static void memcg_free_cache_id(int id) 3012 { 3013 ida_simple_remove(&memcg_cache_ida, id); 3014 } 3015 3016 /* 3017 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 3018 * @objcg: object cgroup to uncharge 3019 * @nr_pages: number of pages to uncharge 3020 */ 3021 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 3022 unsigned int nr_pages) 3023 { 3024 struct mem_cgroup *memcg; 3025 3026 memcg = get_mem_cgroup_from_objcg(objcg); 3027 3028 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 3029 page_counter_uncharge(&memcg->kmem, nr_pages); 3030 refill_stock(memcg, nr_pages); 3031 3032 css_put(&memcg->css); 3033 } 3034 3035 /* 3036 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 3037 * @objcg: object cgroup to charge 3038 * @gfp: reclaim mode 3039 * @nr_pages: number of pages to charge 3040 * 3041 * Returns 0 on success, an error code on failure. 
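 *
 * Illustrative path into here (an assumption about callers outside this
 * file): a slab allocation such as kmalloc(size, GFP_KERNEL_ACCOUNT) picks
 * up an objcg via get_obj_cgroup_from_current() and charges it with
 * obj_cgroup_charge(), which in turn calls this function for the whole
 * pages it needs.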
3042 */ 3043 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3044 unsigned int nr_pages) 3045 { 3046 struct page_counter *counter; 3047 struct mem_cgroup *memcg; 3048 int ret; 3049 3050 memcg = get_mem_cgroup_from_objcg(objcg); 3051 3052 ret = try_charge(memcg, gfp, nr_pages); 3053 if (ret) 3054 goto out; 3055 3056 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3057 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3058 3059 /* 3060 * Enforce __GFP_NOFAIL allocation because callers are not 3061 * prepared to see failures and likely do not have any failure 3062 * handling code. 3063 */ 3064 if (gfp & __GFP_NOFAIL) { 3065 page_counter_charge(&memcg->kmem, nr_pages); 3066 goto out; 3067 } 3068 cancel_charge(memcg, nr_pages); 3069 ret = -ENOMEM; 3070 } 3071 out: 3072 css_put(&memcg->css); 3073 3074 return ret; 3075 } 3076 3077 /** 3078 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3079 * @page: page to charge 3080 * @gfp: reclaim mode 3081 * @order: allocation order 3082 * 3083 * Returns 0 on success, an error code on failure. 3084 */ 3085 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3086 { 3087 struct obj_cgroup *objcg; 3088 int ret = 0; 3089 3090 objcg = get_obj_cgroup_from_current(); 3091 if (objcg) { 3092 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3093 if (!ret) { 3094 page->memcg_data = (unsigned long)objcg | 3095 MEMCG_DATA_KMEM; 3096 return 0; 3097 } 3098 obj_cgroup_put(objcg); 3099 } 3100 return ret; 3101 } 3102 3103 /** 3104 * __memcg_kmem_uncharge_page: uncharge a kmem page 3105 * @page: page to uncharge 3106 * @order: allocation order 3107 */ 3108 void __memcg_kmem_uncharge_page(struct page *page, int order) 3109 { 3110 struct obj_cgroup *objcg; 3111 unsigned int nr_pages = 1 << order; 3112 3113 if (!PageMemcgKmem(page)) 3114 return; 3115 3116 objcg = __page_objcg(page); 3117 obj_cgroup_uncharge_pages(objcg, nr_pages); 3118 page->memcg_data = 0; 3119 obj_cgroup_put(objcg); 3120 } 3121 3122 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3123 { 3124 struct memcg_stock_pcp *stock; 3125 unsigned long flags; 3126 bool ret = false; 3127 3128 local_irq_save(flags); 3129 3130 stock = this_cpu_ptr(&memcg_stock); 3131 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3132 stock->nr_bytes -= nr_bytes; 3133 ret = true; 3134 } 3135 3136 local_irq_restore(flags); 3137 3138 return ret; 3139 } 3140 3141 static void drain_obj_stock(struct memcg_stock_pcp *stock) 3142 { 3143 struct obj_cgroup *old = stock->cached_objcg; 3144 3145 if (!old) 3146 return; 3147 3148 if (stock->nr_bytes) { 3149 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3150 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3151 3152 if (nr_pages) 3153 obj_cgroup_uncharge_pages(old, nr_pages); 3154 3155 /* 3156 * The leftover is flushed to the centralized per-memcg value. 3157 * On the next attempt to refill obj stock it will be moved 3158 * to a per-cpu stock (probably, on an other CPU), see 3159 * refill_obj_stock(). 3160 * 3161 * How often it's flushed is a trade-off between the memory 3162 * limit enforcement accuracy and potential CPU contention, 3163 * so it might be changed in the future. 
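 *
 * For example, with 4KiB pages a cached stock->nr_bytes of 5000 splits
 * into nr_pages = 5000 >> PAGE_SHIFT = 1 page, uncharged above, and
 * 5000 & (PAGE_SIZE - 1) = 904 leftover bytes, added to
 * objcg->nr_charged_bytes just below.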
3164 */ 3165 atomic_add(nr_bytes, &old->nr_charged_bytes); 3166 stock->nr_bytes = 0; 3167 } 3168 3169 obj_cgroup_put(old); 3170 stock->cached_objcg = NULL; 3171 } 3172 3173 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3174 struct mem_cgroup *root_memcg) 3175 { 3176 struct mem_cgroup *memcg; 3177 3178 if (stock->cached_objcg) { 3179 memcg = obj_cgroup_memcg(stock->cached_objcg); 3180 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3181 return true; 3182 } 3183 3184 return false; 3185 } 3186 3187 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3188 { 3189 struct memcg_stock_pcp *stock; 3190 unsigned long flags; 3191 3192 local_irq_save(flags); 3193 3194 stock = this_cpu_ptr(&memcg_stock); 3195 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3196 drain_obj_stock(stock); 3197 obj_cgroup_get(objcg); 3198 stock->cached_objcg = objcg; 3199 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); 3200 } 3201 stock->nr_bytes += nr_bytes; 3202 3203 if (stock->nr_bytes > PAGE_SIZE) 3204 drain_obj_stock(stock); 3205 3206 local_irq_restore(flags); 3207 } 3208 3209 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3210 { 3211 unsigned int nr_pages, nr_bytes; 3212 int ret; 3213 3214 if (consume_obj_stock(objcg, size)) 3215 return 0; 3216 3217 /* 3218 * In theory, memcg->nr_charged_bytes can have enough 3219 * pre-charged bytes to satisfy the allocation. However, 3220 * flushing memcg->nr_charged_bytes requires two atomic 3221 * operations, and memcg->nr_charged_bytes can't be big, 3222 * so it's better to ignore it and try grab some new pages. 3223 * memcg->nr_charged_bytes will be flushed in 3224 * refill_obj_stock(), called from this function or 3225 * independently later. 3226 */ 3227 nr_pages = size >> PAGE_SHIFT; 3228 nr_bytes = size & (PAGE_SIZE - 1); 3229 3230 if (nr_bytes) 3231 nr_pages += 1; 3232 3233 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3234 if (!ret && nr_bytes) 3235 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); 3236 3237 return ret; 3238 } 3239 3240 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3241 { 3242 refill_obj_stock(objcg, size); 3243 } 3244 3245 #endif /* CONFIG_MEMCG_KMEM */ 3246 3247 /* 3248 * Because page_memcg(head) is not set on tails, set it now. 3249 */ 3250 void split_page_memcg(struct page *head, unsigned int nr) 3251 { 3252 struct mem_cgroup *memcg = page_memcg(head); 3253 int i; 3254 3255 if (mem_cgroup_disabled() || !memcg) 3256 return; 3257 3258 for (i = 1; i < nr; i++) 3259 head[i].memcg_data = head->memcg_data; 3260 3261 if (PageMemcgKmem(head)) 3262 obj_cgroup_get_many(__page_objcg(head), nr - 1); 3263 else 3264 css_get_many(&memcg->css, nr - 1); 3265 } 3266 3267 #ifdef CONFIG_MEMCG_SWAP 3268 /** 3269 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3270 * @entry: swap entry to be moved 3271 * @from: mem_cgroup which the entry is moved from 3272 * @to: mem_cgroup which the entry is moved to 3273 * 3274 * It succeeds only when the swap_cgroup's record for this entry is the same 3275 * as the mem_cgroup's id of @from. 3276 * 3277 * Returns 0 on success, -EINVAL on failure. 3278 * 3279 * The caller must have charged to @to, IOW, called page_counter_charge() about 3280 * both res and memsw, and called css_get(). 
3281 */ 3282 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3283 struct mem_cgroup *from, struct mem_cgroup *to) 3284 { 3285 unsigned short old_id, new_id; 3286 3287 old_id = mem_cgroup_id(from); 3288 new_id = mem_cgroup_id(to); 3289 3290 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3291 mod_memcg_state(from, MEMCG_SWAP, -1); 3292 mod_memcg_state(to, MEMCG_SWAP, 1); 3293 return 0; 3294 } 3295 return -EINVAL; 3296 } 3297 #else 3298 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3299 struct mem_cgroup *from, struct mem_cgroup *to) 3300 { 3301 return -EINVAL; 3302 } 3303 #endif 3304 3305 static DEFINE_MUTEX(memcg_max_mutex); 3306 3307 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3308 unsigned long max, bool memsw) 3309 { 3310 bool enlarge = false; 3311 bool drained = false; 3312 int ret; 3313 bool limits_invariant; 3314 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3315 3316 do { 3317 if (signal_pending(current)) { 3318 ret = -EINTR; 3319 break; 3320 } 3321 3322 mutex_lock(&memcg_max_mutex); 3323 /* 3324 * Make sure that the new limit (memsw or memory limit) doesn't 3325 * break our basic invariant rule memory.max <= memsw.max. 3326 */ 3327 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3328 max <= memcg->memsw.max; 3329 if (!limits_invariant) { 3330 mutex_unlock(&memcg_max_mutex); 3331 ret = -EINVAL; 3332 break; 3333 } 3334 if (max > counter->max) 3335 enlarge = true; 3336 ret = page_counter_set_max(counter, max); 3337 mutex_unlock(&memcg_max_mutex); 3338 3339 if (!ret) 3340 break; 3341 3342 if (!drained) { 3343 drain_all_stock(memcg); 3344 drained = true; 3345 continue; 3346 } 3347 3348 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3349 GFP_KERNEL, !memsw)) { 3350 ret = -EBUSY; 3351 break; 3352 } 3353 } while (true); 3354 3355 if (!ret && enlarge) 3356 memcg_oom_recover(memcg); 3357 3358 return ret; 3359 } 3360 3361 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3362 gfp_t gfp_mask, 3363 unsigned long *total_scanned) 3364 { 3365 unsigned long nr_reclaimed = 0; 3366 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3367 unsigned long reclaimed; 3368 int loop = 0; 3369 struct mem_cgroup_tree_per_node *mctz; 3370 unsigned long excess; 3371 unsigned long nr_scanned; 3372 3373 if (order > 0) 3374 return 0; 3375 3376 mctz = soft_limit_tree_node(pgdat->node_id); 3377 3378 /* 3379 * Do not even bother to check the largest node if the root 3380 * is empty. Do it lockless to prevent lock bouncing. Races 3381 * are acceptable as soft limit is best effort anyway. 
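 *
 * Illustrative cgroup-v1 usage (not code from this file): after
 *
 *	echo 256M > <cgroup>/memory.soft_limit_in_bytes
 *
 * the group may exceed 256M while memory is plentiful, but once the node
 * comes under global reclaim this function walks the per-node tree of
 * groups over their soft limit and reclaims from the largest offenders
 * first.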
3382 */ 3383 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3384 return 0; 3385 3386 /* 3387 * This loop can run a while, specially if mem_cgroup's continuously 3388 * keep exceeding their soft limit and putting the system under 3389 * pressure 3390 */ 3391 do { 3392 if (next_mz) 3393 mz = next_mz; 3394 else 3395 mz = mem_cgroup_largest_soft_limit_node(mctz); 3396 if (!mz) 3397 break; 3398 3399 nr_scanned = 0; 3400 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3401 gfp_mask, &nr_scanned); 3402 nr_reclaimed += reclaimed; 3403 *total_scanned += nr_scanned; 3404 spin_lock_irq(&mctz->lock); 3405 __mem_cgroup_remove_exceeded(mz, mctz); 3406 3407 /* 3408 * If we failed to reclaim anything from this memory cgroup 3409 * it is time to move on to the next cgroup 3410 */ 3411 next_mz = NULL; 3412 if (!reclaimed) 3413 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3414 3415 excess = soft_limit_excess(mz->memcg); 3416 /* 3417 * One school of thought says that we should not add 3418 * back the node to the tree if reclaim returns 0. 3419 * But our reclaim could return 0, simply because due 3420 * to priority we are exposing a smaller subset of 3421 * memory to reclaim from. Consider this as a longer 3422 * term TODO. 3423 */ 3424 /* If excess == 0, no tree ops */ 3425 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3426 spin_unlock_irq(&mctz->lock); 3427 css_put(&mz->memcg->css); 3428 loop++; 3429 /* 3430 * Could not reclaim anything and there are no more 3431 * mem cgroups to try or we seem to be looping without 3432 * reclaiming anything. 3433 */ 3434 if (!nr_reclaimed && 3435 (next_mz == NULL || 3436 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3437 break; 3438 } while (!nr_reclaimed); 3439 if (next_mz) 3440 css_put(&next_mz->memcg->css); 3441 return nr_reclaimed; 3442 } 3443 3444 /* 3445 * Reclaims as many pages from the given memcg as possible. 3446 * 3447 * Caller is responsible for holding css reference for memcg. 3448 */ 3449 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3450 { 3451 int nr_retries = MAX_RECLAIM_RETRIES; 3452 3453 /* we call try-to-free pages for make this cgroup empty */ 3454 lru_add_drain_all(); 3455 3456 drain_all_stock(memcg); 3457 3458 /* try to free all pages in this cgroup */ 3459 while (nr_retries && page_counter_read(&memcg->memory)) { 3460 int progress; 3461 3462 if (signal_pending(current)) 3463 return -EINTR; 3464 3465 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3466 GFP_KERNEL, true); 3467 if (!progress) { 3468 nr_retries--; 3469 /* maybe some writeback is necessary */ 3470 congestion_wait(BLK_RW_ASYNC, HZ/10); 3471 } 3472 3473 } 3474 3475 return 0; 3476 } 3477 3478 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3479 char *buf, size_t nbytes, 3480 loff_t off) 3481 { 3482 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3483 3484 if (mem_cgroup_is_root(memcg)) 3485 return -EINVAL; 3486 return mem_cgroup_force_empty(memcg) ?: nbytes; 3487 } 3488 3489 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3490 struct cftype *cft) 3491 { 3492 return 1; 3493 } 3494 3495 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3496 struct cftype *cft, u64 val) 3497 { 3498 if (val == 1) 3499 return 0; 3500 3501 pr_warn_once("Non-hierarchical mode is deprecated. 
" 3502 "Please report your usecase to linux-mm@kvack.org if you " 3503 "depend on this functionality.\n"); 3504 3505 return -EINVAL; 3506 } 3507 3508 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3509 { 3510 unsigned long val; 3511 3512 if (mem_cgroup_is_root(memcg)) { 3513 cgroup_rstat_flush(memcg->css.cgroup); 3514 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3515 memcg_page_state(memcg, NR_ANON_MAPPED); 3516 if (swap) 3517 val += memcg_page_state(memcg, MEMCG_SWAP); 3518 } else { 3519 if (!swap) 3520 val = page_counter_read(&memcg->memory); 3521 else 3522 val = page_counter_read(&memcg->memsw); 3523 } 3524 return val; 3525 } 3526 3527 enum { 3528 RES_USAGE, 3529 RES_LIMIT, 3530 RES_MAX_USAGE, 3531 RES_FAILCNT, 3532 RES_SOFT_LIMIT, 3533 }; 3534 3535 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3536 struct cftype *cft) 3537 { 3538 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3539 struct page_counter *counter; 3540 3541 switch (MEMFILE_TYPE(cft->private)) { 3542 case _MEM: 3543 counter = &memcg->memory; 3544 break; 3545 case _MEMSWAP: 3546 counter = &memcg->memsw; 3547 break; 3548 case _KMEM: 3549 counter = &memcg->kmem; 3550 break; 3551 case _TCP: 3552 counter = &memcg->tcpmem; 3553 break; 3554 default: 3555 BUG(); 3556 } 3557 3558 switch (MEMFILE_ATTR(cft->private)) { 3559 case RES_USAGE: 3560 if (counter == &memcg->memory) 3561 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3562 if (counter == &memcg->memsw) 3563 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3564 return (u64)page_counter_read(counter) * PAGE_SIZE; 3565 case RES_LIMIT: 3566 return (u64)counter->max * PAGE_SIZE; 3567 case RES_MAX_USAGE: 3568 return (u64)counter->watermark * PAGE_SIZE; 3569 case RES_FAILCNT: 3570 return counter->failcnt; 3571 case RES_SOFT_LIMIT: 3572 return (u64)memcg->soft_limit * PAGE_SIZE; 3573 default: 3574 BUG(); 3575 } 3576 } 3577 3578 #ifdef CONFIG_MEMCG_KMEM 3579 static int memcg_online_kmem(struct mem_cgroup *memcg) 3580 { 3581 struct obj_cgroup *objcg; 3582 int memcg_id; 3583 3584 if (cgroup_memory_nokmem) 3585 return 0; 3586 3587 BUG_ON(memcg->kmemcg_id >= 0); 3588 BUG_ON(memcg->kmem_state); 3589 3590 memcg_id = memcg_alloc_cache_id(); 3591 if (memcg_id < 0) 3592 return memcg_id; 3593 3594 objcg = obj_cgroup_alloc(); 3595 if (!objcg) { 3596 memcg_free_cache_id(memcg_id); 3597 return -ENOMEM; 3598 } 3599 objcg->memcg = memcg; 3600 rcu_assign_pointer(memcg->objcg, objcg); 3601 3602 static_branch_enable(&memcg_kmem_enabled_key); 3603 3604 memcg->kmemcg_id = memcg_id; 3605 memcg->kmem_state = KMEM_ONLINE; 3606 3607 return 0; 3608 } 3609 3610 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3611 { 3612 struct cgroup_subsys_state *css; 3613 struct mem_cgroup *parent, *child; 3614 int kmemcg_id; 3615 3616 if (memcg->kmem_state != KMEM_ONLINE) 3617 return; 3618 3619 memcg->kmem_state = KMEM_ALLOCATED; 3620 3621 parent = parent_mem_cgroup(memcg); 3622 if (!parent) 3623 parent = root_mem_cgroup; 3624 3625 memcg_reparent_objcgs(memcg, parent); 3626 3627 kmemcg_id = memcg->kmemcg_id; 3628 BUG_ON(kmemcg_id < 0); 3629 3630 /* 3631 * Change kmemcg_id of this cgroup and all its descendants to the 3632 * parent's id, and then move all entries from this cgroup's list_lrus 3633 * to ones of the parent. After we have finished, all list_lrus 3634 * corresponding to this cgroup are guaranteed to remain empty. The 3635 * ordering is imposed by list_lru_node->lock taken by 3636 * memcg_drain_all_list_lrus(). 
3637 */ 3638 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3639 css_for_each_descendant_pre(css, &memcg->css) { 3640 child = mem_cgroup_from_css(css); 3641 BUG_ON(child->kmemcg_id != kmemcg_id); 3642 child->kmemcg_id = parent->kmemcg_id; 3643 } 3644 rcu_read_unlock(); 3645 3646 memcg_drain_all_list_lrus(kmemcg_id, parent); 3647 3648 memcg_free_cache_id(kmemcg_id); 3649 } 3650 3651 static void memcg_free_kmem(struct mem_cgroup *memcg) 3652 { 3653 /* css_alloc() failed, offlining didn't happen */ 3654 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3655 memcg_offline_kmem(memcg); 3656 } 3657 #else 3658 static int memcg_online_kmem(struct mem_cgroup *memcg) 3659 { 3660 return 0; 3661 } 3662 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3663 { 3664 } 3665 static void memcg_free_kmem(struct mem_cgroup *memcg) 3666 { 3667 } 3668 #endif /* CONFIG_MEMCG_KMEM */ 3669 3670 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3671 unsigned long max) 3672 { 3673 int ret; 3674 3675 mutex_lock(&memcg_max_mutex); 3676 ret = page_counter_set_max(&memcg->kmem, max); 3677 mutex_unlock(&memcg_max_mutex); 3678 return ret; 3679 } 3680 3681 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3682 { 3683 int ret; 3684 3685 mutex_lock(&memcg_max_mutex); 3686 3687 ret = page_counter_set_max(&memcg->tcpmem, max); 3688 if (ret) 3689 goto out; 3690 3691 if (!memcg->tcpmem_active) { 3692 /* 3693 * The active flag needs to be written after the static_key 3694 * update. This is what guarantees that the socket activation 3695 * function is the last one to run. See mem_cgroup_sk_alloc() 3696 * for details, and note that we don't mark any socket as 3697 * belonging to this memcg until that flag is up. 3698 * 3699 * We need to do this, because static_keys will span multiple 3700 * sites, but we can't control their order. If we mark a socket 3701 * as accounted, but the accounting functions are not patched in 3702 * yet, we'll lose accounting. 3703 * 3704 * We never race with the readers in mem_cgroup_sk_alloc(), 3705 * because when this value change, the code to process it is not 3706 * patched in yet. 3707 */ 3708 static_branch_inc(&memcg_sockets_enabled_key); 3709 memcg->tcpmem_active = true; 3710 } 3711 out: 3712 mutex_unlock(&memcg_max_mutex); 3713 return ret; 3714 } 3715 3716 /* 3717 * The user of this function is... 3718 * RES_LIMIT. 3719 */ 3720 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3721 char *buf, size_t nbytes, loff_t off) 3722 { 3723 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3724 unsigned long nr_pages; 3725 int ret; 3726 3727 buf = strstrip(buf); 3728 ret = page_counter_memparse(buf, "-1", &nr_pages); 3729 if (ret) 3730 return ret; 3731 3732 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3733 case RES_LIMIT: 3734 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3735 ret = -EINVAL; 3736 break; 3737 } 3738 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3739 case _MEM: 3740 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3741 break; 3742 case _MEMSWAP: 3743 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3744 break; 3745 case _KMEM: 3746 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. 
" 3747 "Please report your usecase to linux-mm@kvack.org if you " 3748 "depend on this functionality.\n"); 3749 ret = memcg_update_kmem_max(memcg, nr_pages); 3750 break; 3751 case _TCP: 3752 ret = memcg_update_tcp_max(memcg, nr_pages); 3753 break; 3754 } 3755 break; 3756 case RES_SOFT_LIMIT: 3757 memcg->soft_limit = nr_pages; 3758 ret = 0; 3759 break; 3760 } 3761 return ret ?: nbytes; 3762 } 3763 3764 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3765 size_t nbytes, loff_t off) 3766 { 3767 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3768 struct page_counter *counter; 3769 3770 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3771 case _MEM: 3772 counter = &memcg->memory; 3773 break; 3774 case _MEMSWAP: 3775 counter = &memcg->memsw; 3776 break; 3777 case _KMEM: 3778 counter = &memcg->kmem; 3779 break; 3780 case _TCP: 3781 counter = &memcg->tcpmem; 3782 break; 3783 default: 3784 BUG(); 3785 } 3786 3787 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3788 case RES_MAX_USAGE: 3789 page_counter_reset_watermark(counter); 3790 break; 3791 case RES_FAILCNT: 3792 counter->failcnt = 0; 3793 break; 3794 default: 3795 BUG(); 3796 } 3797 3798 return nbytes; 3799 } 3800 3801 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3802 struct cftype *cft) 3803 { 3804 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3805 } 3806 3807 #ifdef CONFIG_MMU 3808 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3809 struct cftype *cft, u64 val) 3810 { 3811 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3812 3813 if (val & ~MOVE_MASK) 3814 return -EINVAL; 3815 3816 /* 3817 * No kind of locking is needed in here, because ->can_attach() will 3818 * check this value once in the beginning of the process, and then carry 3819 * on with stale data. This means that changes to this value will only 3820 * affect task migrations starting after the change. 
3821 */ 3822 memcg->move_charge_at_immigrate = val; 3823 return 0; 3824 } 3825 #else 3826 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3827 struct cftype *cft, u64 val) 3828 { 3829 return -ENOSYS; 3830 } 3831 #endif 3832 3833 #ifdef CONFIG_NUMA 3834 3835 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3836 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3837 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3838 3839 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3840 int nid, unsigned int lru_mask, bool tree) 3841 { 3842 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3843 unsigned long nr = 0; 3844 enum lru_list lru; 3845 3846 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3847 3848 for_each_lru(lru) { 3849 if (!(BIT(lru) & lru_mask)) 3850 continue; 3851 if (tree) 3852 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3853 else 3854 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3855 } 3856 return nr; 3857 } 3858 3859 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3860 unsigned int lru_mask, 3861 bool tree) 3862 { 3863 unsigned long nr = 0; 3864 enum lru_list lru; 3865 3866 for_each_lru(lru) { 3867 if (!(BIT(lru) & lru_mask)) 3868 continue; 3869 if (tree) 3870 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3871 else 3872 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3873 } 3874 return nr; 3875 } 3876 3877 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3878 { 3879 struct numa_stat { 3880 const char *name; 3881 unsigned int lru_mask; 3882 }; 3883 3884 static const struct numa_stat stats[] = { 3885 { "total", LRU_ALL }, 3886 { "file", LRU_ALL_FILE }, 3887 { "anon", LRU_ALL_ANON }, 3888 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3889 }; 3890 const struct numa_stat *stat; 3891 int nid; 3892 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3893 3894 cgroup_rstat_flush(memcg->css.cgroup); 3895 3896 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3897 seq_printf(m, "%s=%lu", stat->name, 3898 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3899 false)); 3900 for_each_node_state(nid, N_MEMORY) 3901 seq_printf(m, " N%d=%lu", nid, 3902 mem_cgroup_node_nr_lru_pages(memcg, nid, 3903 stat->lru_mask, false)); 3904 seq_putc(m, '\n'); 3905 } 3906 3907 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3908 3909 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3910 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3911 true)); 3912 for_each_node_state(nid, N_MEMORY) 3913 seq_printf(m, " N%d=%lu", nid, 3914 mem_cgroup_node_nr_lru_pages(memcg, nid, 3915 stat->lru_mask, true)); 3916 seq_putc(m, '\n'); 3917 } 3918 3919 return 0; 3920 } 3921 #endif /* CONFIG_NUMA */ 3922 3923 static const unsigned int memcg1_stats[] = { 3924 NR_FILE_PAGES, 3925 NR_ANON_MAPPED, 3926 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3927 NR_ANON_THPS, 3928 #endif 3929 NR_SHMEM, 3930 NR_FILE_MAPPED, 3931 NR_FILE_DIRTY, 3932 NR_WRITEBACK, 3933 MEMCG_SWAP, 3934 }; 3935 3936 static const char *const memcg1_stat_names[] = { 3937 "cache", 3938 "rss", 3939 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3940 "rss_huge", 3941 #endif 3942 "shmem", 3943 "mapped_file", 3944 "dirty", 3945 "writeback", 3946 "swap", 3947 }; 3948 3949 /* Universal VM events cgroup1 shows, original sort order */ 3950 static const unsigned int memcg1_events[] = { 3951 PGPGIN, 3952 PGPGOUT, 3953 PGFAULT, 3954 PGMAJFAULT, 3955 }; 3956 3957 static int memcg_stat_show(struct seq_file *m, void *v) 3958 { 3959 
struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3960 unsigned long memory, memsw; 3961 struct mem_cgroup *mi; 3962 unsigned int i; 3963 3964 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3965 3966 cgroup_rstat_flush(memcg->css.cgroup); 3967 3968 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3969 unsigned long nr; 3970 3971 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3972 continue; 3973 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 3974 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 3975 } 3976 3977 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3978 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 3979 memcg_events_local(memcg, memcg1_events[i])); 3980 3981 for (i = 0; i < NR_LRU_LISTS; i++) 3982 seq_printf(m, "%s %lu\n", lru_list_name(i), 3983 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 3984 PAGE_SIZE); 3985 3986 /* Hierarchical information */ 3987 memory = memsw = PAGE_COUNTER_MAX; 3988 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3989 memory = min(memory, READ_ONCE(mi->memory.max)); 3990 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 3991 } 3992 seq_printf(m, "hierarchical_memory_limit %llu\n", 3993 (u64)memory * PAGE_SIZE); 3994 if (do_memsw_account()) 3995 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3996 (u64)memsw * PAGE_SIZE); 3997 3998 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3999 unsigned long nr; 4000 4001 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4002 continue; 4003 nr = memcg_page_state(memcg, memcg1_stats[i]); 4004 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4005 (u64)nr * PAGE_SIZE); 4006 } 4007 4008 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4009 seq_printf(m, "total_%s %llu\n", 4010 vm_event_name(memcg1_events[i]), 4011 (u64)memcg_events(memcg, memcg1_events[i])); 4012 4013 for (i = 0; i < NR_LRU_LISTS; i++) 4014 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4015 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4016 PAGE_SIZE); 4017 4018 #ifdef CONFIG_DEBUG_VM 4019 { 4020 pg_data_t *pgdat; 4021 struct mem_cgroup_per_node *mz; 4022 unsigned long anon_cost = 0; 4023 unsigned long file_cost = 0; 4024 4025 for_each_online_pgdat(pgdat) { 4026 mz = memcg->nodeinfo[pgdat->node_id]; 4027 4028 anon_cost += mz->lruvec.anon_cost; 4029 file_cost += mz->lruvec.file_cost; 4030 } 4031 seq_printf(m, "anon_cost %lu\n", anon_cost); 4032 seq_printf(m, "file_cost %lu\n", file_cost); 4033 } 4034 #endif 4035 4036 return 0; 4037 } 4038 4039 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4040 struct cftype *cft) 4041 { 4042 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4043 4044 return mem_cgroup_swappiness(memcg); 4045 } 4046 4047 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4048 struct cftype *cft, u64 val) 4049 { 4050 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4051 4052 if (val > 100) 4053 return -EINVAL; 4054 4055 if (!mem_cgroup_is_root(memcg)) 4056 memcg->swappiness = val; 4057 else 4058 vm_swappiness = val; 4059 4060 return 0; 4061 } 4062 4063 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4064 { 4065 struct mem_cgroup_threshold_ary *t; 4066 unsigned long usage; 4067 int i; 4068 4069 rcu_read_lock(); 4070 if (!swap) 4071 t = rcu_dereference(memcg->thresholds.primary); 4072 else 4073 t = rcu_dereference(memcg->memsw_thresholds.primary); 4074 4075 if (!t) 4076 goto unlock; 4077 4078 usage = mem_cgroup_usage(memcg, swap); 4079 4080 /* 4081 * current_threshold 
points to threshold just below or equal to usage. 4082 * If it's not true, a threshold was crossed after last 4083 * call of __mem_cgroup_threshold(). 4084 */ 4085 i = t->current_threshold; 4086 4087 /* 4088 * Iterate backward over array of thresholds starting from 4089 * current_threshold and check if a threshold is crossed. 4090 * If none of thresholds below usage is crossed, we read 4091 * only one element of the array here. 4092 */ 4093 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4094 eventfd_signal(t->entries[i].eventfd, 1); 4095 4096 /* i = current_threshold + 1 */ 4097 i++; 4098 4099 /* 4100 * Iterate forward over array of thresholds starting from 4101 * current_threshold+1 and check if a threshold is crossed. 4102 * If none of thresholds above usage is crossed, we read 4103 * only one element of the array here. 4104 */ 4105 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4106 eventfd_signal(t->entries[i].eventfd, 1); 4107 4108 /* Update current_threshold */ 4109 t->current_threshold = i - 1; 4110 unlock: 4111 rcu_read_unlock(); 4112 } 4113 4114 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4115 { 4116 while (memcg) { 4117 __mem_cgroup_threshold(memcg, false); 4118 if (do_memsw_account()) 4119 __mem_cgroup_threshold(memcg, true); 4120 4121 memcg = parent_mem_cgroup(memcg); 4122 } 4123 } 4124 4125 static int compare_thresholds(const void *a, const void *b) 4126 { 4127 const struct mem_cgroup_threshold *_a = a; 4128 const struct mem_cgroup_threshold *_b = b; 4129 4130 if (_a->threshold > _b->threshold) 4131 return 1; 4132 4133 if (_a->threshold < _b->threshold) 4134 return -1; 4135 4136 return 0; 4137 } 4138 4139 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4140 { 4141 struct mem_cgroup_eventfd_list *ev; 4142 4143 spin_lock(&memcg_oom_lock); 4144 4145 list_for_each_entry(ev, &memcg->oom_notify, list) 4146 eventfd_signal(ev->eventfd, 1); 4147 4148 spin_unlock(&memcg_oom_lock); 4149 return 0; 4150 } 4151 4152 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4153 { 4154 struct mem_cgroup *iter; 4155 4156 for_each_mem_cgroup_tree(iter, memcg) 4157 mem_cgroup_oom_notify_cb(iter); 4158 } 4159 4160 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4161 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4162 { 4163 struct mem_cgroup_thresholds *thresholds; 4164 struct mem_cgroup_threshold_ary *new; 4165 unsigned long threshold; 4166 unsigned long usage; 4167 int i, size, ret; 4168 4169 ret = page_counter_memparse(args, "-1", &threshold); 4170 if (ret) 4171 return ret; 4172 4173 mutex_lock(&memcg->thresholds_lock); 4174 4175 if (type == _MEM) { 4176 thresholds = &memcg->thresholds; 4177 usage = mem_cgroup_usage(memcg, false); 4178 } else if (type == _MEMSWAP) { 4179 thresholds = &memcg->memsw_thresholds; 4180 usage = mem_cgroup_usage(memcg, true); 4181 } else 4182 BUG(); 4183 4184 /* Check if a threshold crossed before adding a new one */ 4185 if (thresholds->primary) 4186 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4187 4188 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4189 4190 /* Allocate memory for new array of thresholds */ 4191 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4192 if (!new) { 4193 ret = -ENOMEM; 4194 goto unlock; 4195 } 4196 new->size = size; 4197 4198 /* Copy thresholds (if any) to new array */ 4199 if (thresholds->primary) 4200 memcpy(new->entries, thresholds->primary->entries, 4201 flex_array_size(new, entries, size - 1)); 4202 4203 /* Add new threshold */ 4204 new->entries[size - 1].eventfd = eventfd; 4205 new->entries[size - 1].threshold = threshold; 4206 4207 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4208 sort(new->entries, size, sizeof(*new->entries), 4209 compare_thresholds, NULL); 4210 4211 /* Find current threshold */ 4212 new->current_threshold = -1; 4213 for (i = 0; i < size; i++) { 4214 if (new->entries[i].threshold <= usage) { 4215 /* 4216 * new->current_threshold will not be used until 4217 * rcu_assign_pointer(), so it's safe to increment 4218 * it here. 4219 */ 4220 ++new->current_threshold; 4221 } else 4222 break; 4223 } 4224 4225 /* Free old spare buffer and save old primary buffer as spare */ 4226 kfree(thresholds->spare); 4227 thresholds->spare = thresholds->primary; 4228 4229 rcu_assign_pointer(thresholds->primary, new); 4230 4231 /* To be sure that nobody uses thresholds */ 4232 synchronize_rcu(); 4233 4234 unlock: 4235 mutex_unlock(&memcg->thresholds_lock); 4236 4237 return ret; 4238 } 4239 4240 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4241 struct eventfd_ctx *eventfd, const char *args) 4242 { 4243 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4244 } 4245 4246 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4247 struct eventfd_ctx *eventfd, const char *args) 4248 { 4249 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4250 } 4251 4252 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4253 struct eventfd_ctx *eventfd, enum res_type type) 4254 { 4255 struct mem_cgroup_thresholds *thresholds; 4256 struct mem_cgroup_threshold_ary *new; 4257 unsigned long usage; 4258 int i, j, size, entries; 4259 4260 mutex_lock(&memcg->thresholds_lock); 4261 4262 if (type == _MEM) { 4263 thresholds = &memcg->thresholds; 4264 usage = mem_cgroup_usage(memcg, false); 4265 } else if (type == _MEMSWAP) { 4266 thresholds = &memcg->memsw_thresholds; 4267 usage = mem_cgroup_usage(memcg, true); 4268 } else 4269 BUG(); 4270 4271 if (!thresholds->primary) 4272 goto unlock; 4273 4274 /* Check if a threshold crossed before removing */ 4275 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4276 4277 /* Calculate new number of threshold */ 4278 size = entries = 0; 4279 for (i = 0; i < thresholds->primary->size; i++) { 4280 if (thresholds->primary->entries[i].eventfd != eventfd) 4281 size++; 4282 else 4283 entries++; 4284 } 4285 4286 new = thresholds->spare; 4287 4288 /* If no items related to eventfd have been cleared, nothing to do */ 4289 if (!entries) 4290 goto unlock; 4291 4292 /* Set thresholds array to NULL if we don't have thresholds */ 4293 if (!size) { 4294 kfree(new); 4295 new = NULL; 4296 goto swap_buffers; 4297 } 4298 4299 new->size = size; 4300 4301 /* Copy thresholds and find current threshold */ 4302 new->current_threshold = -1; 4303 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4304 if (thresholds->primary->entries[i].eventfd == eventfd) 4305 continue; 4306 4307 new->entries[j] = thresholds->primary->entries[i]; 4308 if 
(new->entries[j].threshold <= usage) { 4309 /* 4310 * new->current_threshold will not be used 4311 * until rcu_assign_pointer(), so it's safe to increment 4312 * it here. 4313 */ 4314 ++new->current_threshold; 4315 } 4316 j++; 4317 } 4318 4319 swap_buffers: 4320 /* Swap primary and spare array */ 4321 thresholds->spare = thresholds->primary; 4322 4323 rcu_assign_pointer(thresholds->primary, new); 4324 4325 /* To be sure that nobody uses thresholds */ 4326 synchronize_rcu(); 4327 4328 /* If all events are unregistered, free the spare array */ 4329 if (!new) { 4330 kfree(thresholds->spare); 4331 thresholds->spare = NULL; 4332 } 4333 unlock: 4334 mutex_unlock(&memcg->thresholds_lock); 4335 } 4336 4337 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4338 struct eventfd_ctx *eventfd) 4339 { 4340 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4341 } 4342 4343 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4344 struct eventfd_ctx *eventfd) 4345 { 4346 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4347 } 4348 4349 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4350 struct eventfd_ctx *eventfd, const char *args) 4351 { 4352 struct mem_cgroup_eventfd_list *event; 4353 4354 event = kmalloc(sizeof(*event), GFP_KERNEL); 4355 if (!event) 4356 return -ENOMEM; 4357 4358 spin_lock(&memcg_oom_lock); 4359 4360 event->eventfd = eventfd; 4361 list_add(&event->list, &memcg->oom_notify); 4362 4363 /* already in OOM ? */ 4364 if (memcg->under_oom) 4365 eventfd_signal(eventfd, 1); 4366 spin_unlock(&memcg_oom_lock); 4367 4368 return 0; 4369 } 4370 4371 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4372 struct eventfd_ctx *eventfd) 4373 { 4374 struct mem_cgroup_eventfd_list *ev, *tmp; 4375 4376 spin_lock(&memcg_oom_lock); 4377 4378 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4379 if (ev->eventfd == eventfd) { 4380 list_del(&ev->list); 4381 kfree(ev); 4382 } 4383 } 4384 4385 spin_unlock(&memcg_oom_lock); 4386 } 4387 4388 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4389 { 4390 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4391 4392 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4393 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4394 seq_printf(sf, "oom_kill %lu\n", 4395 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4396 return 0; 4397 } 4398 4399 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4400 struct cftype *cft, u64 val) 4401 { 4402 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4403 4404 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4405 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4406 return -EINVAL; 4407 4408 memcg->oom_kill_disable = val; 4409 if (!val) 4410 memcg_oom_recover(memcg); 4411 4412 return 0; 4413 } 4414 4415 #ifdef CONFIG_CGROUP_WRITEBACK 4416 4417 #include <trace/events/writeback.h> 4418 4419 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4420 { 4421 return wb_domain_init(&memcg->cgwb_domain, gfp); 4422 } 4423 4424 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4425 { 4426 wb_domain_exit(&memcg->cgwb_domain); 4427 } 4428 4429 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4430 { 4431 wb_domain_size_changed(&memcg->cgwb_domain); 4432 } 4433 4434 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4435 { 4436 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4437 4438 if (!memcg->css.parent) 4439 return NULL; 4440 4441 return &memcg->cgwb_domain; 4442 } 4443 4444 /** 4445 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4446 * @wb: bdi_writeback in question 4447 * @pfilepages: out parameter for number of file pages 4448 * @pheadroom: out parameter for number of allocatable pages according to memcg 4449 * @pdirty: out parameter for number of dirty pages 4450 * @pwriteback: out parameter for number of pages under writeback 4451 * 4452 * Determine the numbers of file, headroom, dirty, and writeback pages in 4453 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4454 * is a bit more involved. 4455 * 4456 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4457 * headroom is calculated as the lowest headroom of itself and the 4458 * ancestors. Note that this doesn't consider the actual amount of 4459 * available memory in the system. The caller should further cap 4460 * *@pheadroom accordingly. 4461 */ 4462 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4463 unsigned long *pheadroom, unsigned long *pdirty, 4464 unsigned long *pwriteback) 4465 { 4466 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4467 struct mem_cgroup *parent; 4468 4469 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); 4470 4471 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4472 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4473 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4474 memcg_page_state(memcg, NR_ACTIVE_FILE); 4475 4476 *pheadroom = PAGE_COUNTER_MAX; 4477 while ((parent = parent_mem_cgroup(memcg))) { 4478 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4479 READ_ONCE(memcg->memory.high)); 4480 unsigned long used = page_counter_read(&memcg->memory); 4481 4482 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4483 memcg = parent; 4484 } 4485 } 4486 4487 /* 4488 * Foreign dirty flushing 4489 * 4490 * There's an inherent mismatch between memcg and writeback. The former 4491 * trackes ownership per-page while the latter per-inode. This was a 4492 * deliberate design decision because honoring per-page ownership in the 4493 * writeback path is complicated, may lead to higher CPU and IO overheads 4494 * and deemed unnecessary given that write-sharing an inode across 4495 * different cgroups isn't a common use-case. 4496 * 4497 * Combined with inode majority-writer ownership switching, this works well 4498 * enough in most cases but there are some pathological cases. For 4499 * example, let's say there are two cgroups A and B which keep writing to 4500 * different but confined parts of the same inode. B owns the inode and 4501 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4502 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4503 * triggering background writeback. A will be slowed down without a way to 4504 * make writeback of the dirty pages happen. 4505 * 4506 * Conditions like the above can lead to a cgroup getting repatedly and 4507 * severely throttled after making some progress after each 4508 * dirty_expire_interval while the underyling IO device is almost 4509 * completely idle. 4510 * 4511 * Solving this problem completely requires matching the ownership tracking 4512 * granularities between memcg and writeback in either direction. 
However, 4513 * the more egregious behaviors can be avoided by simply remembering the 4514 * most recent foreign dirtying events and initiating remote flushes on 4515 * them when local writeback isn't enough to keep the memory clean enough. 4516 * 4517 * The following two functions implement such mechanism. When a foreign 4518 * page - a page whose memcg and writeback ownerships don't match - is 4519 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4520 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4521 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4522 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4523 * foreign bdi_writebacks which haven't expired. Both the numbers of 4524 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4525 * limited to MEMCG_CGWB_FRN_CNT. 4526 * 4527 * The mechanism only remembers IDs and doesn't hold any object references. 4528 * As being wrong occasionally doesn't matter, updates and accesses to the 4529 * records are lockless and racy. 4530 */ 4531 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, 4532 struct bdi_writeback *wb) 4533 { 4534 struct mem_cgroup *memcg = page_memcg(page); 4535 struct memcg_cgwb_frn *frn; 4536 u64 now = get_jiffies_64(); 4537 u64 oldest_at = now; 4538 int oldest = -1; 4539 int i; 4540 4541 trace_track_foreign_dirty(page, wb); 4542 4543 /* 4544 * Pick the slot to use. If there is already a slot for @wb, keep 4545 * using it. If not replace the oldest one which isn't being 4546 * written out. 4547 */ 4548 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4549 frn = &memcg->cgwb_frn[i]; 4550 if (frn->bdi_id == wb->bdi->id && 4551 frn->memcg_id == wb->memcg_css->id) 4552 break; 4553 if (time_before64(frn->at, oldest_at) && 4554 atomic_read(&frn->done.cnt) == 1) { 4555 oldest = i; 4556 oldest_at = frn->at; 4557 } 4558 } 4559 4560 if (i < MEMCG_CGWB_FRN_CNT) { 4561 /* 4562 * Re-using an existing one. Update timestamp lazily to 4563 * avoid making the cacheline hot. We want them to be 4564 * reasonably up-to-date and significantly shorter than 4565 * dirty_expire_interval as that's what expires the record. 4566 * Use the shorter of 1s and dirty_expire_interval / 8. 4567 */ 4568 unsigned long update_intv = 4569 min_t(unsigned long, HZ, 4570 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4571 4572 if (time_before64(frn->at, now - update_intv)) 4573 frn->at = now; 4574 } else if (oldest >= 0) { 4575 /* replace the oldest free one */ 4576 frn = &memcg->cgwb_frn[oldest]; 4577 frn->bdi_id = wb->bdi->id; 4578 frn->memcg_id = wb->memcg_css->id; 4579 frn->at = now; 4580 } 4581 } 4582 4583 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4584 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4585 { 4586 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4587 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4588 u64 now = jiffies_64; 4589 int i; 4590 4591 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4592 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4593 4594 /* 4595 * If the record is older than dirty_expire_interval, 4596 * writeback on it has already started. No need to kick it 4597 * off again. Also, don't start a new one if there's 4598 * already one in flight. 
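 *
 * A rough worked example, assuming the default dirty_expire_interval
 * of 3000 centisecs: intv below is msecs_to_jiffies(3000 * 10), i.e.
 * a 30 second window.  A record dirtied 10 seconds ago with
 * done.cnt == 1 (no flush in flight) gets a remote flush queued,
 * while a 40 second old record is assumed to have been picked up by
 * regular writeback expiry already and is skipped.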
4599 */ 4600 if (time_after64(frn->at, now - intv) && 4601 atomic_read(&frn->done.cnt) == 1) { 4602 frn->at = 0; 4603 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4604 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4605 WB_REASON_FOREIGN_FLUSH, 4606 &frn->done); 4607 } 4608 } 4609 } 4610 4611 #else /* CONFIG_CGROUP_WRITEBACK */ 4612 4613 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4614 { 4615 return 0; 4616 } 4617 4618 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4619 { 4620 } 4621 4622 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4623 { 4624 } 4625 4626 #endif /* CONFIG_CGROUP_WRITEBACK */ 4627 4628 /* 4629 * DO NOT USE IN NEW FILES. 4630 * 4631 * "cgroup.event_control" implementation. 4632 * 4633 * This is way over-engineered. It tries to support fully configurable 4634 * events for each user. Such level of flexibility is completely 4635 * unnecessary especially in the light of the planned unified hierarchy. 4636 * 4637 * Please deprecate this and replace with something simpler if at all 4638 * possible. 4639 */ 4640 4641 /* 4642 * Unregister event and free resources. 4643 * 4644 * Gets called from workqueue. 4645 */ 4646 static void memcg_event_remove(struct work_struct *work) 4647 { 4648 struct mem_cgroup_event *event = 4649 container_of(work, struct mem_cgroup_event, remove); 4650 struct mem_cgroup *memcg = event->memcg; 4651 4652 remove_wait_queue(event->wqh, &event->wait); 4653 4654 event->unregister_event(memcg, event->eventfd); 4655 4656 /* Notify userspace the event is going away. */ 4657 eventfd_signal(event->eventfd, 1); 4658 4659 eventfd_ctx_put(event->eventfd); 4660 kfree(event); 4661 css_put(&memcg->css); 4662 } 4663 4664 /* 4665 * Gets called on EPOLLHUP on eventfd when user closes it. 4666 * 4667 * Called with wqh->lock held and interrupts disabled. 4668 */ 4669 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4670 int sync, void *key) 4671 { 4672 struct mem_cgroup_event *event = 4673 container_of(wait, struct mem_cgroup_event, wait); 4674 struct mem_cgroup *memcg = event->memcg; 4675 __poll_t flags = key_to_poll(key); 4676 4677 if (flags & EPOLLHUP) { 4678 /* 4679 * If the event has been detached at cgroup removal, we 4680 * can simply return knowing the other side will cleanup 4681 * for us. 4682 * 4683 * We can't race against event freeing since the other 4684 * side will require wqh->lock via remove_wait_queue(), 4685 * which we hold. 4686 */ 4687 spin_lock(&memcg->event_list_lock); 4688 if (!list_empty(&event->list)) { 4689 list_del_init(&event->list); 4690 /* 4691 * We are in atomic context, but cgroup_event_remove() 4692 * may sleep, so we have to call it in workqueue. 4693 */ 4694 schedule_work(&event->remove); 4695 } 4696 spin_unlock(&memcg->event_list_lock); 4697 } 4698 4699 return 0; 4700 } 4701 4702 static void memcg_event_ptable_queue_proc(struct file *file, 4703 wait_queue_head_t *wqh, poll_table *pt) 4704 { 4705 struct mem_cgroup_event *event = 4706 container_of(pt, struct mem_cgroup_event, pt); 4707 4708 event->wqh = wqh; 4709 add_wait_queue(wqh, &event->wait); 4710 } 4711 4712 /* 4713 * DO NOT USE IN NEW FILES. 4714 * 4715 * Parse input and register new cgroup event handler. 4716 * 4717 * Input must be in format '<event_fd> <control_fd> <args>'. 4718 * Interpretation of args is defined by control file implementation. 
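 *
 * A minimal userspace sketch of registering a usage threshold this
 * way (the cgroup v1 mount point and group name are illustrative;
 * needs <sys/eventfd.h>, <fcntl.h>, <stdio.h>, <stdint.h> and
 * <unistd.h>):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	uint64_t ticks;
 *
 *	dprintf(ctl, "%d %d 50M", efd, cfd);
 *	read(efd, &ticks, sizeof(ticks));
 *
 * The write registers a 50M usage threshold (the "50M" string is
 * parsed by page_counter_memparse() in the usage_in_bytes handler);
 * the read() then blocks until eventfd_signal() fires when the
 * threshold is crossed.  Other control files interpret <args>
 * differently, e.g. memory.pressure_level expects a pressure level
 * string.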
4719 */ 4720 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4721 char *buf, size_t nbytes, loff_t off) 4722 { 4723 struct cgroup_subsys_state *css = of_css(of); 4724 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4725 struct mem_cgroup_event *event; 4726 struct cgroup_subsys_state *cfile_css; 4727 unsigned int efd, cfd; 4728 struct fd efile; 4729 struct fd cfile; 4730 const char *name; 4731 char *endp; 4732 int ret; 4733 4734 buf = strstrip(buf); 4735 4736 efd = simple_strtoul(buf, &endp, 10); 4737 if (*endp != ' ') 4738 return -EINVAL; 4739 buf = endp + 1; 4740 4741 cfd = simple_strtoul(buf, &endp, 10); 4742 if ((*endp != ' ') && (*endp != '\0')) 4743 return -EINVAL; 4744 buf = endp + 1; 4745 4746 event = kzalloc(sizeof(*event), GFP_KERNEL); 4747 if (!event) 4748 return -ENOMEM; 4749 4750 event->memcg = memcg; 4751 INIT_LIST_HEAD(&event->list); 4752 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4753 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4754 INIT_WORK(&event->remove, memcg_event_remove); 4755 4756 efile = fdget(efd); 4757 if (!efile.file) { 4758 ret = -EBADF; 4759 goto out_kfree; 4760 } 4761 4762 event->eventfd = eventfd_ctx_fileget(efile.file); 4763 if (IS_ERR(event->eventfd)) { 4764 ret = PTR_ERR(event->eventfd); 4765 goto out_put_efile; 4766 } 4767 4768 cfile = fdget(cfd); 4769 if (!cfile.file) { 4770 ret = -EBADF; 4771 goto out_put_eventfd; 4772 } 4773 4774 /* the process need read permission on control file */ 4775 /* AV: shouldn't we check that it's been opened for read instead? */ 4776 ret = file_permission(cfile.file, MAY_READ); 4777 if (ret < 0) 4778 goto out_put_cfile; 4779 4780 /* 4781 * Determine the event callbacks and set them in @event. This used 4782 * to be done via struct cftype but cgroup core no longer knows 4783 * about these events. The following is crude but the whole thing 4784 * is for compatibility anyway. 4785 * 4786 * DO NOT ADD NEW FILES. 4787 */ 4788 name = cfile.file->f_path.dentry->d_name.name; 4789 4790 if (!strcmp(name, "memory.usage_in_bytes")) { 4791 event->register_event = mem_cgroup_usage_register_event; 4792 event->unregister_event = mem_cgroup_usage_unregister_event; 4793 } else if (!strcmp(name, "memory.oom_control")) { 4794 event->register_event = mem_cgroup_oom_register_event; 4795 event->unregister_event = mem_cgroup_oom_unregister_event; 4796 } else if (!strcmp(name, "memory.pressure_level")) { 4797 event->register_event = vmpressure_register_event; 4798 event->unregister_event = vmpressure_unregister_event; 4799 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4800 event->register_event = memsw_cgroup_usage_register_event; 4801 event->unregister_event = memsw_cgroup_usage_unregister_event; 4802 } else { 4803 ret = -EINVAL; 4804 goto out_put_cfile; 4805 } 4806 4807 /* 4808 * Verify @cfile should belong to @css. Also, remaining events are 4809 * automatically removed on cgroup destruction but the removal is 4810 * asynchronous, so take an extra ref on @css. 
4811 */ 4812 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4813 &memory_cgrp_subsys); 4814 ret = -EINVAL; 4815 if (IS_ERR(cfile_css)) 4816 goto out_put_cfile; 4817 if (cfile_css != css) { 4818 css_put(cfile_css); 4819 goto out_put_cfile; 4820 } 4821 4822 ret = event->register_event(memcg, event->eventfd, buf); 4823 if (ret) 4824 goto out_put_css; 4825 4826 vfs_poll(efile.file, &event->pt); 4827 4828 spin_lock(&memcg->event_list_lock); 4829 list_add(&event->list, &memcg->event_list); 4830 spin_unlock(&memcg->event_list_lock); 4831 4832 fdput(cfile); 4833 fdput(efile); 4834 4835 return nbytes; 4836 4837 out_put_css: 4838 css_put(css); 4839 out_put_cfile: 4840 fdput(cfile); 4841 out_put_eventfd: 4842 eventfd_ctx_put(event->eventfd); 4843 out_put_efile: 4844 fdput(efile); 4845 out_kfree: 4846 kfree(event); 4847 4848 return ret; 4849 } 4850 4851 static struct cftype mem_cgroup_legacy_files[] = { 4852 { 4853 .name = "usage_in_bytes", 4854 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4855 .read_u64 = mem_cgroup_read_u64, 4856 }, 4857 { 4858 .name = "max_usage_in_bytes", 4859 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4860 .write = mem_cgroup_reset, 4861 .read_u64 = mem_cgroup_read_u64, 4862 }, 4863 { 4864 .name = "limit_in_bytes", 4865 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4866 .write = mem_cgroup_write, 4867 .read_u64 = mem_cgroup_read_u64, 4868 }, 4869 { 4870 .name = "soft_limit_in_bytes", 4871 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4872 .write = mem_cgroup_write, 4873 .read_u64 = mem_cgroup_read_u64, 4874 }, 4875 { 4876 .name = "failcnt", 4877 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4878 .write = mem_cgroup_reset, 4879 .read_u64 = mem_cgroup_read_u64, 4880 }, 4881 { 4882 .name = "stat", 4883 .seq_show = memcg_stat_show, 4884 }, 4885 { 4886 .name = "force_empty", 4887 .write = mem_cgroup_force_empty_write, 4888 }, 4889 { 4890 .name = "use_hierarchy", 4891 .write_u64 = mem_cgroup_hierarchy_write, 4892 .read_u64 = mem_cgroup_hierarchy_read, 4893 }, 4894 { 4895 .name = "cgroup.event_control", /* XXX: for compat */ 4896 .write = memcg_write_event_control, 4897 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4898 }, 4899 { 4900 .name = "swappiness", 4901 .read_u64 = mem_cgroup_swappiness_read, 4902 .write_u64 = mem_cgroup_swappiness_write, 4903 }, 4904 { 4905 .name = "move_charge_at_immigrate", 4906 .read_u64 = mem_cgroup_move_charge_read, 4907 .write_u64 = mem_cgroup_move_charge_write, 4908 }, 4909 { 4910 .name = "oom_control", 4911 .seq_show = mem_cgroup_oom_control_read, 4912 .write_u64 = mem_cgroup_oom_control_write, 4913 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4914 }, 4915 { 4916 .name = "pressure_level", 4917 }, 4918 #ifdef CONFIG_NUMA 4919 { 4920 .name = "numa_stat", 4921 .seq_show = memcg_numa_stat_show, 4922 }, 4923 #endif 4924 { 4925 .name = "kmem.limit_in_bytes", 4926 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4927 .write = mem_cgroup_write, 4928 .read_u64 = mem_cgroup_read_u64, 4929 }, 4930 { 4931 .name = "kmem.usage_in_bytes", 4932 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4933 .read_u64 = mem_cgroup_read_u64, 4934 }, 4935 { 4936 .name = "kmem.failcnt", 4937 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4938 .write = mem_cgroup_reset, 4939 .read_u64 = mem_cgroup_read_u64, 4940 }, 4941 { 4942 .name = "kmem.max_usage_in_bytes", 4943 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4944 .write = mem_cgroup_reset, 4945 .read_u64 = mem_cgroup_read_u64, 4946 }, 4947 #if defined(CONFIG_MEMCG_KMEM) && \ 4948 
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 4949 { 4950 .name = "kmem.slabinfo", 4951 .seq_show = memcg_slab_show, 4952 }, 4953 #endif 4954 { 4955 .name = "kmem.tcp.limit_in_bytes", 4956 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4957 .write = mem_cgroup_write, 4958 .read_u64 = mem_cgroup_read_u64, 4959 }, 4960 { 4961 .name = "kmem.tcp.usage_in_bytes", 4962 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4963 .read_u64 = mem_cgroup_read_u64, 4964 }, 4965 { 4966 .name = "kmem.tcp.failcnt", 4967 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4968 .write = mem_cgroup_reset, 4969 .read_u64 = mem_cgroup_read_u64, 4970 }, 4971 { 4972 .name = "kmem.tcp.max_usage_in_bytes", 4973 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4974 .write = mem_cgroup_reset, 4975 .read_u64 = mem_cgroup_read_u64, 4976 }, 4977 { }, /* terminate */ 4978 }; 4979 4980 /* 4981 * Private memory cgroup IDR 4982 * 4983 * Swap-out records and page cache shadow entries need to store memcg 4984 * references in constrained space, so we maintain an ID space that is 4985 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4986 * memory-controlled cgroups to 64k. 4987 * 4988 * However, there usually are many references to the offline CSS after 4989 * the cgroup has been destroyed, such as page cache or reclaimable 4990 * slab objects, that don't need to hang on to the ID. We want to keep 4991 * those dead CSS from occupying IDs, or we might quickly exhaust the 4992 * relatively small ID space and prevent the creation of new cgroups 4993 * even when there are much fewer than 64k cgroups - possibly none. 4994 * 4995 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4996 * be freed and recycled when it's no longer needed, which is usually 4997 * when the CSS is offlined. 4998 * 4999 * The only exception to that are records of swapped out tmpfs/shmem 5000 * pages that need to be attributed to live ancestors on swapin. But 5001 * those references are manageable from userspace. 5002 */ 5003 5004 static DEFINE_IDR(mem_cgroup_idr); 5005 5006 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5007 { 5008 if (memcg->id.id > 0) { 5009 idr_remove(&mem_cgroup_idr, memcg->id.id); 5010 memcg->id.id = 0; 5011 } 5012 } 5013 5014 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5015 unsigned int n) 5016 { 5017 refcount_add(n, &memcg->id.ref); 5018 } 5019 5020 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5021 { 5022 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5023 mem_cgroup_id_remove(memcg); 5024 5025 /* Memcg ID pins CSS */ 5026 css_put(&memcg->css); 5027 } 5028 } 5029 5030 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5031 { 5032 mem_cgroup_id_put_many(memcg, 1); 5033 } 5034 5035 /** 5036 * mem_cgroup_from_id - look up a memcg from a memcg id 5037 * @id: the memcg id to look up 5038 * 5039 * Caller must hold rcu_read_lock(). 5040 */ 5041 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5042 { 5043 WARN_ON_ONCE(!rcu_read_lock_held()); 5044 return idr_find(&mem_cgroup_idr, id); 5045 } 5046 5047 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5048 { 5049 struct mem_cgroup_per_node *pn; 5050 int tmp = node; 5051 /* 5052 * This routine is called against possible nodes. 5053 * But it's BUG to call kmalloc() against offline node. 5054 * 5055 * TODO: this routine can waste much memory for nodes which will 5056 * never be onlined. 
It's better to use memory hotplug callback 5057 * function. 5058 */ 5059 if (!node_state(node, N_NORMAL_MEMORY)) 5060 tmp = -1; 5061 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5062 if (!pn) 5063 return 1; 5064 5065 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5066 GFP_KERNEL_ACCOUNT); 5067 if (!pn->lruvec_stat_local) { 5068 kfree(pn); 5069 return 1; 5070 } 5071 5072 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat, 5073 GFP_KERNEL_ACCOUNT); 5074 if (!pn->lruvec_stat_cpu) { 5075 free_percpu(pn->lruvec_stat_local); 5076 kfree(pn); 5077 return 1; 5078 } 5079 5080 lruvec_init(&pn->lruvec); 5081 pn->usage_in_excess = 0; 5082 pn->on_tree = false; 5083 pn->memcg = memcg; 5084 5085 memcg->nodeinfo[node] = pn; 5086 return 0; 5087 } 5088 5089 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5090 { 5091 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5092 5093 if (!pn) 5094 return; 5095 5096 free_percpu(pn->lruvec_stat_cpu); 5097 free_percpu(pn->lruvec_stat_local); 5098 kfree(pn); 5099 } 5100 5101 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5102 { 5103 int node; 5104 5105 for_each_node(node) 5106 free_mem_cgroup_per_node_info(memcg, node); 5107 free_percpu(memcg->vmstats_percpu); 5108 kfree(memcg); 5109 } 5110 5111 static void mem_cgroup_free(struct mem_cgroup *memcg) 5112 { 5113 int cpu; 5114 5115 memcg_wb_domain_exit(memcg); 5116 /* 5117 * Flush percpu lruvec stats to guarantee the value 5118 * correctness on parent's and all ancestor levels. 5119 */ 5120 for_each_online_cpu(cpu) 5121 memcg_flush_lruvec_page_state(memcg, cpu); 5122 __mem_cgroup_free(memcg); 5123 } 5124 5125 static struct mem_cgroup *mem_cgroup_alloc(void) 5126 { 5127 struct mem_cgroup *memcg; 5128 unsigned int size; 5129 int node; 5130 int __maybe_unused i; 5131 long error = -ENOMEM; 5132 5133 size = sizeof(struct mem_cgroup); 5134 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5135 5136 memcg = kzalloc(size, GFP_KERNEL); 5137 if (!memcg) 5138 return ERR_PTR(error); 5139 5140 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5141 1, MEM_CGROUP_ID_MAX, 5142 GFP_KERNEL); 5143 if (memcg->id.id < 0) { 5144 error = memcg->id.id; 5145 goto fail; 5146 } 5147 5148 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5149 GFP_KERNEL_ACCOUNT); 5150 if (!memcg->vmstats_percpu) 5151 goto fail; 5152 5153 for_each_node(node) 5154 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5155 goto fail; 5156 5157 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5158 goto fail; 5159 5160 INIT_WORK(&memcg->high_work, high_work_func); 5161 INIT_LIST_HEAD(&memcg->oom_notify); 5162 mutex_init(&memcg->thresholds_lock); 5163 spin_lock_init(&memcg->move_lock); 5164 vmpressure_init(&memcg->vmpressure); 5165 INIT_LIST_HEAD(&memcg->event_list); 5166 spin_lock_init(&memcg->event_list_lock); 5167 memcg->socket_pressure = jiffies; 5168 #ifdef CONFIG_MEMCG_KMEM 5169 memcg->kmemcg_id = -1; 5170 INIT_LIST_HEAD(&memcg->objcg_list); 5171 #endif 5172 #ifdef CONFIG_CGROUP_WRITEBACK 5173 INIT_LIST_HEAD(&memcg->cgwb_list); 5174 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5175 memcg->cgwb_frn[i].done = 5176 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5177 #endif 5178 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5179 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5180 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5181 memcg->deferred_split_queue.split_queue_len = 0; 5182 #endif 5183 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5184 return memcg; 
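
	/*
	 * Note on the ID lifecycle above: the slot is reserved early with
	 * idr_alloc(..., NULL, ...) so the error path can simply drop the
	 * ID, and the fully constructed memcg is only published to lookups
	 * via idr_replace() right before returning.  This way
	 * mem_cgroup_from_id() never sees a half-initialized memcg.
	 */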
5185 fail: 5186 mem_cgroup_id_remove(memcg); 5187 __mem_cgroup_free(memcg); 5188 return ERR_PTR(error); 5189 } 5190 5191 static struct cgroup_subsys_state * __ref 5192 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5193 { 5194 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5195 struct mem_cgroup *memcg, *old_memcg; 5196 long error = -ENOMEM; 5197 5198 old_memcg = set_active_memcg(parent); 5199 memcg = mem_cgroup_alloc(); 5200 set_active_memcg(old_memcg); 5201 if (IS_ERR(memcg)) 5202 return ERR_CAST(memcg); 5203 5204 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5205 memcg->soft_limit = PAGE_COUNTER_MAX; 5206 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5207 if (parent) { 5208 memcg->swappiness = mem_cgroup_swappiness(parent); 5209 memcg->oom_kill_disable = parent->oom_kill_disable; 5210 5211 page_counter_init(&memcg->memory, &parent->memory); 5212 page_counter_init(&memcg->swap, &parent->swap); 5213 page_counter_init(&memcg->kmem, &parent->kmem); 5214 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5215 } else { 5216 page_counter_init(&memcg->memory, NULL); 5217 page_counter_init(&memcg->swap, NULL); 5218 page_counter_init(&memcg->kmem, NULL); 5219 page_counter_init(&memcg->tcpmem, NULL); 5220 5221 root_mem_cgroup = memcg; 5222 return &memcg->css; 5223 } 5224 5225 /* The following stuff does not apply to the root */ 5226 error = memcg_online_kmem(memcg); 5227 if (error) 5228 goto fail; 5229 5230 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5231 static_branch_inc(&memcg_sockets_enabled_key); 5232 5233 return &memcg->css; 5234 fail: 5235 mem_cgroup_id_remove(memcg); 5236 mem_cgroup_free(memcg); 5237 return ERR_PTR(error); 5238 } 5239 5240 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5241 { 5242 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5243 5244 /* 5245 * A memcg must be visible for memcg_expand_shrinker_maps() 5246 * by the time the maps are allocated. So, we allocate maps 5247 * here, when for_each_mem_cgroup() can't skip it. 5248 */ 5249 if (memcg_alloc_shrinker_maps(memcg)) { 5250 mem_cgroup_id_remove(memcg); 5251 return -ENOMEM; 5252 } 5253 5254 /* Online state pins memcg ID, memcg ID pins CSS */ 5255 refcount_set(&memcg->id.ref, 1); 5256 css_get(css); 5257 return 0; 5258 } 5259 5260 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5261 { 5262 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5263 struct mem_cgroup_event *event, *tmp; 5264 5265 /* 5266 * Unregister events and notify userspace. 5267 * Notify userspace about cgroup removing only after rmdir of cgroup 5268 * directory to avoid race between userspace and kernelspace. 
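 *
 * Each event is torn down asynchronously: schedule_work() below ends
 * up in memcg_event_remove(), which detaches the wait queue entry,
 * runs the event's unregister_event() callback and signals the
 * eventfd one last time so the listener can notice the cgroup going
 * away.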
5269 */ 5270 spin_lock(&memcg->event_list_lock); 5271 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5272 list_del_init(&event->list); 5273 schedule_work(&event->remove); 5274 } 5275 spin_unlock(&memcg->event_list_lock); 5276 5277 page_counter_set_min(&memcg->memory, 0); 5278 page_counter_set_low(&memcg->memory, 0); 5279 5280 memcg_offline_kmem(memcg); 5281 wb_memcg_offline(memcg); 5282 5283 drain_all_stock(memcg); 5284 5285 mem_cgroup_id_put(memcg); 5286 } 5287 5288 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5289 { 5290 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5291 5292 invalidate_reclaim_iterators(memcg); 5293 } 5294 5295 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5296 { 5297 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5298 int __maybe_unused i; 5299 5300 #ifdef CONFIG_CGROUP_WRITEBACK 5301 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5302 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5303 #endif 5304 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5305 static_branch_dec(&memcg_sockets_enabled_key); 5306 5307 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5308 static_branch_dec(&memcg_sockets_enabled_key); 5309 5310 vmpressure_cleanup(&memcg->vmpressure); 5311 cancel_work_sync(&memcg->high_work); 5312 mem_cgroup_remove_from_trees(memcg); 5313 memcg_free_shrinker_maps(memcg); 5314 memcg_free_kmem(memcg); 5315 mem_cgroup_free(memcg); 5316 } 5317 5318 /** 5319 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5320 * @css: the target css 5321 * 5322 * Reset the states of the mem_cgroup associated with @css. This is 5323 * invoked when the userland requests disabling on the default hierarchy 5324 * but the memcg is pinned through dependency. The memcg should stop 5325 * applying policies and should revert to the vanilla state as it may be 5326 * made visible again. 5327 * 5328 * The current implementation only resets the essential configurations. 5329 * This needs to be expanded to cover all the visible parts. 5330 */ 5331 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5332 { 5333 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5334 5335 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5336 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5337 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5338 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5339 page_counter_set_min(&memcg->memory, 0); 5340 page_counter_set_low(&memcg->memory, 0); 5341 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5342 memcg->soft_limit = PAGE_COUNTER_MAX; 5343 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5344 memcg_wb_domain_size_changed(memcg); 5345 } 5346 5347 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5348 { 5349 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5350 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5351 struct memcg_vmstats_percpu *statc; 5352 long delta, v; 5353 int i; 5354 5355 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5356 5357 for (i = 0; i < MEMCG_NR_STAT; i++) { 5358 /* 5359 * Collect the aggregated propagation counts of groups 5360 * below us. We're in a per-cpu loop here and this is 5361 * a global counter, so the first cycle will get them. 
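 *
 * Illustrative numbers: if earlier flushes of this memcg's children
 * left state_pending[i] == 5 here, and this CPU's counter moved from
 * 10 to 14 since the last flush, delta becomes 9; it is folded into
 * this memcg's vmstats.state[i] and queued in the parent's
 * state_pending[i] for the parent's own flush pass.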
5362 */ 5363 delta = memcg->vmstats.state_pending[i]; 5364 if (delta) 5365 memcg->vmstats.state_pending[i] = 0; 5366 5367 /* Add CPU changes on this level since the last flush */ 5368 v = READ_ONCE(statc->state[i]); 5369 if (v != statc->state_prev[i]) { 5370 delta += v - statc->state_prev[i]; 5371 statc->state_prev[i] = v; 5372 } 5373 5374 if (!delta) 5375 continue; 5376 5377 /* Aggregate counts on this level and propagate upwards */ 5378 memcg->vmstats.state[i] += delta; 5379 if (parent) 5380 parent->vmstats.state_pending[i] += delta; 5381 } 5382 5383 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 5384 delta = memcg->vmstats.events_pending[i]; 5385 if (delta) 5386 memcg->vmstats.events_pending[i] = 0; 5387 5388 v = READ_ONCE(statc->events[i]); 5389 if (v != statc->events_prev[i]) { 5390 delta += v - statc->events_prev[i]; 5391 statc->events_prev[i] = v; 5392 } 5393 5394 if (!delta) 5395 continue; 5396 5397 memcg->vmstats.events[i] += delta; 5398 if (parent) 5399 parent->vmstats.events_pending[i] += delta; 5400 } 5401 } 5402 5403 #ifdef CONFIG_MMU 5404 /* Handlers for move charge at task migration. */ 5405 static int mem_cgroup_do_precharge(unsigned long count) 5406 { 5407 int ret; 5408 5409 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5410 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5411 if (!ret) { 5412 mc.precharge += count; 5413 return ret; 5414 } 5415 5416 /* Try charges one by one with reclaim, but do not retry */ 5417 while (count--) { 5418 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5419 if (ret) 5420 return ret; 5421 mc.precharge++; 5422 cond_resched(); 5423 } 5424 return 0; 5425 } 5426 5427 union mc_target { 5428 struct page *page; 5429 swp_entry_t ent; 5430 }; 5431 5432 enum mc_target_type { 5433 MC_TARGET_NONE = 0, 5434 MC_TARGET_PAGE, 5435 MC_TARGET_SWAP, 5436 MC_TARGET_DEVICE, 5437 }; 5438 5439 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5440 unsigned long addr, pte_t ptent) 5441 { 5442 struct page *page = vm_normal_page(vma, addr, ptent); 5443 5444 if (!page || !page_mapped(page)) 5445 return NULL; 5446 if (PageAnon(page)) { 5447 if (!(mc.flags & MOVE_ANON)) 5448 return NULL; 5449 } else { 5450 if (!(mc.flags & MOVE_FILE)) 5451 return NULL; 5452 } 5453 if (!get_page_unless_zero(page)) 5454 return NULL; 5455 5456 return page; 5457 } 5458 5459 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5460 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5461 pte_t ptent, swp_entry_t *entry) 5462 { 5463 struct page *page = NULL; 5464 swp_entry_t ent = pte_to_swp_entry(ptent); 5465 5466 if (!(mc.flags & MOVE_ANON)) 5467 return NULL; 5468 5469 /* 5470 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5471 * a device and because they are not accessible by CPU they are store 5472 * as special swap entry in the CPU page table. 5473 */ 5474 if (is_device_private_entry(ent)) { 5475 page = device_private_entry_to_page(ent); 5476 /* 5477 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5478 * a refcount of 1 when free (unlike normal page) 5479 */ 5480 if (!page_ref_add_unless(page, 1, 1)) 5481 return NULL; 5482 return page; 5483 } 5484 5485 if (non_swap_entry(ent)) 5486 return NULL; 5487 5488 /* 5489 * Because lookup_swap_cache() updates some statistics counter, 5490 * we call find_get_page() with swapper_space directly. 
5491 */ 5492 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5493 entry->val = ent.val; 5494 5495 return page; 5496 } 5497 #else 5498 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5499 pte_t ptent, swp_entry_t *entry) 5500 { 5501 return NULL; 5502 } 5503 #endif 5504 5505 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5506 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5507 { 5508 if (!vma->vm_file) /* anonymous vma */ 5509 return NULL; 5510 if (!(mc.flags & MOVE_FILE)) 5511 return NULL; 5512 5513 /* page is moved even if it's not RSS of this task(page-faulted). */ 5514 /* shmem/tmpfs may report page out on swap: account for that too. */ 5515 return find_get_incore_page(vma->vm_file->f_mapping, 5516 linear_page_index(vma, addr)); 5517 } 5518 5519 /** 5520 * mem_cgroup_move_account - move account of the page 5521 * @page: the page 5522 * @compound: charge the page as compound or small page 5523 * @from: mem_cgroup which the page is moved from. 5524 * @to: mem_cgroup which the page is moved to. @from != @to. 5525 * 5526 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5527 * 5528 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5529 * from old cgroup. 5530 */ 5531 static int mem_cgroup_move_account(struct page *page, 5532 bool compound, 5533 struct mem_cgroup *from, 5534 struct mem_cgroup *to) 5535 { 5536 struct lruvec *from_vec, *to_vec; 5537 struct pglist_data *pgdat; 5538 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5539 int ret; 5540 5541 VM_BUG_ON(from == to); 5542 VM_BUG_ON_PAGE(PageLRU(page), page); 5543 VM_BUG_ON(compound && !PageTransHuge(page)); 5544 5545 /* 5546 * Prevent mem_cgroup_migrate() from looking at 5547 * page's memory cgroup of its source page while we change it. 5548 */ 5549 ret = -EBUSY; 5550 if (!trylock_page(page)) 5551 goto out; 5552 5553 ret = -EINVAL; 5554 if (page_memcg(page) != from) 5555 goto out_unlock; 5556 5557 pgdat = page_pgdat(page); 5558 from_vec = mem_cgroup_lruvec(from, pgdat); 5559 to_vec = mem_cgroup_lruvec(to, pgdat); 5560 5561 lock_page_memcg(page); 5562 5563 if (PageAnon(page)) { 5564 if (page_mapped(page)) { 5565 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5566 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5567 if (PageTransHuge(page)) { 5568 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5569 -nr_pages); 5570 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5571 nr_pages); 5572 } 5573 } 5574 } else { 5575 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5576 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5577 5578 if (PageSwapBacked(page)) { 5579 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5580 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5581 } 5582 5583 if (page_mapped(page)) { 5584 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5585 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5586 } 5587 5588 if (PageDirty(page)) { 5589 struct address_space *mapping = page_mapping(page); 5590 5591 if (mapping_can_writeback(mapping)) { 5592 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5593 -nr_pages); 5594 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5595 nr_pages); 5596 } 5597 } 5598 } 5599 5600 if (PageWriteback(page)) { 5601 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5602 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5603 } 5604 5605 /* 5606 * All state has been migrated, let's switch to the new memcg. 
5607 * 5608 * It is safe to change page's memcg here because the page 5609 * is referenced, charged, isolated, and locked: we can't race 5610 * with (un)charging, migration, LRU putback, or anything else 5611 * that would rely on a stable page's memory cgroup. 5612 * 5613 * Note that lock_page_memcg is a memcg lock, not a page lock, 5614 * to save space. As soon as we switch page's memory cgroup to a 5615 * new memcg that isn't locked, the above state can change 5616 * concurrently again. Make sure we're truly done with it. 5617 */ 5618 smp_mb(); 5619 5620 css_get(&to->css); 5621 css_put(&from->css); 5622 5623 page->memcg_data = (unsigned long)to; 5624 5625 __unlock_page_memcg(from); 5626 5627 ret = 0; 5628 5629 local_irq_disable(); 5630 mem_cgroup_charge_statistics(to, page, nr_pages); 5631 memcg_check_events(to, page); 5632 mem_cgroup_charge_statistics(from, page, -nr_pages); 5633 memcg_check_events(from, page); 5634 local_irq_enable(); 5635 out_unlock: 5636 unlock_page(page); 5637 out: 5638 return ret; 5639 } 5640 5641 /** 5642 * get_mctgt_type - get target type of moving charge 5643 * @vma: the vma the pte to be checked belongs 5644 * @addr: the address corresponding to the pte to be checked 5645 * @ptent: the pte to be checked 5646 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5647 * 5648 * Returns 5649 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5650 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5651 * move charge. if @target is not NULL, the page is stored in target->page 5652 * with extra refcnt got(Callers should handle it). 5653 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5654 * target for charge migration. if @target is not NULL, the entry is stored 5655 * in target->ent. 5656 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE 5657 * (so ZONE_DEVICE page and thus not on the lru). 5658 * For now we such page is charge like a regular page would be as for all 5659 * intent and purposes it is just special memory taking the place of a 5660 * regular page. 5661 * 5662 * See Documentations/vm/hmm.txt and include/linux/hmm.h 5663 * 5664 * Called with pte lock held. 5665 */ 5666 5667 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5668 unsigned long addr, pte_t ptent, union mc_target *target) 5669 { 5670 struct page *page = NULL; 5671 enum mc_target_type ret = MC_TARGET_NONE; 5672 swp_entry_t ent = { .val = 0 }; 5673 5674 if (pte_present(ptent)) 5675 page = mc_handle_present_pte(vma, addr, ptent); 5676 else if (is_swap_pte(ptent)) 5677 page = mc_handle_swap_pte(vma, ptent, &ent); 5678 else if (pte_none(ptent)) 5679 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5680 5681 if (!page && !ent.val) 5682 return ret; 5683 if (page) { 5684 /* 5685 * Do only loose check w/o serialization. 5686 * mem_cgroup_move_account() checks the page is valid or 5687 * not under LRU exclusion. 5688 */ 5689 if (page_memcg(page) == mc.from) { 5690 ret = MC_TARGET_PAGE; 5691 if (is_device_private_page(page)) 5692 ret = MC_TARGET_DEVICE; 5693 if (target) 5694 target->page = page; 5695 } 5696 if (!ret || !target) 5697 put_page(page); 5698 } 5699 /* 5700 * There is a swap entry and a page doesn't exist or isn't charged. 5701 * But we cannot move a tail-page in a THP. 
5702 */ 5703 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5704 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5705 ret = MC_TARGET_SWAP; 5706 if (target) 5707 target->ent = ent; 5708 } 5709 return ret; 5710 } 5711 5712 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5713 /* 5714 * We don't consider PMD mapped swapping or file mapped pages because THP does 5715 * not support them for now. 5716 * Caller should make sure that pmd_trans_huge(pmd) is true. 5717 */ 5718 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5719 unsigned long addr, pmd_t pmd, union mc_target *target) 5720 { 5721 struct page *page = NULL; 5722 enum mc_target_type ret = MC_TARGET_NONE; 5723 5724 if (unlikely(is_swap_pmd(pmd))) { 5725 VM_BUG_ON(thp_migration_supported() && 5726 !is_pmd_migration_entry(pmd)); 5727 return ret; 5728 } 5729 page = pmd_page(pmd); 5730 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5731 if (!(mc.flags & MOVE_ANON)) 5732 return ret; 5733 if (page_memcg(page) == mc.from) { 5734 ret = MC_TARGET_PAGE; 5735 if (target) { 5736 get_page(page); 5737 target->page = page; 5738 } 5739 } 5740 return ret; 5741 } 5742 #else 5743 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5744 unsigned long addr, pmd_t pmd, union mc_target *target) 5745 { 5746 return MC_TARGET_NONE; 5747 } 5748 #endif 5749 5750 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5751 unsigned long addr, unsigned long end, 5752 struct mm_walk *walk) 5753 { 5754 struct vm_area_struct *vma = walk->vma; 5755 pte_t *pte; 5756 spinlock_t *ptl; 5757 5758 ptl = pmd_trans_huge_lock(pmd, vma); 5759 if (ptl) { 5760 /* 5761 * Note their can not be MC_TARGET_DEVICE for now as we do not 5762 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5763 * this might change. 5764 */ 5765 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5766 mc.precharge += HPAGE_PMD_NR; 5767 spin_unlock(ptl); 5768 return 0; 5769 } 5770 5771 if (pmd_trans_unstable(pmd)) 5772 return 0; 5773 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5774 for (; addr != end; pte++, addr += PAGE_SIZE) 5775 if (get_mctgt_type(vma, addr, *pte, NULL)) 5776 mc.precharge++; /* increment precharge temporarily */ 5777 pte_unmap_unlock(pte - 1, ptl); 5778 cond_resched(); 5779 5780 return 0; 5781 } 5782 5783 static const struct mm_walk_ops precharge_walk_ops = { 5784 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5785 }; 5786 5787 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5788 { 5789 unsigned long precharge; 5790 5791 mmap_read_lock(mm); 5792 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5793 mmap_read_unlock(mm); 5794 5795 precharge = mc.precharge; 5796 mc.precharge = 0; 5797 5798 return precharge; 5799 } 5800 5801 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5802 { 5803 unsigned long precharge = mem_cgroup_count_precharge(mm); 5804 5805 VM_BUG_ON(mc.moving_task); 5806 mc.moving_task = current; 5807 return mem_cgroup_do_precharge(precharge); 5808 } 5809 5810 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. 
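 *
 * Bookkeeping recap: can_attach() precharged mc.to for every movable
 * pte it found; each page moved by mem_cgroup_move_account() converts
 * one unit of mc.precharge into mc.moved_charge (already charged to
 * mc.to, not yet uncharged from mc.from).  Whatever precharge is left
 * over, the deferred mc.from uncharges and the mc.moved_swap
 * refcount/charge fixups are all settled here.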
*/ 5811 static void __mem_cgroup_clear_mc(void) 5812 { 5813 struct mem_cgroup *from = mc.from; 5814 struct mem_cgroup *to = mc.to; 5815 5816 /* we must uncharge all the leftover precharges from mc.to */ 5817 if (mc.precharge) { 5818 cancel_charge(mc.to, mc.precharge); 5819 mc.precharge = 0; 5820 } 5821 /* 5822 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5823 * we must uncharge here. 5824 */ 5825 if (mc.moved_charge) { 5826 cancel_charge(mc.from, mc.moved_charge); 5827 mc.moved_charge = 0; 5828 } 5829 /* we must fixup refcnts and charges */ 5830 if (mc.moved_swap) { 5831 /* uncharge swap account from the old cgroup */ 5832 if (!mem_cgroup_is_root(mc.from)) 5833 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5834 5835 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5836 5837 /* 5838 * we charged both to->memory and to->memsw, so we 5839 * should uncharge to->memory. 5840 */ 5841 if (!mem_cgroup_is_root(mc.to)) 5842 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5843 5844 mc.moved_swap = 0; 5845 } 5846 memcg_oom_recover(from); 5847 memcg_oom_recover(to); 5848 wake_up_all(&mc.waitq); 5849 } 5850 5851 static void mem_cgroup_clear_mc(void) 5852 { 5853 struct mm_struct *mm = mc.mm; 5854 5855 /* 5856 * we must clear moving_task before waking up waiters at the end of 5857 * task migration. 5858 */ 5859 mc.moving_task = NULL; 5860 __mem_cgroup_clear_mc(); 5861 spin_lock(&mc.lock); 5862 mc.from = NULL; 5863 mc.to = NULL; 5864 mc.mm = NULL; 5865 spin_unlock(&mc.lock); 5866 5867 mmput(mm); 5868 } 5869 5870 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5871 { 5872 struct cgroup_subsys_state *css; 5873 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5874 struct mem_cgroup *from; 5875 struct task_struct *leader, *p; 5876 struct mm_struct *mm; 5877 unsigned long move_flags; 5878 int ret = 0; 5879 5880 /* charge immigration isn't supported on the default hierarchy */ 5881 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5882 return 0; 5883 5884 /* 5885 * Multi-process migrations only happen on the default hierarchy 5886 * where charge immigration is not used. Perform charge 5887 * immigration if @tset contains a leader and whine if there are 5888 * multiple. 5889 */ 5890 p = NULL; 5891 cgroup_taskset_for_each_leader(leader, css, tset) { 5892 WARN_ON_ONCE(p); 5893 p = leader; 5894 memcg = mem_cgroup_from_css(css); 5895 } 5896 if (!p) 5897 return 0; 5898 5899 /* 5900 * We are now commited to this value whatever it is. Changes in this 5901 * tunable will only affect upcoming migrations, not the current one. 5902 * So we need to save it, and keep it going. 
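 *
 * For reference, this value comes from the cgroup1
 * memory.move_charge_at_immigrate file: bit 0 (MOVE_ANON) moves
 * charges of anonymous pages and their swap, bit 1 (MOVE_FILE) moves
 * charges of file and shmem pages, so writing 3 enables both.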
5903 */ 5904 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5905 if (!move_flags) 5906 return 0; 5907 5908 from = mem_cgroup_from_task(p); 5909 5910 VM_BUG_ON(from == memcg); 5911 5912 mm = get_task_mm(p); 5913 if (!mm) 5914 return 0; 5915 /* We move charges only when we move a owner of the mm */ 5916 if (mm->owner == p) { 5917 VM_BUG_ON(mc.from); 5918 VM_BUG_ON(mc.to); 5919 VM_BUG_ON(mc.precharge); 5920 VM_BUG_ON(mc.moved_charge); 5921 VM_BUG_ON(mc.moved_swap); 5922 5923 spin_lock(&mc.lock); 5924 mc.mm = mm; 5925 mc.from = from; 5926 mc.to = memcg; 5927 mc.flags = move_flags; 5928 spin_unlock(&mc.lock); 5929 /* We set mc.moving_task later */ 5930 5931 ret = mem_cgroup_precharge_mc(mm); 5932 if (ret) 5933 mem_cgroup_clear_mc(); 5934 } else { 5935 mmput(mm); 5936 } 5937 return ret; 5938 } 5939 5940 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5941 { 5942 if (mc.to) 5943 mem_cgroup_clear_mc(); 5944 } 5945 5946 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5947 unsigned long addr, unsigned long end, 5948 struct mm_walk *walk) 5949 { 5950 int ret = 0; 5951 struct vm_area_struct *vma = walk->vma; 5952 pte_t *pte; 5953 spinlock_t *ptl; 5954 enum mc_target_type target_type; 5955 union mc_target target; 5956 struct page *page; 5957 5958 ptl = pmd_trans_huge_lock(pmd, vma); 5959 if (ptl) { 5960 if (mc.precharge < HPAGE_PMD_NR) { 5961 spin_unlock(ptl); 5962 return 0; 5963 } 5964 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5965 if (target_type == MC_TARGET_PAGE) { 5966 page = target.page; 5967 if (!isolate_lru_page(page)) { 5968 if (!mem_cgroup_move_account(page, true, 5969 mc.from, mc.to)) { 5970 mc.precharge -= HPAGE_PMD_NR; 5971 mc.moved_charge += HPAGE_PMD_NR; 5972 } 5973 putback_lru_page(page); 5974 } 5975 put_page(page); 5976 } else if (target_type == MC_TARGET_DEVICE) { 5977 page = target.page; 5978 if (!mem_cgroup_move_account(page, true, 5979 mc.from, mc.to)) { 5980 mc.precharge -= HPAGE_PMD_NR; 5981 mc.moved_charge += HPAGE_PMD_NR; 5982 } 5983 put_page(page); 5984 } 5985 spin_unlock(ptl); 5986 return 0; 5987 } 5988 5989 if (pmd_trans_unstable(pmd)) 5990 return 0; 5991 retry: 5992 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5993 for (; addr != end; addr += PAGE_SIZE) { 5994 pte_t ptent = *(pte++); 5995 bool device = false; 5996 swp_entry_t ent; 5997 5998 if (!mc.precharge) 5999 break; 6000 6001 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6002 case MC_TARGET_DEVICE: 6003 device = true; 6004 fallthrough; 6005 case MC_TARGET_PAGE: 6006 page = target.page; 6007 /* 6008 * We can have a part of the split pmd here. Moving it 6009 * can be done but it would be too convoluted so simply 6010 * ignore such a partial THP and keep it in original 6011 * memcg. There should be somebody mapping the head. 6012 */ 6013 if (PageTransCompound(page)) 6014 goto put; 6015 if (!device && isolate_lru_page(page)) 6016 goto put; 6017 if (!mem_cgroup_move_account(page, false, 6018 mc.from, mc.to)) { 6019 mc.precharge--; 6020 /* we uncharge from mc.from later. */ 6021 mc.moved_charge++; 6022 } 6023 if (!device) 6024 putback_lru_page(page); 6025 put: /* get_mctgt_type() gets the page */ 6026 put_page(page); 6027 break; 6028 case MC_TARGET_SWAP: 6029 ent = target.ent; 6030 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6031 mc.precharge--; 6032 mem_cgroup_id_get_many(mc.to, 1); 6033 /* we fixup other refcnts and charges later. 
*/ 6034 mc.moved_swap++; 6035 } 6036 break; 6037 default: 6038 break; 6039 } 6040 } 6041 pte_unmap_unlock(pte - 1, ptl); 6042 cond_resched(); 6043 6044 if (addr != end) { 6045 /* 6046 * We have consumed all precharges we got in can_attach(). 6047 * We try charge one by one, but don't do any additional 6048 * charges to mc.to if we have failed in charge once in attach() 6049 * phase. 6050 */ 6051 ret = mem_cgroup_do_precharge(1); 6052 if (!ret) 6053 goto retry; 6054 } 6055 6056 return ret; 6057 } 6058 6059 static const struct mm_walk_ops charge_walk_ops = { 6060 .pmd_entry = mem_cgroup_move_charge_pte_range, 6061 }; 6062 6063 static void mem_cgroup_move_charge(void) 6064 { 6065 lru_add_drain_all(); 6066 /* 6067 * Signal lock_page_memcg() to take the memcg's move_lock 6068 * while we're moving its pages to another memcg. Then wait 6069 * for already started RCU-only updates to finish. 6070 */ 6071 atomic_inc(&mc.from->moving_account); 6072 synchronize_rcu(); 6073 retry: 6074 if (unlikely(!mmap_read_trylock(mc.mm))) { 6075 /* 6076 * Someone who are holding the mmap_lock might be waiting in 6077 * waitq. So we cancel all extra charges, wake up all waiters, 6078 * and retry. Because we cancel precharges, we might not be able 6079 * to move enough charges, but moving charge is a best-effort 6080 * feature anyway, so it wouldn't be a big problem. 6081 */ 6082 __mem_cgroup_clear_mc(); 6083 cond_resched(); 6084 goto retry; 6085 } 6086 /* 6087 * When we have consumed all precharges and failed in doing 6088 * additional charge, the page walk just aborts. 6089 */ 6090 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6091 NULL); 6092 6093 mmap_read_unlock(mc.mm); 6094 atomic_dec(&mc.from->moving_account); 6095 } 6096 6097 static void mem_cgroup_move_task(void) 6098 { 6099 if (mc.to) { 6100 mem_cgroup_move_charge(); 6101 mem_cgroup_clear_mc(); 6102 } 6103 } 6104 #else /* !CONFIG_MMU */ 6105 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6106 { 6107 return 0; 6108 } 6109 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6110 { 6111 } 6112 static void mem_cgroup_move_task(void) 6113 { 6114 } 6115 #endif 6116 6117 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6118 { 6119 if (value == PAGE_COUNTER_MAX) 6120 seq_puts(m, "max\n"); 6121 else 6122 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6123 6124 return 0; 6125 } 6126 6127 static u64 memory_current_read(struct cgroup_subsys_state *css, 6128 struct cftype *cft) 6129 { 6130 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6131 6132 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6133 } 6134 6135 static int memory_min_show(struct seq_file *m, void *v) 6136 { 6137 return seq_puts_memcg_tunable(m, 6138 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6139 } 6140 6141 static ssize_t memory_min_write(struct kernfs_open_file *of, 6142 char *buf, size_t nbytes, loff_t off) 6143 { 6144 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6145 unsigned long min; 6146 int err; 6147 6148 buf = strstrip(buf); 6149 err = page_counter_memparse(buf, "max", &min); 6150 if (err) 6151 return err; 6152 6153 page_counter_set_min(&memcg->memory, min); 6154 6155 return nbytes; 6156 } 6157 6158 static int memory_low_show(struct seq_file *m, void *v) 6159 { 6160 return seq_puts_memcg_tunable(m, 6161 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6162 } 6163 6164 static ssize_t memory_low_write(struct kernfs_open_file *of, 6165 char *buf, size_t nbytes, loff_t off) 6166 { 6167 
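	/*
	 * The buffer holds either a byte count or the literal "max";
	 * page_counter_memparse() maps "max" to PAGE_COUNTER_MAX and
	 * rounds byte counts down to whole pages.
	 */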
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6168 unsigned long low; 6169 int err; 6170 6171 buf = strstrip(buf); 6172 err = page_counter_memparse(buf, "max", &low); 6173 if (err) 6174 return err; 6175 6176 page_counter_set_low(&memcg->memory, low); 6177 6178 return nbytes; 6179 } 6180 6181 static int memory_high_show(struct seq_file *m, void *v) 6182 { 6183 return seq_puts_memcg_tunable(m, 6184 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6185 } 6186 6187 static ssize_t memory_high_write(struct kernfs_open_file *of, 6188 char *buf, size_t nbytes, loff_t off) 6189 { 6190 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6191 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6192 bool drained = false; 6193 unsigned long high; 6194 int err; 6195 6196 buf = strstrip(buf); 6197 err = page_counter_memparse(buf, "max", &high); 6198 if (err) 6199 return err; 6200 6201 page_counter_set_high(&memcg->memory, high); 6202 6203 for (;;) { 6204 unsigned long nr_pages = page_counter_read(&memcg->memory); 6205 unsigned long reclaimed; 6206 6207 if (nr_pages <= high) 6208 break; 6209 6210 if (signal_pending(current)) 6211 break; 6212 6213 if (!drained) { 6214 drain_all_stock(memcg); 6215 drained = true; 6216 continue; 6217 } 6218 6219 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6220 GFP_KERNEL, true); 6221 6222 if (!reclaimed && !nr_retries--) 6223 break; 6224 } 6225 6226 memcg_wb_domain_size_changed(memcg); 6227 return nbytes; 6228 } 6229 6230 static int memory_max_show(struct seq_file *m, void *v) 6231 { 6232 return seq_puts_memcg_tunable(m, 6233 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6234 } 6235 6236 static ssize_t memory_max_write(struct kernfs_open_file *of, 6237 char *buf, size_t nbytes, loff_t off) 6238 { 6239 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6240 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6241 bool drained = false; 6242 unsigned long max; 6243 int err; 6244 6245 buf = strstrip(buf); 6246 err = page_counter_memparse(buf, "max", &max); 6247 if (err) 6248 return err; 6249 6250 xchg(&memcg->memory.max, max); 6251 6252 for (;;) { 6253 unsigned long nr_pages = page_counter_read(&memcg->memory); 6254 6255 if (nr_pages <= max) 6256 break; 6257 6258 if (signal_pending(current)) 6259 break; 6260 6261 if (!drained) { 6262 drain_all_stock(memcg); 6263 drained = true; 6264 continue; 6265 } 6266 6267 if (nr_reclaims) { 6268 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6269 GFP_KERNEL, true)) 6270 nr_reclaims--; 6271 continue; 6272 } 6273 6274 memcg_memory_event(memcg, MEMCG_OOM); 6275 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6276 break; 6277 } 6278 6279 memcg_wb_domain_size_changed(memcg); 6280 return nbytes; 6281 } 6282 6283 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6284 { 6285 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6286 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6287 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6288 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6289 seq_printf(m, "oom_kill %lu\n", 6290 atomic_long_read(&events[MEMCG_OOM_KILL])); 6291 } 6292 6293 static int memory_events_show(struct seq_file *m, void *v) 6294 { 6295 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6296 6297 __memory_events_show(m, memcg->memory_events); 6298 return 0; 6299 } 6300 6301 static int memory_events_local_show(struct seq_file *m, void *v) 6302 { 6303 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 6304 6305 __memory_events_show(m, memcg->memory_events_local); 6306 return 0; 6307 } 6308 6309 static int memory_stat_show(struct seq_file *m, void *v) 6310 { 6311 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6312 char *buf; 6313 6314 buf = memory_stat_format(memcg); 6315 if (!buf) 6316 return -ENOMEM; 6317 seq_puts(m, buf); 6318 kfree(buf); 6319 return 0; 6320 } 6321 6322 #ifdef CONFIG_NUMA 6323 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6324 int item) 6325 { 6326 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item); 6327 } 6328 6329 static int memory_numa_stat_show(struct seq_file *m, void *v) 6330 { 6331 int i; 6332 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6333 6334 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6335 int nid; 6336 6337 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6338 continue; 6339 6340 seq_printf(m, "%s", memory_stats[i].name); 6341 for_each_node_state(nid, N_MEMORY) { 6342 u64 size; 6343 struct lruvec *lruvec; 6344 6345 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6346 size = lruvec_page_state_output(lruvec, 6347 memory_stats[i].idx); 6348 seq_printf(m, " N%d=%llu", nid, size); 6349 } 6350 seq_putc(m, '\n'); 6351 } 6352 6353 return 0; 6354 } 6355 #endif 6356 6357 static int memory_oom_group_show(struct seq_file *m, void *v) 6358 { 6359 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6360 6361 seq_printf(m, "%d\n", memcg->oom_group); 6362 6363 return 0; 6364 } 6365 6366 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6367 char *buf, size_t nbytes, loff_t off) 6368 { 6369 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6370 int ret, oom_group; 6371 6372 buf = strstrip(buf); 6373 if (!buf) 6374 return -EINVAL; 6375 6376 ret = kstrtoint(buf, 0, &oom_group); 6377 if (ret) 6378 return ret; 6379 6380 if (oom_group != 0 && oom_group != 1) 6381 return -EINVAL; 6382 6383 memcg->oom_group = oom_group; 6384 6385 return nbytes; 6386 } 6387 6388 static struct cftype memory_files[] = { 6389 { 6390 .name = "current", 6391 .flags = CFTYPE_NOT_ON_ROOT, 6392 .read_u64 = memory_current_read, 6393 }, 6394 { 6395 .name = "min", 6396 .flags = CFTYPE_NOT_ON_ROOT, 6397 .seq_show = memory_min_show, 6398 .write = memory_min_write, 6399 }, 6400 { 6401 .name = "low", 6402 .flags = CFTYPE_NOT_ON_ROOT, 6403 .seq_show = memory_low_show, 6404 .write = memory_low_write, 6405 }, 6406 { 6407 .name = "high", 6408 .flags = CFTYPE_NOT_ON_ROOT, 6409 .seq_show = memory_high_show, 6410 .write = memory_high_write, 6411 }, 6412 { 6413 .name = "max", 6414 .flags = CFTYPE_NOT_ON_ROOT, 6415 .seq_show = memory_max_show, 6416 .write = memory_max_write, 6417 }, 6418 { 6419 .name = "events", 6420 .flags = CFTYPE_NOT_ON_ROOT, 6421 .file_offset = offsetof(struct mem_cgroup, events_file), 6422 .seq_show = memory_events_show, 6423 }, 6424 { 6425 .name = "events.local", 6426 .flags = CFTYPE_NOT_ON_ROOT, 6427 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6428 .seq_show = memory_events_local_show, 6429 }, 6430 { 6431 .name = "stat", 6432 .seq_show = memory_stat_show, 6433 }, 6434 #ifdef CONFIG_NUMA 6435 { 6436 .name = "numa_stat", 6437 .seq_show = memory_numa_stat_show, 6438 }, 6439 #endif 6440 { 6441 .name = "oom.group", 6442 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6443 .seq_show = memory_oom_group_show, 6444 .write = memory_oom_group_write, 6445 }, 6446 { } /* terminate */ 6447 }; 6448 6449 struct cgroup_subsys memory_cgrp_subsys = { 6450 .css_alloc = 
mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.css_rstat_flush = mem_cgroup_css_rstat_flush,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/*
 * This function calculates an individual cgroup's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one cgroup claims much more protection than the memory it
 *    uses, the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A cgroup's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual cgroup's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each cgroup's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees.
 */
static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected;
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection than what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
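	 *
	 * As a worked example with made-up numbers: if the parent's
	 * effective protection is 8G but its children together claim
	 * and use 16G, a child that claims and uses 4G ends up with
	 * 4G * 8G / 16G = 2G of effective protection.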
6527 */ 6528 if (siblings_protected > parent_effective) 6529 return protected * parent_effective / siblings_protected; 6530 6531 /* 6532 * Ok, utilized protection of all children is within what the 6533 * parent affords them, so we know whatever this child claims 6534 * and utilizes is effectively protected. 6535 * 6536 * If there is unprotected usage beyond this value, reclaim 6537 * will apply pressure in proportion to that amount. 6538 * 6539 * If there is unutilized protection, the cgroup will be fully 6540 * shielded from reclaim, but we do return a smaller value for 6541 * protection than what the group could enjoy in theory. This 6542 * is okay. With the overcommit distribution above, effective 6543 * protection is always dependent on how memory is actually 6544 * consumed among the siblings anyway. 6545 */ 6546 ep = protected; 6547 6548 /* 6549 * If the children aren't claiming (all of) the protection 6550 * afforded to them by the parent, distribute the remainder in 6551 * proportion to the (unprotected) memory of each cgroup. That 6552 * way, cgroups that aren't explicitly prioritized wrt each 6553 * other compete freely over the allowance, but they are 6554 * collectively protected from neighboring trees. 6555 * 6556 * We're using unprotected memory for the weight so that if 6557 * some cgroups DO claim explicit protection, we don't protect 6558 * the same bytes twice. 6559 * 6560 * Check both usage and parent_usage against the respective 6561 * protected values. One should imply the other, but they 6562 * aren't read atomically - make sure the division is sane. 6563 */ 6564 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6565 return ep; 6566 if (parent_effective > siblings_protected && 6567 parent_usage > siblings_protected && 6568 usage > protected) { 6569 unsigned long unclaimed; 6570 6571 unclaimed = parent_effective - siblings_protected; 6572 unclaimed *= usage - protected; 6573 unclaimed /= parent_usage - siblings_protected; 6574 6575 ep += unclaimed; 6576 } 6577 6578 return ep; 6579 } 6580 6581 /** 6582 * mem_cgroup_protected - check if memory consumption is in the normal range 6583 * @root: the top ancestor of the sub-tree being checked 6584 * @memcg: the memory cgroup to check 6585 * 6586 * WARNING: This function is not stateless! It can only be used as part 6587 * of a top-down tree iteration, not for isolated queries. 6588 */ 6589 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6590 struct mem_cgroup *memcg) 6591 { 6592 unsigned long usage, parent_usage; 6593 struct mem_cgroup *parent; 6594 6595 if (mem_cgroup_disabled()) 6596 return; 6597 6598 if (!root) 6599 root = root_mem_cgroup; 6600 6601 /* 6602 * Effective values of the reclaim targets are ignored so they 6603 * can be stale. Have a look at mem_cgroup_protection for more 6604 * details. 6605 * TODO: calculation should be more robust so that we do not need 6606 * that special casing. 
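	 * (When @memcg is itself the root of the subtree being reclaimed,
	 * we return right below and leave its emin/elow untouched.)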
6607 */ 6608 if (memcg == root) 6609 return; 6610 6611 usage = page_counter_read(&memcg->memory); 6612 if (!usage) 6613 return; 6614 6615 parent = parent_mem_cgroup(memcg); 6616 /* No parent means a non-hierarchical mode on v1 memcg */ 6617 if (!parent) 6618 return; 6619 6620 if (parent == root) { 6621 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6622 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6623 return; 6624 } 6625 6626 parent_usage = page_counter_read(&parent->memory); 6627 6628 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6629 READ_ONCE(memcg->memory.min), 6630 READ_ONCE(parent->memory.emin), 6631 atomic_long_read(&parent->memory.children_min_usage))); 6632 6633 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6634 READ_ONCE(memcg->memory.low), 6635 READ_ONCE(parent->memory.elow), 6636 atomic_long_read(&parent->memory.children_low_usage))); 6637 } 6638 6639 static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg, 6640 gfp_t gfp) 6641 { 6642 unsigned int nr_pages = thp_nr_pages(page); 6643 int ret; 6644 6645 ret = try_charge(memcg, gfp, nr_pages); 6646 if (ret) 6647 goto out; 6648 6649 css_get(&memcg->css); 6650 commit_charge(page, memcg); 6651 6652 local_irq_disable(); 6653 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6654 memcg_check_events(memcg, page); 6655 local_irq_enable(); 6656 out: 6657 return ret; 6658 } 6659 6660 /** 6661 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6662 * @page: page to charge 6663 * @mm: mm context of the victim 6664 * @gfp_mask: reclaim mode 6665 * 6666 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6667 * pages according to @gfp_mask if necessary. 6668 * 6669 * Do not use this for pages allocated for swapin. 6670 * 6671 * Returns 0 on success. Otherwise, an error code is returned. 6672 */ 6673 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6674 { 6675 struct mem_cgroup *memcg; 6676 int ret; 6677 6678 if (mem_cgroup_disabled()) 6679 return 0; 6680 6681 memcg = get_mem_cgroup_from_mm(mm); 6682 ret = __mem_cgroup_charge(page, memcg, gfp_mask); 6683 css_put(&memcg->css); 6684 6685 return ret; 6686 } 6687 6688 /** 6689 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin 6690 * @page: page to charge 6691 * @mm: mm context of the victim 6692 * @gfp: reclaim mode 6693 * @entry: swap entry for which the page is allocated 6694 * 6695 * This function charges a page allocated for swapin. Please call this before 6696 * adding the page to the swapcache. 6697 * 6698 * Returns 0 on success. Otherwise, an error code is returned. 6699 */ 6700 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, 6701 gfp_t gfp, swp_entry_t entry) 6702 { 6703 struct mem_cgroup *memcg; 6704 unsigned short id; 6705 int ret; 6706 6707 if (mem_cgroup_disabled()) 6708 return 0; 6709 6710 id = lookup_swap_cgroup_id(entry); 6711 rcu_read_lock(); 6712 memcg = mem_cgroup_from_id(id); 6713 if (!memcg || !css_tryget_online(&memcg->css)) 6714 memcg = get_mem_cgroup_from_mm(mm); 6715 rcu_read_unlock(); 6716 6717 ret = __mem_cgroup_charge(page, memcg, gfp); 6718 6719 css_put(&memcg->css); 6720 return ret; 6721 } 6722 6723 /* 6724 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 6725 * @entry: swap entry for which the page is charged 6726 * 6727 * Call this function after successfully adding the charged page to swapcache. 
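 *
 * A rough sketch of the expected ordering (details vary by caller):
 *
 *	mem_cgroup_swapin_charge_page(page, mm, gfp, entry);
 *	add_to_swap_cache(page, entry, gfp, ...);
 *	mem_cgroup_swapin_uncharge_swap(entry);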
6728 * 6729 * Note: This function assumes the page for which swap slot is being uncharged 6730 * is order 0 page. 6731 */ 6732 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 6733 { 6734 /* 6735 * Cgroup1's unified memory+swap counter has been charged with the 6736 * new swapcache page, finish the transfer by uncharging the swap 6737 * slot. The swap slot would also get uncharged when it dies, but 6738 * it can stick around indefinitely and we'd count the page twice 6739 * the entire time. 6740 * 6741 * Cgroup2 has separate resource counters for memory and swap, 6742 * so this is a non-issue here. Memory and swap charge lifetimes 6743 * correspond 1:1 to page and swap slot lifetimes: we charge the 6744 * page to memory here, and uncharge swap when the slot is freed. 6745 */ 6746 if (!mem_cgroup_disabled() && do_memsw_account()) { 6747 /* 6748 * The swap entry might not get freed for a long time, 6749 * let's not wait for it. The page already received a 6750 * memory+swap charge, drop the swap entry duplicate. 6751 */ 6752 mem_cgroup_uncharge_swap(entry, 1); 6753 } 6754 } 6755 6756 struct uncharge_gather { 6757 struct mem_cgroup *memcg; 6758 unsigned long nr_memory; 6759 unsigned long pgpgout; 6760 unsigned long nr_kmem; 6761 struct page *dummy_page; 6762 }; 6763 6764 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6765 { 6766 memset(ug, 0, sizeof(*ug)); 6767 } 6768 6769 static void uncharge_batch(const struct uncharge_gather *ug) 6770 { 6771 unsigned long flags; 6772 6773 if (ug->nr_memory) { 6774 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 6775 if (do_memsw_account()) 6776 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 6777 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6778 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6779 memcg_oom_recover(ug->memcg); 6780 } 6781 6782 local_irq_save(flags); 6783 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6784 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 6785 memcg_check_events(ug->memcg, ug->dummy_page); 6786 local_irq_restore(flags); 6787 6788 /* drop reference from uncharge_page */ 6789 css_put(&ug->memcg->css); 6790 } 6791 6792 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6793 { 6794 unsigned long nr_pages; 6795 struct mem_cgroup *memcg; 6796 struct obj_cgroup *objcg; 6797 6798 VM_BUG_ON_PAGE(PageLRU(page), page); 6799 6800 /* 6801 * Nobody should be changing or seriously looking at 6802 * page memcg or objcg at this point, we have fully 6803 * exclusive access to the page. 6804 */ 6805 if (PageMemcgKmem(page)) { 6806 objcg = __page_objcg(page); 6807 /* 6808 * This get matches the put at the end of the function and 6809 * kmem pages do not hold memcg references anymore. 
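		 * (Kmem pages record an obj_cgroup rather than a memcg so
		 *  that their charges can be reparented when the original
		 *  memcg is offlined; hence the objcg indirection here.)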
6810 */ 6811 memcg = get_mem_cgroup_from_objcg(objcg); 6812 } else { 6813 memcg = __page_memcg(page); 6814 } 6815 6816 if (!memcg) 6817 return; 6818 6819 if (ug->memcg != memcg) { 6820 if (ug->memcg) { 6821 uncharge_batch(ug); 6822 uncharge_gather_clear(ug); 6823 } 6824 ug->memcg = memcg; 6825 ug->dummy_page = page; 6826 6827 /* pairs with css_put in uncharge_batch */ 6828 css_get(&memcg->css); 6829 } 6830 6831 nr_pages = compound_nr(page); 6832 6833 if (PageMemcgKmem(page)) { 6834 ug->nr_memory += nr_pages; 6835 ug->nr_kmem += nr_pages; 6836 6837 page->memcg_data = 0; 6838 obj_cgroup_put(objcg); 6839 } else { 6840 /* LRU pages aren't accounted at the root level */ 6841 if (!mem_cgroup_is_root(memcg)) 6842 ug->nr_memory += nr_pages; 6843 ug->pgpgout++; 6844 6845 page->memcg_data = 0; 6846 } 6847 6848 css_put(&memcg->css); 6849 } 6850 6851 /** 6852 * mem_cgroup_uncharge - uncharge a page 6853 * @page: page to uncharge 6854 * 6855 * Uncharge a page previously charged with mem_cgroup_charge(). 6856 */ 6857 void mem_cgroup_uncharge(struct page *page) 6858 { 6859 struct uncharge_gather ug; 6860 6861 if (mem_cgroup_disabled()) 6862 return; 6863 6864 /* Don't touch page->lru of any random page, pre-check: */ 6865 if (!page_memcg(page)) 6866 return; 6867 6868 uncharge_gather_clear(&ug); 6869 uncharge_page(page, &ug); 6870 uncharge_batch(&ug); 6871 } 6872 6873 /** 6874 * mem_cgroup_uncharge_list - uncharge a list of page 6875 * @page_list: list of pages to uncharge 6876 * 6877 * Uncharge a list of pages previously charged with 6878 * mem_cgroup_charge(). 6879 */ 6880 void mem_cgroup_uncharge_list(struct list_head *page_list) 6881 { 6882 struct uncharge_gather ug; 6883 struct page *page; 6884 6885 if (mem_cgroup_disabled()) 6886 return; 6887 6888 uncharge_gather_clear(&ug); 6889 list_for_each_entry(page, page_list, lru) 6890 uncharge_page(page, &ug); 6891 if (ug.memcg) 6892 uncharge_batch(&ug); 6893 } 6894 6895 /** 6896 * mem_cgroup_migrate - charge a page's replacement 6897 * @oldpage: currently circulating page 6898 * @newpage: replacement page 6899 * 6900 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6901 * be uncharged upon free. 6902 * 6903 * Both pages must be locked, @newpage->mapping must be set up. 6904 */ 6905 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6906 { 6907 struct mem_cgroup *memcg; 6908 unsigned int nr_pages; 6909 unsigned long flags; 6910 6911 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6912 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6913 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6914 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6915 newpage); 6916 6917 if (mem_cgroup_disabled()) 6918 return; 6919 6920 /* Page cache replacement: new page already charged? */ 6921 if (page_memcg(newpage)) 6922 return; 6923 6924 memcg = page_memcg(oldpage); 6925 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); 6926 if (!memcg) 6927 return; 6928 6929 /* Force-charge the new page. 
The old one will be freed soon */ 6930 nr_pages = thp_nr_pages(newpage); 6931 6932 page_counter_charge(&memcg->memory, nr_pages); 6933 if (do_memsw_account()) 6934 page_counter_charge(&memcg->memsw, nr_pages); 6935 6936 css_get(&memcg->css); 6937 commit_charge(newpage, memcg); 6938 6939 local_irq_save(flags); 6940 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 6941 memcg_check_events(memcg, newpage); 6942 local_irq_restore(flags); 6943 } 6944 6945 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 6946 EXPORT_SYMBOL(memcg_sockets_enabled_key); 6947 6948 void mem_cgroup_sk_alloc(struct sock *sk) 6949 { 6950 struct mem_cgroup *memcg; 6951 6952 if (!mem_cgroup_sockets_enabled) 6953 return; 6954 6955 /* Do not associate the sock with unrelated interrupted task's memcg. */ 6956 if (in_interrupt()) 6957 return; 6958 6959 rcu_read_lock(); 6960 memcg = mem_cgroup_from_task(current); 6961 if (memcg == root_mem_cgroup) 6962 goto out; 6963 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 6964 goto out; 6965 if (css_tryget(&memcg->css)) 6966 sk->sk_memcg = memcg; 6967 out: 6968 rcu_read_unlock(); 6969 } 6970 6971 void mem_cgroup_sk_free(struct sock *sk) 6972 { 6973 if (sk->sk_memcg) 6974 css_put(&sk->sk_memcg->css); 6975 } 6976 6977 /** 6978 * mem_cgroup_charge_skmem - charge socket memory 6979 * @memcg: memcg to charge 6980 * @nr_pages: number of pages to charge 6981 * 6982 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 6983 * @memcg's configured limit, %false if the charge had to be forced. 6984 */ 6985 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 6986 { 6987 gfp_t gfp_mask = GFP_KERNEL; 6988 6989 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6990 struct page_counter *fail; 6991 6992 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 6993 memcg->tcpmem_pressure = 0; 6994 return true; 6995 } 6996 page_counter_charge(&memcg->tcpmem, nr_pages); 6997 memcg->tcpmem_pressure = 1; 6998 return false; 6999 } 7000 7001 /* Don't block in the packet receive path */ 7002 if (in_softirq()) 7003 gfp_mask = GFP_NOWAIT; 7004 7005 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7006 7007 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7008 return true; 7009 7010 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7011 return false; 7012 } 7013 7014 /** 7015 * mem_cgroup_uncharge_skmem - uncharge socket memory 7016 * @memcg: memcg to uncharge 7017 * @nr_pages: number of pages to uncharge 7018 */ 7019 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7020 { 7021 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7022 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7023 return; 7024 } 7025 7026 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7027 7028 refill_stock(memcg, nr_pages); 7029 } 7030 7031 static int __init cgroup_memory(char *s) 7032 { 7033 char *token; 7034 7035 while ((token = strsep(&s, ",")) != NULL) { 7036 if (!*token) 7037 continue; 7038 if (!strcmp(token, "nosocket")) 7039 cgroup_memory_nosocket = true; 7040 if (!strcmp(token, "nokmem")) 7041 cgroup_memory_nokmem = true; 7042 } 7043 return 0; 7044 } 7045 __setup("cgroup.memory=", cgroup_memory); 7046 7047 /* 7048 * subsys_initcall() for memory controller. 
7049 * 7050 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7051 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7052 * basically everything that doesn't depend on a specific mem_cgroup structure 7053 * should be initialized from here. 7054 */ 7055 static int __init mem_cgroup_init(void) 7056 { 7057 int cpu, node; 7058 7059 /* 7060 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7061 * used for per-memcg-per-cpu caching of per-node statistics. In order 7062 * to work fine, we should make sure that the overfill threshold can't 7063 * exceed S32_MAX / PAGE_SIZE. 7064 */ 7065 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7066 7067 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7068 memcg_hotplug_cpu_dead); 7069 7070 for_each_possible_cpu(cpu) 7071 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7072 drain_local_stock); 7073 7074 for_each_node(node) { 7075 struct mem_cgroup_tree_per_node *rtpn; 7076 7077 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7078 node_online(node) ? node : NUMA_NO_NODE); 7079 7080 rtpn->rb_root = RB_ROOT; 7081 rtpn->rb_rightmost = NULL; 7082 spin_lock_init(&rtpn->lock); 7083 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7084 } 7085 7086 return 0; 7087 } 7088 subsys_initcall(mem_cgroup_init); 7089 7090 #ifdef CONFIG_MEMCG_SWAP 7091 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7092 { 7093 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7094 /* 7095 * The root cgroup cannot be destroyed, so it's refcount must 7096 * always be >= 1. 7097 */ 7098 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7099 VM_BUG_ON(1); 7100 break; 7101 } 7102 memcg = parent_mem_cgroup(memcg); 7103 if (!memcg) 7104 memcg = root_mem_cgroup; 7105 } 7106 return memcg; 7107 } 7108 7109 /** 7110 * mem_cgroup_swapout - transfer a memsw charge to swap 7111 * @page: page whose memsw charge to transfer 7112 * @entry: swap entry to move the charge to 7113 * 7114 * Transfer the memsw charge of @page to @entry. 7115 */ 7116 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7117 { 7118 struct mem_cgroup *memcg, *swap_memcg; 7119 unsigned int nr_entries; 7120 unsigned short oldid; 7121 7122 VM_BUG_ON_PAGE(PageLRU(page), page); 7123 VM_BUG_ON_PAGE(page_count(page), page); 7124 7125 if (mem_cgroup_disabled()) 7126 return; 7127 7128 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7129 return; 7130 7131 memcg = page_memcg(page); 7132 7133 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7134 if (!memcg) 7135 return; 7136 7137 /* 7138 * In case the memcg owning these pages has been offlined and doesn't 7139 * have an ID allocated to it anymore, charge the closest online 7140 * ancestor for the swap instead and transfer the memory+swap charge. 
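	 * (mem_cgroup_id_get_online() below walks up the tree until it
	 * finds an ancestor whose ID reference count can still be taken,
	 * falling back to the root cgroup.)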
7141 */ 7142 swap_memcg = mem_cgroup_id_get_online(memcg); 7143 nr_entries = thp_nr_pages(page); 7144 /* Get references for the tail pages, too */ 7145 if (nr_entries > 1) 7146 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7147 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7148 nr_entries); 7149 VM_BUG_ON_PAGE(oldid, page); 7150 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7151 7152 page->memcg_data = 0; 7153 7154 if (!mem_cgroup_is_root(memcg)) 7155 page_counter_uncharge(&memcg->memory, nr_entries); 7156 7157 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7158 if (!mem_cgroup_is_root(swap_memcg)) 7159 page_counter_charge(&swap_memcg->memsw, nr_entries); 7160 page_counter_uncharge(&memcg->memsw, nr_entries); 7161 } 7162 7163 /* 7164 * Interrupts should be disabled here because the caller holds the 7165 * i_pages lock which is taken with interrupts-off. It is 7166 * important here to have the interrupts disabled because it is the 7167 * only synchronisation we have for updating the per-CPU variables. 7168 */ 7169 VM_BUG_ON(!irqs_disabled()); 7170 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7171 memcg_check_events(memcg, page); 7172 7173 css_put(&memcg->css); 7174 } 7175 7176 /** 7177 * mem_cgroup_try_charge_swap - try charging swap space for a page 7178 * @page: page being added to swap 7179 * @entry: swap entry to charge 7180 * 7181 * Try to charge @page's memcg for the swap space at @entry. 7182 * 7183 * Returns 0 on success, -ENOMEM on failure. 7184 */ 7185 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7186 { 7187 unsigned int nr_pages = thp_nr_pages(page); 7188 struct page_counter *counter; 7189 struct mem_cgroup *memcg; 7190 unsigned short oldid; 7191 7192 if (mem_cgroup_disabled()) 7193 return 0; 7194 7195 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7196 return 0; 7197 7198 memcg = page_memcg(page); 7199 7200 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7201 if (!memcg) 7202 return 0; 7203 7204 if (!entry.val) { 7205 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7206 return 0; 7207 } 7208 7209 memcg = mem_cgroup_id_get_online(memcg); 7210 7211 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7212 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7213 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7214 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7215 mem_cgroup_id_put(memcg); 7216 return -ENOMEM; 7217 } 7218 7219 /* Get references for the tail pages, too */ 7220 if (nr_pages > 1) 7221 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7222 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7223 VM_BUG_ON_PAGE(oldid, page); 7224 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7225 7226 return 0; 7227 } 7228 7229 /** 7230 * mem_cgroup_uncharge_swap - uncharge swap space 7231 * @entry: swap entry to uncharge 7232 * @nr_pages: the amount of swap space to uncharge 7233 */ 7234 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7235 { 7236 struct mem_cgroup *memcg; 7237 unsigned short id; 7238 7239 id = swap_cgroup_record(entry, 0, nr_pages); 7240 rcu_read_lock(); 7241 memcg = mem_cgroup_from_id(id); 7242 if (memcg) { 7243 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7244 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7245 page_counter_uncharge(&memcg->swap, nr_pages); 7246 else 7247 page_counter_uncharge(&memcg->memsw, nr_pages); 7248 } 7249 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7250 mem_cgroup_id_put_many(memcg, nr_pages); 7251 } 7252 rcu_read_unlock(); 7253 } 
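
/*
 * mem_cgroup_get_nr_swap_pages - how much swap @memcg may still use
 *
 * Walks from @memcg up to the root and returns the smallest remaining
 * swap allowance (swap.max minus current swap usage) found on the way,
 * clamped to the number of free swap pages in the system. When cgroup
 * swap accounting is disabled this is simply the global free swap count.
 */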
7254 7255 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7256 { 7257 long nr_swap_pages = get_nr_swap_pages(); 7258 7259 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7260 return nr_swap_pages; 7261 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7262 nr_swap_pages = min_t(long, nr_swap_pages, 7263 READ_ONCE(memcg->swap.max) - 7264 page_counter_read(&memcg->swap)); 7265 return nr_swap_pages; 7266 } 7267 7268 bool mem_cgroup_swap_full(struct page *page) 7269 { 7270 struct mem_cgroup *memcg; 7271 7272 VM_BUG_ON_PAGE(!PageLocked(page), page); 7273 7274 if (vm_swap_full()) 7275 return true; 7276 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7277 return false; 7278 7279 memcg = page_memcg(page); 7280 if (!memcg) 7281 return false; 7282 7283 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7284 unsigned long usage = page_counter_read(&memcg->swap); 7285 7286 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7287 usage * 2 >= READ_ONCE(memcg->swap.max)) 7288 return true; 7289 } 7290 7291 return false; 7292 } 7293 7294 static int __init setup_swap_account(char *s) 7295 { 7296 if (!strcmp(s, "1")) 7297 cgroup_memory_noswap = false; 7298 else if (!strcmp(s, "0")) 7299 cgroup_memory_noswap = true; 7300 return 1; 7301 } 7302 __setup("swapaccount=", setup_swap_account); 7303 7304 static u64 swap_current_read(struct cgroup_subsys_state *css, 7305 struct cftype *cft) 7306 { 7307 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7308 7309 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7310 } 7311 7312 static int swap_high_show(struct seq_file *m, void *v) 7313 { 7314 return seq_puts_memcg_tunable(m, 7315 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7316 } 7317 7318 static ssize_t swap_high_write(struct kernfs_open_file *of, 7319 char *buf, size_t nbytes, loff_t off) 7320 { 7321 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7322 unsigned long high; 7323 int err; 7324 7325 buf = strstrip(buf); 7326 err = page_counter_memparse(buf, "max", &high); 7327 if (err) 7328 return err; 7329 7330 page_counter_set_high(&memcg->swap, high); 7331 7332 return nbytes; 7333 } 7334 7335 static int swap_max_show(struct seq_file *m, void *v) 7336 { 7337 return seq_puts_memcg_tunable(m, 7338 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7339 } 7340 7341 static ssize_t swap_max_write(struct kernfs_open_file *of, 7342 char *buf, size_t nbytes, loff_t off) 7343 { 7344 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7345 unsigned long max; 7346 int err; 7347 7348 buf = strstrip(buf); 7349 err = page_counter_memparse(buf, "max", &max); 7350 if (err) 7351 return err; 7352 7353 xchg(&memcg->swap.max, max); 7354 7355 return nbytes; 7356 } 7357 7358 static int swap_events_show(struct seq_file *m, void *v) 7359 { 7360 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7361 7362 seq_printf(m, "high %lu\n", 7363 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7364 seq_printf(m, "max %lu\n", 7365 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7366 seq_printf(m, "fail %lu\n", 7367 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7368 7369 return 0; 7370 } 7371 7372 static struct cftype swap_files[] = { 7373 { 7374 .name = "swap.current", 7375 .flags = CFTYPE_NOT_ON_ROOT, 7376 .read_u64 = swap_current_read, 7377 }, 7378 { 7379 .name = "swap.high", 7380 .flags = CFTYPE_NOT_ON_ROOT, 7381 .seq_show = swap_high_show, 7382 .write = swap_high_write, 7383 }, 7384 { 7385 .name = 
"swap.max", 7386 .flags = CFTYPE_NOT_ON_ROOT, 7387 .seq_show = swap_max_show, 7388 .write = swap_max_write, 7389 }, 7390 { 7391 .name = "swap.events", 7392 .flags = CFTYPE_NOT_ON_ROOT, 7393 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7394 .seq_show = swap_events_show, 7395 }, 7396 { } /* terminate */ 7397 }; 7398 7399 static struct cftype memsw_files[] = { 7400 { 7401 .name = "memsw.usage_in_bytes", 7402 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7403 .read_u64 = mem_cgroup_read_u64, 7404 }, 7405 { 7406 .name = "memsw.max_usage_in_bytes", 7407 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7408 .write = mem_cgroup_reset, 7409 .read_u64 = mem_cgroup_read_u64, 7410 }, 7411 { 7412 .name = "memsw.limit_in_bytes", 7413 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7414 .write = mem_cgroup_write, 7415 .read_u64 = mem_cgroup_read_u64, 7416 }, 7417 { 7418 .name = "memsw.failcnt", 7419 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7420 .write = mem_cgroup_reset, 7421 .read_u64 = mem_cgroup_read_u64, 7422 }, 7423 { }, /* terminate */ 7424 }; 7425 7426 /* 7427 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7428 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7429 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7430 * boot parameter. This may result in premature OOPS inside 7431 * mem_cgroup_get_nr_swap_pages() function in corner cases. 7432 */ 7433 static int __init mem_cgroup_swap_init(void) 7434 { 7435 /* No memory control -> no swap control */ 7436 if (mem_cgroup_disabled()) 7437 cgroup_memory_noswap = true; 7438 7439 if (cgroup_memory_noswap) 7440 return 0; 7441 7442 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7443 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7444 7445 return 0; 7446 } 7447 core_initcall(mem_cgroup_swap_init); 7448 7449 #endif /* CONFIG_MEMCG_SWAP */ 7450