1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * Kernel Memory Controller 14 * Copyright (C) 2012 Parallels Inc. and Google Inc. 15 * Authors: Glauber Costa and Suleiman Souhlal 16 * 17 * Native page reclaim 18 * Charge lifetime sanitation 19 * Lockless page tracking & accounting 20 * Unified hierarchy configuration model 21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 22 * 23 * This program is free software; you can redistribute it and/or modify 24 * it under the terms of the GNU General Public License as published by 25 * the Free Software Foundation; either version 2 of the License, or 26 * (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 */ 33 34 #include <linux/page_counter.h> 35 #include <linux/memcontrol.h> 36 #include <linux/cgroup.h> 37 #include <linux/mm.h> 38 #include <linux/hugetlb.h> 39 #include <linux/pagemap.h> 40 #include <linux/smp.h> 41 #include <linux/page-flags.h> 42 #include <linux/backing-dev.h> 43 #include <linux/bit_spinlock.h> 44 #include <linux/rcupdate.h> 45 #include <linux/limits.h> 46 #include <linux/export.h> 47 #include <linux/mutex.h> 48 #include <linux/rbtree.h> 49 #include <linux/slab.h> 50 #include <linux/swap.h> 51 #include <linux/swapops.h> 52 #include <linux/spinlock.h> 53 #include <linux/eventfd.h> 54 #include <linux/poll.h> 55 #include <linux/sort.h> 56 #include <linux/fs.h> 57 #include <linux/seq_file.h> 58 #include <linux/vmpressure.h> 59 #include <linux/mm_inline.h> 60 #include <linux/swap_cgroup.h> 61 #include <linux/cpu.h> 62 #include <linux/oom.h> 63 #include <linux/lockdep.h> 64 #include <linux/file.h> 65 #include <linux/tracehook.h> 66 #include "internal.h" 67 #include <net/sock.h> 68 #include <net/ip.h> 69 #include "slab.h" 70 71 #include <asm/uaccess.h> 72 73 #include <trace/events/vmscan.h> 74 75 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 76 EXPORT_SYMBOL(memory_cgrp_subsys); 77 78 struct mem_cgroup *root_mem_cgroup __read_mostly; 79 80 #define MEM_CGROUP_RECLAIM_RETRIES 5 81 82 /* Socket memory accounting disabled? */ 83 static bool cgroup_memory_nosocket; 84 85 /* Kernel memory accounting disabled? */ 86 static bool cgroup_memory_nokmem; 87 88 /* Whether the swap controller is active */ 89 #ifdef CONFIG_MEMCG_SWAP 90 int do_swap_account __read_mostly; 91 #else 92 #define do_swap_account 0 93 #endif 94 95 /* Whether legacy memory+swap accounting is active */ 96 static bool do_memsw_account(void) 97 { 98 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account; 99 } 100 101 static const char * const mem_cgroup_stat_names[] = { 102 "cache", 103 "rss", 104 "rss_huge", 105 "mapped_file", 106 "dirty", 107 "writeback", 108 "swap", 109 }; 110 111 static const char * const mem_cgroup_events_names[] = { 112 "pgpgin", 113 "pgpgout", 114 "pgfault", 115 "pgmajfault", 116 }; 117 118 static const char * const mem_cgroup_lru_names[] = { 119 "inactive_anon", 120 "active_anon", 121 "inactive_file", 122 "active_file", 123 "unevictable", 124 }; 125 126 #define THRESHOLDS_EVENTS_TARGET 128 127 #define SOFTLIMIT_EVENTS_TARGET 1024 128 #define NUMAINFO_EVENTS_TARGET 1024 129 130 /* 131 * Cgroups above their limits are maintained in a RB-Tree, independent of 132 * their hierarchy representation 133 */ 134 135 struct mem_cgroup_tree_per_node { 136 struct rb_root rb_root; 137 spinlock_t lock; 138 }; 139 140 struct mem_cgroup_tree { 141 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 142 }; 143 144 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 145 146 /* for OOM */ 147 struct mem_cgroup_eventfd_list { 148 struct list_head list; 149 struct eventfd_ctx *eventfd; 150 }; 151 152 /* 153 * cgroup_event represents events which userspace want to receive. 154 */ 155 struct mem_cgroup_event { 156 /* 157 * memcg which the event belongs to. 158 */ 159 struct mem_cgroup *memcg; 160 /* 161 * eventfd to signal userspace about the event. 162 */ 163 struct eventfd_ctx *eventfd; 164 /* 165 * Each of these stored in a list by the cgroup. 166 */ 167 struct list_head list; 168 /* 169 * register_event() callback will be used to add new userspace 170 * waiter for changes related to this event. Use eventfd_signal() 171 * on eventfd to send notification to userspace. 172 */ 173 int (*register_event)(struct mem_cgroup *memcg, 174 struct eventfd_ctx *eventfd, const char *args); 175 /* 176 * unregister_event() callback will be called when userspace closes 177 * the eventfd or on cgroup removing. This callback must be set, 178 * if you want provide notification functionality. 179 */ 180 void (*unregister_event)(struct mem_cgroup *memcg, 181 struct eventfd_ctx *eventfd); 182 /* 183 * All fields below needed to unregister event when 184 * userspace closes eventfd. 185 */ 186 poll_table pt; 187 wait_queue_head_t *wqh; 188 wait_queue_t wait; 189 struct work_struct remove; 190 }; 191 192 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 193 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 194 195 /* Stuffs for move charges at task migration. */ 196 /* 197 * Types of charges to be moved. 198 */ 199 #define MOVE_ANON 0x1U 200 #define MOVE_FILE 0x2U 201 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 202 203 /* "mc" and its members are protected by cgroup_mutex */ 204 static struct move_charge_struct { 205 spinlock_t lock; /* for from, to */ 206 struct mm_struct *mm; 207 struct mem_cgroup *from; 208 struct mem_cgroup *to; 209 unsigned long flags; 210 unsigned long precharge; 211 unsigned long moved_charge; 212 unsigned long moved_swap; 213 struct task_struct *moving_task; /* a task moving charges */ 214 wait_queue_head_t waitq; /* a waitq for other context */ 215 } mc = { 216 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 217 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 218 }; 219 220 /* 221 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 222 * limit reclaim to prevent infinite loops, if they ever occur. 223 */ 224 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 225 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 226 227 enum charge_type { 228 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 229 MEM_CGROUP_CHARGE_TYPE_ANON, 230 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 231 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 232 NR_CHARGE_TYPE, 233 }; 234 235 /* for encoding cft->private value on file */ 236 enum res_type { 237 _MEM, 238 _MEMSWAP, 239 _OOM_TYPE, 240 _KMEM, 241 _TCP, 242 }; 243 244 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 245 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 246 #define MEMFILE_ATTR(val) ((val) & 0xffff) 247 /* Used for OOM nofiier */ 248 #define OOM_CONTROL (0) 249 250 /* Some nice accessors for the vmpressure. */ 251 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 252 { 253 if (!memcg) 254 memcg = root_mem_cgroup; 255 return &memcg->vmpressure; 256 } 257 258 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 259 { 260 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 261 } 262 263 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) 264 { 265 return (memcg == root_mem_cgroup); 266 } 267 268 #ifndef CONFIG_SLOB 269 /* 270 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. 271 * The main reason for not using cgroup id for this: 272 * this works better in sparse environments, where we have a lot of memcgs, 273 * but only a few kmem-limited. Or also, if we have, for instance, 200 274 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 275 * 200 entry array for that. 276 * 277 * The current size of the caches array is stored in memcg_nr_cache_ids. It 278 * will double each time we have to increase it. 279 */ 280 static DEFINE_IDA(memcg_cache_ida); 281 int memcg_nr_cache_ids; 282 283 /* Protects memcg_nr_cache_ids */ 284 static DECLARE_RWSEM(memcg_cache_ids_sem); 285 286 void memcg_get_cache_ids(void) 287 { 288 down_read(&memcg_cache_ids_sem); 289 } 290 291 void memcg_put_cache_ids(void) 292 { 293 up_read(&memcg_cache_ids_sem); 294 } 295 296 /* 297 * MIN_SIZE is different than 1, because we would like to avoid going through 298 * the alloc/free process all the time. In a small machine, 4 kmem-limited 299 * cgroups is a reasonable guess. In the future, it could be a parameter or 300 * tunable, but that is strictly not necessary. 301 * 302 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 303 * this constant directly from cgroup, but it is understandable that this is 304 * better kept as an internal representation in cgroup.c. In any case, the 305 * cgrp_id space is not getting any smaller, and we don't have to necessarily 306 * increase ours as well if it increases. 307 */ 308 #define MEMCG_CACHES_MIN_SIZE 4 309 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 310 311 /* 312 * A lot of the calls to the cache allocation functions are expected to be 313 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are 314 * conditional to this static branch, we'll have to allow modules that does 315 * kmem_cache_alloc and the such to see this symbol as well 316 */ 317 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 318 EXPORT_SYMBOL(memcg_kmem_enabled_key); 319 320 #endif /* !CONFIG_SLOB */ 321 322 /** 323 * mem_cgroup_css_from_page - css of the memcg associated with a page 324 * @page: page of interest 325 * 326 * If memcg is bound to the default hierarchy, css of the memcg associated 327 * with @page is returned. The returned css remains associated with @page 328 * until it is released. 329 * 330 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 331 * is returned. 332 */ 333 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 334 { 335 struct mem_cgroup *memcg; 336 337 memcg = page->mem_cgroup; 338 339 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 340 memcg = root_mem_cgroup; 341 342 return &memcg->css; 343 } 344 345 /** 346 * page_cgroup_ino - return inode number of the memcg a page is charged to 347 * @page: the page 348 * 349 * Look up the closest online ancestor of the memory cgroup @page is charged to 350 * and return its inode number or 0 if @page is not charged to any cgroup. It 351 * is safe to call this function without holding a reference to @page. 352 * 353 * Note, this function is inherently racy, because there is nothing to prevent 354 * the cgroup inode from getting torn down and potentially reallocated a moment 355 * after page_cgroup_ino() returns, so it only should be used by callers that 356 * do not care (such as procfs interfaces). 357 */ 358 ino_t page_cgroup_ino(struct page *page) 359 { 360 struct mem_cgroup *memcg; 361 unsigned long ino = 0; 362 363 rcu_read_lock(); 364 memcg = READ_ONCE(page->mem_cgroup); 365 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 366 memcg = parent_mem_cgroup(memcg); 367 if (memcg) 368 ino = cgroup_ino(memcg->css.cgroup); 369 rcu_read_unlock(); 370 return ino; 371 } 372 373 static struct mem_cgroup_per_node * 374 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 375 { 376 int nid = page_to_nid(page); 377 378 return memcg->nodeinfo[nid]; 379 } 380 381 static struct mem_cgroup_tree_per_node * 382 soft_limit_tree_node(int nid) 383 { 384 return soft_limit_tree.rb_tree_per_node[nid]; 385 } 386 387 static struct mem_cgroup_tree_per_node * 388 soft_limit_tree_from_page(struct page *page) 389 { 390 int nid = page_to_nid(page); 391 392 return soft_limit_tree.rb_tree_per_node[nid]; 393 } 394 395 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 396 struct mem_cgroup_tree_per_node *mctz, 397 unsigned long new_usage_in_excess) 398 { 399 struct rb_node **p = &mctz->rb_root.rb_node; 400 struct rb_node *parent = NULL; 401 struct mem_cgroup_per_node *mz_node; 402 403 if (mz->on_tree) 404 return; 405 406 mz->usage_in_excess = new_usage_in_excess; 407 if (!mz->usage_in_excess) 408 return; 409 while (*p) { 410 parent = *p; 411 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 412 tree_node); 413 if (mz->usage_in_excess < mz_node->usage_in_excess) 414 p = &(*p)->rb_left; 415 /* 416 * We can't avoid mem cgroups that are over their soft 417 * limit by the same amount 418 */ 419 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 420 p = &(*p)->rb_right; 421 } 422 rb_link_node(&mz->tree_node, parent, p); 423 rb_insert_color(&mz->tree_node, &mctz->rb_root); 424 mz->on_tree = true; 425 } 426 427 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 428 struct mem_cgroup_tree_per_node *mctz) 429 { 430 if (!mz->on_tree) 431 return; 432 rb_erase(&mz->tree_node, &mctz->rb_root); 433 mz->on_tree = false; 434 } 435 436 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 437 struct mem_cgroup_tree_per_node *mctz) 438 { 439 unsigned long flags; 440 441 spin_lock_irqsave(&mctz->lock, flags); 442 __mem_cgroup_remove_exceeded(mz, mctz); 443 spin_unlock_irqrestore(&mctz->lock, flags); 444 } 445 446 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 447 { 448 unsigned long nr_pages = page_counter_read(&memcg->memory); 449 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 450 unsigned long excess = 0; 451 452 if (nr_pages > soft_limit) 453 excess = nr_pages - soft_limit; 454 455 return excess; 456 } 457 458 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 459 { 460 unsigned long excess; 461 struct mem_cgroup_per_node *mz; 462 struct mem_cgroup_tree_per_node *mctz; 463 464 mctz = soft_limit_tree_from_page(page); 465 /* 466 * Necessary to update all ancestors when hierarchy is used. 467 * because their event counter is not touched. 468 */ 469 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 470 mz = mem_cgroup_page_nodeinfo(memcg, page); 471 excess = soft_limit_excess(memcg); 472 /* 473 * We have to update the tree if mz is on RB-tree or 474 * mem is over its softlimit. 475 */ 476 if (excess || mz->on_tree) { 477 unsigned long flags; 478 479 spin_lock_irqsave(&mctz->lock, flags); 480 /* if on-tree, remove it */ 481 if (mz->on_tree) 482 __mem_cgroup_remove_exceeded(mz, mctz); 483 /* 484 * Insert again. mz->usage_in_excess will be updated. 485 * If excess is 0, no tree ops. 486 */ 487 __mem_cgroup_insert_exceeded(mz, mctz, excess); 488 spin_unlock_irqrestore(&mctz->lock, flags); 489 } 490 } 491 } 492 493 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 494 { 495 struct mem_cgroup_tree_per_node *mctz; 496 struct mem_cgroup_per_node *mz; 497 int nid; 498 499 for_each_node(nid) { 500 mz = mem_cgroup_nodeinfo(memcg, nid); 501 mctz = soft_limit_tree_node(nid); 502 mem_cgroup_remove_exceeded(mz, mctz); 503 } 504 } 505 506 static struct mem_cgroup_per_node * 507 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 508 { 509 struct rb_node *rightmost = NULL; 510 struct mem_cgroup_per_node *mz; 511 512 retry: 513 mz = NULL; 514 rightmost = rb_last(&mctz->rb_root); 515 if (!rightmost) 516 goto done; /* Nothing to reclaim from */ 517 518 mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node); 519 /* 520 * Remove the node now but someone else can add it back, 521 * we will to add it back at the end of reclaim to its correct 522 * position in the tree. 523 */ 524 __mem_cgroup_remove_exceeded(mz, mctz); 525 if (!soft_limit_excess(mz->memcg) || 526 !css_tryget_online(&mz->memcg->css)) 527 goto retry; 528 done: 529 return mz; 530 } 531 532 static struct mem_cgroup_per_node * 533 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 534 { 535 struct mem_cgroup_per_node *mz; 536 537 spin_lock_irq(&mctz->lock); 538 mz = __mem_cgroup_largest_soft_limit_node(mctz); 539 spin_unlock_irq(&mctz->lock); 540 return mz; 541 } 542 543 /* 544 * Return page count for single (non recursive) @memcg. 545 * 546 * Implementation Note: reading percpu statistics for memcg. 547 * 548 * Both of vmstat[] and percpu_counter has threshold and do periodic 549 * synchronization to implement "quick" read. There are trade-off between 550 * reading cost and precision of value. Then, we may have a chance to implement 551 * a periodic synchronization of counter in memcg's counter. 552 * 553 * But this _read() function is used for user interface now. The user accounts 554 * memory usage by memory cgroup and he _always_ requires exact value because 555 * he accounts memory. Even if we provide quick-and-fuzzy read, we always 556 * have to visit all online cpus and make sum. So, for now, unnecessary 557 * synchronization is not implemented. (just implemented for cpu hotplug) 558 * 559 * If there are kernel internal actions which can make use of some not-exact 560 * value, and reading all cpu value can be performance bottleneck in some 561 * common workload, threshold and synchronization as vmstat[] should be 562 * implemented. 563 */ 564 static unsigned long 565 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) 566 { 567 long val = 0; 568 int cpu; 569 570 /* Per-cpu values can be negative, use a signed accumulator */ 571 for_each_possible_cpu(cpu) 572 val += per_cpu(memcg->stat->count[idx], cpu); 573 /* 574 * Summing races with updates, so val may be negative. Avoid exposing 575 * transient negative values. 576 */ 577 if (val < 0) 578 val = 0; 579 return val; 580 } 581 582 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, 583 enum mem_cgroup_events_index idx) 584 { 585 unsigned long val = 0; 586 int cpu; 587 588 for_each_possible_cpu(cpu) 589 val += per_cpu(memcg->stat->events[idx], cpu); 590 return val; 591 } 592 593 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 594 struct page *page, 595 bool compound, int nr_pages) 596 { 597 /* 598 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is 599 * counted as CACHE even if it's on ANON LRU. 600 */ 601 if (PageAnon(page)) 602 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], 603 nr_pages); 604 else 605 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], 606 nr_pages); 607 608 if (compound) { 609 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 610 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 611 nr_pages); 612 } 613 614 /* pagein of a big page is an event. So, ignore page size */ 615 if (nr_pages > 0) 616 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 617 else { 618 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 619 nr_pages = -nr_pages; /* for event */ 620 } 621 622 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 623 } 624 625 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 626 int nid, unsigned int lru_mask) 627 { 628 unsigned long nr = 0; 629 struct mem_cgroup_per_node *mz; 630 enum lru_list lru; 631 632 VM_BUG_ON((unsigned)nid >= nr_node_ids); 633 634 for_each_lru(lru) { 635 if (!(BIT(lru) & lru_mask)) 636 continue; 637 mz = mem_cgroup_nodeinfo(memcg, nid); 638 nr += mz->lru_size[lru]; 639 } 640 return nr; 641 } 642 643 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 644 unsigned int lru_mask) 645 { 646 unsigned long nr = 0; 647 int nid; 648 649 for_each_node_state(nid, N_MEMORY) 650 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 651 return nr; 652 } 653 654 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 655 enum mem_cgroup_events_target target) 656 { 657 unsigned long val, next; 658 659 val = __this_cpu_read(memcg->stat->nr_page_events); 660 next = __this_cpu_read(memcg->stat->targets[target]); 661 /* from time_after() in jiffies.h */ 662 if ((long)next - (long)val < 0) { 663 switch (target) { 664 case MEM_CGROUP_TARGET_THRESH: 665 next = val + THRESHOLDS_EVENTS_TARGET; 666 break; 667 case MEM_CGROUP_TARGET_SOFTLIMIT: 668 next = val + SOFTLIMIT_EVENTS_TARGET; 669 break; 670 case MEM_CGROUP_TARGET_NUMAINFO: 671 next = val + NUMAINFO_EVENTS_TARGET; 672 break; 673 default: 674 break; 675 } 676 __this_cpu_write(memcg->stat->targets[target], next); 677 return true; 678 } 679 return false; 680 } 681 682 /* 683 * Check events in order. 684 * 685 */ 686 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 687 { 688 /* threshold event is triggered in finer grain than soft limit */ 689 if (unlikely(mem_cgroup_event_ratelimit(memcg, 690 MEM_CGROUP_TARGET_THRESH))) { 691 bool do_softlimit; 692 bool do_numainfo __maybe_unused; 693 694 do_softlimit = mem_cgroup_event_ratelimit(memcg, 695 MEM_CGROUP_TARGET_SOFTLIMIT); 696 #if MAX_NUMNODES > 1 697 do_numainfo = mem_cgroup_event_ratelimit(memcg, 698 MEM_CGROUP_TARGET_NUMAINFO); 699 #endif 700 mem_cgroup_threshold(memcg); 701 if (unlikely(do_softlimit)) 702 mem_cgroup_update_tree(memcg, page); 703 #if MAX_NUMNODES > 1 704 if (unlikely(do_numainfo)) 705 atomic_inc(&memcg->numainfo_events); 706 #endif 707 } 708 } 709 710 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 711 { 712 /* 713 * mm_update_next_owner() may clear mm->owner to NULL 714 * if it races with swapoff, page migration, etc. 715 * So this can be called with p == NULL. 716 */ 717 if (unlikely(!p)) 718 return NULL; 719 720 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 721 } 722 EXPORT_SYMBOL(mem_cgroup_from_task); 723 724 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 725 { 726 struct mem_cgroup *memcg = NULL; 727 728 rcu_read_lock(); 729 do { 730 /* 731 * Page cache insertions can happen withou an 732 * actual mm context, e.g. during disk probing 733 * on boot, loopback IO, acct() writes etc. 734 */ 735 if (unlikely(!mm)) 736 memcg = root_mem_cgroup; 737 else { 738 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 739 if (unlikely(!memcg)) 740 memcg = root_mem_cgroup; 741 } 742 } while (!css_tryget_online(&memcg->css)); 743 rcu_read_unlock(); 744 return memcg; 745 } 746 747 /** 748 * mem_cgroup_iter - iterate over memory cgroup hierarchy 749 * @root: hierarchy root 750 * @prev: previously returned memcg, NULL on first invocation 751 * @reclaim: cookie for shared reclaim walks, NULL for full walks 752 * 753 * Returns references to children of the hierarchy below @root, or 754 * @root itself, or %NULL after a full round-trip. 755 * 756 * Caller must pass the return value in @prev on subsequent 757 * invocations for reference counting, or use mem_cgroup_iter_break() 758 * to cancel a hierarchy walk before the round-trip is complete. 759 * 760 * Reclaimers can specify a zone and a priority level in @reclaim to 761 * divide up the memcgs in the hierarchy among all concurrent 762 * reclaimers operating on the same zone and priority. 763 */ 764 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 765 struct mem_cgroup *prev, 766 struct mem_cgroup_reclaim_cookie *reclaim) 767 { 768 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 769 struct cgroup_subsys_state *css = NULL; 770 struct mem_cgroup *memcg = NULL; 771 struct mem_cgroup *pos = NULL; 772 773 if (mem_cgroup_disabled()) 774 return NULL; 775 776 if (!root) 777 root = root_mem_cgroup; 778 779 if (prev && !reclaim) 780 pos = prev; 781 782 if (!root->use_hierarchy && root != root_mem_cgroup) { 783 if (prev) 784 goto out; 785 return root; 786 } 787 788 rcu_read_lock(); 789 790 if (reclaim) { 791 struct mem_cgroup_per_node *mz; 792 793 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); 794 iter = &mz->iter[reclaim->priority]; 795 796 if (prev && reclaim->generation != iter->generation) 797 goto out_unlock; 798 799 while (1) { 800 pos = READ_ONCE(iter->position); 801 if (!pos || css_tryget(&pos->css)) 802 break; 803 /* 804 * css reference reached zero, so iter->position will 805 * be cleared by ->css_released. However, we should not 806 * rely on this happening soon, because ->css_released 807 * is called from a work queue, and by busy-waiting we 808 * might block it. So we clear iter->position right 809 * away. 810 */ 811 (void)cmpxchg(&iter->position, pos, NULL); 812 } 813 } 814 815 if (pos) 816 css = &pos->css; 817 818 for (;;) { 819 css = css_next_descendant_pre(css, &root->css); 820 if (!css) { 821 /* 822 * Reclaimers share the hierarchy walk, and a 823 * new one might jump in right at the end of 824 * the hierarchy - make sure they see at least 825 * one group and restart from the beginning. 826 */ 827 if (!prev) 828 continue; 829 break; 830 } 831 832 /* 833 * Verify the css and acquire a reference. The root 834 * is provided by the caller, so we know it's alive 835 * and kicking, and don't take an extra reference. 836 */ 837 memcg = mem_cgroup_from_css(css); 838 839 if (css == &root->css) 840 break; 841 842 if (css_tryget(css)) 843 break; 844 845 memcg = NULL; 846 } 847 848 if (reclaim) { 849 /* 850 * The position could have already been updated by a competing 851 * thread, so check that the value hasn't changed since we read 852 * it to avoid reclaiming from the same cgroup twice. 853 */ 854 (void)cmpxchg(&iter->position, pos, memcg); 855 856 if (pos) 857 css_put(&pos->css); 858 859 if (!memcg) 860 iter->generation++; 861 else if (!prev) 862 reclaim->generation = iter->generation; 863 } 864 865 out_unlock: 866 rcu_read_unlock(); 867 out: 868 if (prev && prev != root) 869 css_put(&prev->css); 870 871 return memcg; 872 } 873 874 /** 875 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 876 * @root: hierarchy root 877 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 878 */ 879 void mem_cgroup_iter_break(struct mem_cgroup *root, 880 struct mem_cgroup *prev) 881 { 882 if (!root) 883 root = root_mem_cgroup; 884 if (prev && prev != root) 885 css_put(&prev->css); 886 } 887 888 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 889 { 890 struct mem_cgroup *memcg = dead_memcg; 891 struct mem_cgroup_reclaim_iter *iter; 892 struct mem_cgroup_per_node *mz; 893 int nid; 894 int i; 895 896 while ((memcg = parent_mem_cgroup(memcg))) { 897 for_each_node(nid) { 898 mz = mem_cgroup_nodeinfo(memcg, nid); 899 for (i = 0; i <= DEF_PRIORITY; i++) { 900 iter = &mz->iter[i]; 901 cmpxchg(&iter->position, 902 dead_memcg, NULL); 903 } 904 } 905 } 906 } 907 908 /* 909 * Iteration constructs for visiting all cgroups (under a tree). If 910 * loops are exited prematurely (break), mem_cgroup_iter_break() must 911 * be used for reference counting. 912 */ 913 #define for_each_mem_cgroup_tree(iter, root) \ 914 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 915 iter != NULL; \ 916 iter = mem_cgroup_iter(root, iter, NULL)) 917 918 #define for_each_mem_cgroup(iter) \ 919 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 920 iter != NULL; \ 921 iter = mem_cgroup_iter(NULL, iter, NULL)) 922 923 /** 924 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 925 * @page: the page 926 * @zone: zone of the page 927 * 928 * This function is only safe when following the LRU page isolation 929 * and putback protocol: the LRU lock must be held, and the page must 930 * either be PageLRU() or the caller must have isolated/allocated it. 931 */ 932 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat) 933 { 934 struct mem_cgroup_per_node *mz; 935 struct mem_cgroup *memcg; 936 struct lruvec *lruvec; 937 938 if (mem_cgroup_disabled()) { 939 lruvec = &pgdat->lruvec; 940 goto out; 941 } 942 943 memcg = page->mem_cgroup; 944 /* 945 * Swapcache readahead pages are added to the LRU - and 946 * possibly migrated - before they are charged. 947 */ 948 if (!memcg) 949 memcg = root_mem_cgroup; 950 951 mz = mem_cgroup_page_nodeinfo(memcg, page); 952 lruvec = &mz->lruvec; 953 out: 954 /* 955 * Since a node can be onlined after the mem_cgroup was created, 956 * we have to be prepared to initialize lruvec->zone here; 957 * and if offlined then reonlined, we need to reinitialize it. 958 */ 959 if (unlikely(lruvec->pgdat != pgdat)) 960 lruvec->pgdat = pgdat; 961 return lruvec; 962 } 963 964 /** 965 * mem_cgroup_update_lru_size - account for adding or removing an lru page 966 * @lruvec: mem_cgroup per zone lru vector 967 * @lru: index of lru list the page is sitting on 968 * @nr_pages: positive when adding or negative when removing 969 * 970 * This function must be called under lru_lock, just before a page is added 971 * to or just after a page is removed from an lru list (that ordering being 972 * so as to allow it to check that lru_size 0 is consistent with list_empty). 973 */ 974 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 975 int nr_pages) 976 { 977 struct mem_cgroup_per_node *mz; 978 unsigned long *lru_size; 979 long size; 980 bool empty; 981 982 if (mem_cgroup_disabled()) 983 return; 984 985 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 986 lru_size = mz->lru_size + lru; 987 empty = list_empty(lruvec->lists + lru); 988 989 if (nr_pages < 0) 990 *lru_size += nr_pages; 991 992 size = *lru_size; 993 if (WARN_ONCE(size < 0 || empty != !size, 994 "%s(%p, %d, %d): lru_size %ld but %sempty\n", 995 __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) { 996 VM_BUG_ON(1); 997 *lru_size = 0; 998 } 999 1000 if (nr_pages > 0) 1001 *lru_size += nr_pages; 1002 } 1003 1004 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1005 { 1006 struct mem_cgroup *task_memcg; 1007 struct task_struct *p; 1008 bool ret; 1009 1010 p = find_lock_task_mm(task); 1011 if (p) { 1012 task_memcg = get_mem_cgroup_from_mm(p->mm); 1013 task_unlock(p); 1014 } else { 1015 /* 1016 * All threads may have already detached their mm's, but the oom 1017 * killer still needs to detect if they have already been oom 1018 * killed to prevent needlessly killing additional tasks. 1019 */ 1020 rcu_read_lock(); 1021 task_memcg = mem_cgroup_from_task(task); 1022 css_get(&task_memcg->css); 1023 rcu_read_unlock(); 1024 } 1025 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1026 css_put(&task_memcg->css); 1027 return ret; 1028 } 1029 1030 /** 1031 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1032 * @memcg: the memory cgroup 1033 * 1034 * Returns the maximum amount of memory @mem can be charged with, in 1035 * pages. 1036 */ 1037 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1038 { 1039 unsigned long margin = 0; 1040 unsigned long count; 1041 unsigned long limit; 1042 1043 count = page_counter_read(&memcg->memory); 1044 limit = READ_ONCE(memcg->memory.limit); 1045 if (count < limit) 1046 margin = limit - count; 1047 1048 if (do_memsw_account()) { 1049 count = page_counter_read(&memcg->memsw); 1050 limit = READ_ONCE(memcg->memsw.limit); 1051 if (count <= limit) 1052 margin = min(margin, limit - count); 1053 else 1054 margin = 0; 1055 } 1056 1057 return margin; 1058 } 1059 1060 /* 1061 * A routine for checking "mem" is under move_account() or not. 1062 * 1063 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1064 * moving cgroups. This is for waiting at high-memory pressure 1065 * caused by "move". 1066 */ 1067 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1068 { 1069 struct mem_cgroup *from; 1070 struct mem_cgroup *to; 1071 bool ret = false; 1072 /* 1073 * Unlike task_move routines, we access mc.to, mc.from not under 1074 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1075 */ 1076 spin_lock(&mc.lock); 1077 from = mc.from; 1078 to = mc.to; 1079 if (!from) 1080 goto unlock; 1081 1082 ret = mem_cgroup_is_descendant(from, memcg) || 1083 mem_cgroup_is_descendant(to, memcg); 1084 unlock: 1085 spin_unlock(&mc.lock); 1086 return ret; 1087 } 1088 1089 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1090 { 1091 if (mc.moving_task && current != mc.moving_task) { 1092 if (mem_cgroup_under_move(memcg)) { 1093 DEFINE_WAIT(wait); 1094 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1095 /* moving charge context might have finished. */ 1096 if (mc.moving_task) 1097 schedule(); 1098 finish_wait(&mc.waitq, &wait); 1099 return true; 1100 } 1101 } 1102 return false; 1103 } 1104 1105 #define K(x) ((x) << (PAGE_SHIFT-10)) 1106 /** 1107 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 1108 * @memcg: The memory cgroup that went over limit 1109 * @p: Task that is going to be killed 1110 * 1111 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1112 * enabled 1113 */ 1114 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1115 { 1116 struct mem_cgroup *iter; 1117 unsigned int i; 1118 1119 rcu_read_lock(); 1120 1121 if (p) { 1122 pr_info("Task in "); 1123 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1124 pr_cont(" killed as a result of limit of "); 1125 } else { 1126 pr_info("Memory limit reached of cgroup "); 1127 } 1128 1129 pr_cont_cgroup_path(memcg->css.cgroup); 1130 pr_cont("\n"); 1131 1132 rcu_read_unlock(); 1133 1134 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1135 K((u64)page_counter_read(&memcg->memory)), 1136 K((u64)memcg->memory.limit), memcg->memory.failcnt); 1137 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1138 K((u64)page_counter_read(&memcg->memsw)), 1139 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); 1140 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1141 K((u64)page_counter_read(&memcg->kmem)), 1142 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); 1143 1144 for_each_mem_cgroup_tree(iter, memcg) { 1145 pr_info("Memory cgroup stats for "); 1146 pr_cont_cgroup_path(iter->css.cgroup); 1147 pr_cont(":"); 1148 1149 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1150 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1151 continue; 1152 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], 1153 K(mem_cgroup_read_stat(iter, i))); 1154 } 1155 1156 for (i = 0; i < NR_LRU_LISTS; i++) 1157 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1158 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1159 1160 pr_cont("\n"); 1161 } 1162 } 1163 1164 /* 1165 * This function returns the number of memcg under hierarchy tree. Returns 1166 * 1(self count) if no children. 1167 */ 1168 static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1169 { 1170 int num = 0; 1171 struct mem_cgroup *iter; 1172 1173 for_each_mem_cgroup_tree(iter, memcg) 1174 num++; 1175 return num; 1176 } 1177 1178 /* 1179 * Return the memory (and swap, if configured) limit for a memcg. 1180 */ 1181 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) 1182 { 1183 unsigned long limit; 1184 1185 limit = memcg->memory.limit; 1186 if (mem_cgroup_swappiness(memcg)) { 1187 unsigned long memsw_limit; 1188 unsigned long swap_limit; 1189 1190 memsw_limit = memcg->memsw.limit; 1191 swap_limit = memcg->swap.limit; 1192 swap_limit = min(swap_limit, (unsigned long)total_swap_pages); 1193 limit = min(limit + swap_limit, memsw_limit); 1194 } 1195 return limit; 1196 } 1197 1198 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1199 int order) 1200 { 1201 struct oom_control oc = { 1202 .zonelist = NULL, 1203 .nodemask = NULL, 1204 .memcg = memcg, 1205 .gfp_mask = gfp_mask, 1206 .order = order, 1207 }; 1208 struct mem_cgroup *iter; 1209 unsigned long chosen_points = 0; 1210 unsigned long totalpages; 1211 unsigned int points = 0; 1212 struct task_struct *chosen = NULL; 1213 1214 mutex_lock(&oom_lock); 1215 1216 /* 1217 * If current has a pending SIGKILL or is exiting, then automatically 1218 * select it. The goal is to allow it to allocate so that it may 1219 * quickly exit and free its memory. 1220 */ 1221 if (task_will_free_mem(current)) { 1222 mark_oom_victim(current); 1223 wake_oom_reaper(current); 1224 goto unlock; 1225 } 1226 1227 check_panic_on_oom(&oc, CONSTRAINT_MEMCG); 1228 totalpages = mem_cgroup_get_limit(memcg) ? : 1; 1229 for_each_mem_cgroup_tree(iter, memcg) { 1230 struct css_task_iter it; 1231 struct task_struct *task; 1232 1233 css_task_iter_start(&iter->css, &it); 1234 while ((task = css_task_iter_next(&it))) { 1235 switch (oom_scan_process_thread(&oc, task)) { 1236 case OOM_SCAN_SELECT: 1237 if (chosen) 1238 put_task_struct(chosen); 1239 chosen = task; 1240 chosen_points = ULONG_MAX; 1241 get_task_struct(chosen); 1242 /* fall through */ 1243 case OOM_SCAN_CONTINUE: 1244 continue; 1245 case OOM_SCAN_ABORT: 1246 css_task_iter_end(&it); 1247 mem_cgroup_iter_break(memcg, iter); 1248 if (chosen) 1249 put_task_struct(chosen); 1250 /* Set a dummy value to return "true". */ 1251 chosen = (void *) 1; 1252 goto unlock; 1253 case OOM_SCAN_OK: 1254 break; 1255 }; 1256 points = oom_badness(task, memcg, NULL, totalpages); 1257 if (!points || points < chosen_points) 1258 continue; 1259 /* Prefer thread group leaders for display purposes */ 1260 if (points == chosen_points && 1261 thread_group_leader(chosen)) 1262 continue; 1263 1264 if (chosen) 1265 put_task_struct(chosen); 1266 chosen = task; 1267 chosen_points = points; 1268 get_task_struct(chosen); 1269 } 1270 css_task_iter_end(&it); 1271 } 1272 1273 if (chosen) { 1274 points = chosen_points * 1000 / totalpages; 1275 oom_kill_process(&oc, chosen, points, totalpages, 1276 "Memory cgroup out of memory"); 1277 } 1278 unlock: 1279 mutex_unlock(&oom_lock); 1280 return chosen; 1281 } 1282 1283 #if MAX_NUMNODES > 1 1284 1285 /** 1286 * test_mem_cgroup_node_reclaimable 1287 * @memcg: the target memcg 1288 * @nid: the node ID to be checked. 1289 * @noswap : specify true here if the user wants flle only information. 1290 * 1291 * This function returns whether the specified memcg contains any 1292 * reclaimable pages on a node. Returns true if there are any reclaimable 1293 * pages in the node. 1294 */ 1295 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, 1296 int nid, bool noswap) 1297 { 1298 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) 1299 return true; 1300 if (noswap || !total_swap_pages) 1301 return false; 1302 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) 1303 return true; 1304 return false; 1305 1306 } 1307 1308 /* 1309 * Always updating the nodemask is not very good - even if we have an empty 1310 * list or the wrong list here, we can start from some node and traverse all 1311 * nodes based on the zonelist. So update the list loosely once per 10 secs. 1312 * 1313 */ 1314 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) 1315 { 1316 int nid; 1317 /* 1318 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET 1319 * pagein/pageout changes since the last update. 1320 */ 1321 if (!atomic_read(&memcg->numainfo_events)) 1322 return; 1323 if (atomic_inc_return(&memcg->numainfo_updating) > 1) 1324 return; 1325 1326 /* make a nodemask where this memcg uses memory from */ 1327 memcg->scan_nodes = node_states[N_MEMORY]; 1328 1329 for_each_node_mask(nid, node_states[N_MEMORY]) { 1330 1331 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) 1332 node_clear(nid, memcg->scan_nodes); 1333 } 1334 1335 atomic_set(&memcg->numainfo_events, 0); 1336 atomic_set(&memcg->numainfo_updating, 0); 1337 } 1338 1339 /* 1340 * Selecting a node where we start reclaim from. Because what we need is just 1341 * reducing usage counter, start from anywhere is O,K. Considering 1342 * memory reclaim from current node, there are pros. and cons. 1343 * 1344 * Freeing memory from current node means freeing memory from a node which 1345 * we'll use or we've used. So, it may make LRU bad. And if several threads 1346 * hit limits, it will see a contention on a node. But freeing from remote 1347 * node means more costs for memory reclaim because of memory latency. 1348 * 1349 * Now, we use round-robin. Better algorithm is welcomed. 1350 */ 1351 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1352 { 1353 int node; 1354 1355 mem_cgroup_may_update_nodemask(memcg); 1356 node = memcg->last_scanned_node; 1357 1358 node = next_node_in(node, memcg->scan_nodes); 1359 /* 1360 * mem_cgroup_may_update_nodemask might have seen no reclaimmable pages 1361 * last time it really checked all the LRUs due to rate limiting. 1362 * Fallback to the current node in that case for simplicity. 1363 */ 1364 if (unlikely(node == MAX_NUMNODES)) 1365 node = numa_node_id(); 1366 1367 memcg->last_scanned_node = node; 1368 return node; 1369 } 1370 #else 1371 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1372 { 1373 return 0; 1374 } 1375 #endif 1376 1377 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1378 pg_data_t *pgdat, 1379 gfp_t gfp_mask, 1380 unsigned long *total_scanned) 1381 { 1382 struct mem_cgroup *victim = NULL; 1383 int total = 0; 1384 int loop = 0; 1385 unsigned long excess; 1386 unsigned long nr_scanned; 1387 struct mem_cgroup_reclaim_cookie reclaim = { 1388 .pgdat = pgdat, 1389 .priority = 0, 1390 }; 1391 1392 excess = soft_limit_excess(root_memcg); 1393 1394 while (1) { 1395 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1396 if (!victim) { 1397 loop++; 1398 if (loop >= 2) { 1399 /* 1400 * If we have not been able to reclaim 1401 * anything, it might because there are 1402 * no reclaimable pages under this hierarchy 1403 */ 1404 if (!total) 1405 break; 1406 /* 1407 * We want to do more targeted reclaim. 1408 * excess >> 2 is not to excessive so as to 1409 * reclaim too much, nor too less that we keep 1410 * coming back to reclaim from this cgroup 1411 */ 1412 if (total >= (excess >> 2) || 1413 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1414 break; 1415 } 1416 continue; 1417 } 1418 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1419 pgdat, &nr_scanned); 1420 *total_scanned += nr_scanned; 1421 if (!soft_limit_excess(root_memcg)) 1422 break; 1423 } 1424 mem_cgroup_iter_break(root_memcg, victim); 1425 return total; 1426 } 1427 1428 #ifdef CONFIG_LOCKDEP 1429 static struct lockdep_map memcg_oom_lock_dep_map = { 1430 .name = "memcg_oom_lock", 1431 }; 1432 #endif 1433 1434 static DEFINE_SPINLOCK(memcg_oom_lock); 1435 1436 /* 1437 * Check OOM-Killer is already running under our hierarchy. 1438 * If someone is running, return false. 1439 */ 1440 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1441 { 1442 struct mem_cgroup *iter, *failed = NULL; 1443 1444 spin_lock(&memcg_oom_lock); 1445 1446 for_each_mem_cgroup_tree(iter, memcg) { 1447 if (iter->oom_lock) { 1448 /* 1449 * this subtree of our hierarchy is already locked 1450 * so we cannot give a lock. 1451 */ 1452 failed = iter; 1453 mem_cgroup_iter_break(memcg, iter); 1454 break; 1455 } else 1456 iter->oom_lock = true; 1457 } 1458 1459 if (failed) { 1460 /* 1461 * OK, we failed to lock the whole subtree so we have 1462 * to clean up what we set up to the failing subtree 1463 */ 1464 for_each_mem_cgroup_tree(iter, memcg) { 1465 if (iter == failed) { 1466 mem_cgroup_iter_break(memcg, iter); 1467 break; 1468 } 1469 iter->oom_lock = false; 1470 } 1471 } else 1472 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1473 1474 spin_unlock(&memcg_oom_lock); 1475 1476 return !failed; 1477 } 1478 1479 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1480 { 1481 struct mem_cgroup *iter; 1482 1483 spin_lock(&memcg_oom_lock); 1484 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1485 for_each_mem_cgroup_tree(iter, memcg) 1486 iter->oom_lock = false; 1487 spin_unlock(&memcg_oom_lock); 1488 } 1489 1490 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1491 { 1492 struct mem_cgroup *iter; 1493 1494 spin_lock(&memcg_oom_lock); 1495 for_each_mem_cgroup_tree(iter, memcg) 1496 iter->under_oom++; 1497 spin_unlock(&memcg_oom_lock); 1498 } 1499 1500 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1501 { 1502 struct mem_cgroup *iter; 1503 1504 /* 1505 * When a new child is created while the hierarchy is under oom, 1506 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1507 */ 1508 spin_lock(&memcg_oom_lock); 1509 for_each_mem_cgroup_tree(iter, memcg) 1510 if (iter->under_oom > 0) 1511 iter->under_oom--; 1512 spin_unlock(&memcg_oom_lock); 1513 } 1514 1515 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1516 1517 struct oom_wait_info { 1518 struct mem_cgroup *memcg; 1519 wait_queue_t wait; 1520 }; 1521 1522 static int memcg_oom_wake_function(wait_queue_t *wait, 1523 unsigned mode, int sync, void *arg) 1524 { 1525 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1526 struct mem_cgroup *oom_wait_memcg; 1527 struct oom_wait_info *oom_wait_info; 1528 1529 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1530 oom_wait_memcg = oom_wait_info->memcg; 1531 1532 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1533 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1534 return 0; 1535 return autoremove_wake_function(wait, mode, sync, arg); 1536 } 1537 1538 static void memcg_oom_recover(struct mem_cgroup *memcg) 1539 { 1540 /* 1541 * For the following lockless ->under_oom test, the only required 1542 * guarantee is that it must see the state asserted by an OOM when 1543 * this function is called as a result of userland actions 1544 * triggered by the notification of the OOM. This is trivially 1545 * achieved by invoking mem_cgroup_mark_under_oom() before 1546 * triggering notification. 1547 */ 1548 if (memcg && memcg->under_oom) 1549 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1550 } 1551 1552 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1553 { 1554 if (!current->memcg_may_oom) 1555 return; 1556 /* 1557 * We are in the middle of the charge context here, so we 1558 * don't want to block when potentially sitting on a callstack 1559 * that holds all kinds of filesystem and mm locks. 1560 * 1561 * Also, the caller may handle a failed allocation gracefully 1562 * (like optional page cache readahead) and so an OOM killer 1563 * invocation might not even be necessary. 1564 * 1565 * That's why we don't do anything here except remember the 1566 * OOM context and then deal with it at the end of the page 1567 * fault when the stack is unwound, the locks are released, 1568 * and when we know whether the fault was overall successful. 1569 */ 1570 css_get(&memcg->css); 1571 current->memcg_in_oom = memcg; 1572 current->memcg_oom_gfp_mask = mask; 1573 current->memcg_oom_order = order; 1574 } 1575 1576 /** 1577 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1578 * @handle: actually kill/wait or just clean up the OOM state 1579 * 1580 * This has to be called at the end of a page fault if the memcg OOM 1581 * handler was enabled. 1582 * 1583 * Memcg supports userspace OOM handling where failed allocations must 1584 * sleep on a waitqueue until the userspace task resolves the 1585 * situation. Sleeping directly in the charge context with all kinds 1586 * of locks held is not a good idea, instead we remember an OOM state 1587 * in the task and mem_cgroup_oom_synchronize() has to be called at 1588 * the end of the page fault to complete the OOM handling. 1589 * 1590 * Returns %true if an ongoing memcg OOM situation was detected and 1591 * completed, %false otherwise. 1592 */ 1593 bool mem_cgroup_oom_synchronize(bool handle) 1594 { 1595 struct mem_cgroup *memcg = current->memcg_in_oom; 1596 struct oom_wait_info owait; 1597 bool locked; 1598 1599 /* OOM is global, do not handle */ 1600 if (!memcg) 1601 return false; 1602 1603 if (!handle || oom_killer_disabled) 1604 goto cleanup; 1605 1606 owait.memcg = memcg; 1607 owait.wait.flags = 0; 1608 owait.wait.func = memcg_oom_wake_function; 1609 owait.wait.private = current; 1610 INIT_LIST_HEAD(&owait.wait.task_list); 1611 1612 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1613 mem_cgroup_mark_under_oom(memcg); 1614 1615 locked = mem_cgroup_oom_trylock(memcg); 1616 1617 if (locked) 1618 mem_cgroup_oom_notify(memcg); 1619 1620 if (locked && !memcg->oom_kill_disable) { 1621 mem_cgroup_unmark_under_oom(memcg); 1622 finish_wait(&memcg_oom_waitq, &owait.wait); 1623 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1624 current->memcg_oom_order); 1625 } else { 1626 schedule(); 1627 mem_cgroup_unmark_under_oom(memcg); 1628 finish_wait(&memcg_oom_waitq, &owait.wait); 1629 } 1630 1631 if (locked) { 1632 mem_cgroup_oom_unlock(memcg); 1633 /* 1634 * There is no guarantee that an OOM-lock contender 1635 * sees the wakeups triggered by the OOM kill 1636 * uncharges. Wake any sleepers explicitely. 1637 */ 1638 memcg_oom_recover(memcg); 1639 } 1640 cleanup: 1641 current->memcg_in_oom = NULL; 1642 css_put(&memcg->css); 1643 return true; 1644 } 1645 1646 /** 1647 * lock_page_memcg - lock a page->mem_cgroup binding 1648 * @page: the page 1649 * 1650 * This function protects unlocked LRU pages from being moved to 1651 * another cgroup and stabilizes their page->mem_cgroup binding. 1652 */ 1653 void lock_page_memcg(struct page *page) 1654 { 1655 struct mem_cgroup *memcg; 1656 unsigned long flags; 1657 1658 /* 1659 * The RCU lock is held throughout the transaction. The fast 1660 * path can get away without acquiring the memcg->move_lock 1661 * because page moving starts with an RCU grace period. 1662 */ 1663 rcu_read_lock(); 1664 1665 if (mem_cgroup_disabled()) 1666 return; 1667 again: 1668 memcg = page->mem_cgroup; 1669 if (unlikely(!memcg)) 1670 return; 1671 1672 if (atomic_read(&memcg->moving_account) <= 0) 1673 return; 1674 1675 spin_lock_irqsave(&memcg->move_lock, flags); 1676 if (memcg != page->mem_cgroup) { 1677 spin_unlock_irqrestore(&memcg->move_lock, flags); 1678 goto again; 1679 } 1680 1681 /* 1682 * When charge migration first begins, we can have locked and 1683 * unlocked page stat updates happening concurrently. Track 1684 * the task who has the lock for unlock_page_memcg(). 1685 */ 1686 memcg->move_lock_task = current; 1687 memcg->move_lock_flags = flags; 1688 1689 return; 1690 } 1691 EXPORT_SYMBOL(lock_page_memcg); 1692 1693 /** 1694 * unlock_page_memcg - unlock a page->mem_cgroup binding 1695 * @page: the page 1696 */ 1697 void unlock_page_memcg(struct page *page) 1698 { 1699 struct mem_cgroup *memcg = page->mem_cgroup; 1700 1701 if (memcg && memcg->move_lock_task == current) { 1702 unsigned long flags = memcg->move_lock_flags; 1703 1704 memcg->move_lock_task = NULL; 1705 memcg->move_lock_flags = 0; 1706 1707 spin_unlock_irqrestore(&memcg->move_lock, flags); 1708 } 1709 1710 rcu_read_unlock(); 1711 } 1712 EXPORT_SYMBOL(unlock_page_memcg); 1713 1714 /* 1715 * size of first charge trial. "32" comes from vmscan.c's magic value. 1716 * TODO: maybe necessary to use big numbers in big irons. 1717 */ 1718 #define CHARGE_BATCH 32U 1719 struct memcg_stock_pcp { 1720 struct mem_cgroup *cached; /* this never be root cgroup */ 1721 unsigned int nr_pages; 1722 struct work_struct work; 1723 unsigned long flags; 1724 #define FLUSHING_CACHED_CHARGE 0 1725 }; 1726 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1727 static DEFINE_MUTEX(percpu_charge_mutex); 1728 1729 /** 1730 * consume_stock: Try to consume stocked charge on this cpu. 1731 * @memcg: memcg to consume from. 1732 * @nr_pages: how many pages to charge. 1733 * 1734 * The charges will only happen if @memcg matches the current cpu's memcg 1735 * stock, and at least @nr_pages are available in that stock. Failure to 1736 * service an allocation will refill the stock. 1737 * 1738 * returns true if successful, false otherwise. 1739 */ 1740 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1741 { 1742 struct memcg_stock_pcp *stock; 1743 bool ret = false; 1744 1745 if (nr_pages > CHARGE_BATCH) 1746 return ret; 1747 1748 stock = &get_cpu_var(memcg_stock); 1749 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 1750 stock->nr_pages -= nr_pages; 1751 ret = true; 1752 } 1753 put_cpu_var(memcg_stock); 1754 return ret; 1755 } 1756 1757 /* 1758 * Returns stocks cached in percpu and reset cached information. 1759 */ 1760 static void drain_stock(struct memcg_stock_pcp *stock) 1761 { 1762 struct mem_cgroup *old = stock->cached; 1763 1764 if (stock->nr_pages) { 1765 page_counter_uncharge(&old->memory, stock->nr_pages); 1766 if (do_memsw_account()) 1767 page_counter_uncharge(&old->memsw, stock->nr_pages); 1768 css_put_many(&old->css, stock->nr_pages); 1769 stock->nr_pages = 0; 1770 } 1771 stock->cached = NULL; 1772 } 1773 1774 /* 1775 * This must be called under preempt disabled or must be called by 1776 * a thread which is pinned to local cpu. 1777 */ 1778 static void drain_local_stock(struct work_struct *dummy) 1779 { 1780 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock); 1781 drain_stock(stock); 1782 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1783 } 1784 1785 /* 1786 * Cache charges(val) to local per_cpu area. 1787 * This will be consumed by consume_stock() function, later. 1788 */ 1789 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1790 { 1791 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 1792 1793 if (stock->cached != memcg) { /* reset if necessary */ 1794 drain_stock(stock); 1795 stock->cached = memcg; 1796 } 1797 stock->nr_pages += nr_pages; 1798 put_cpu_var(memcg_stock); 1799 } 1800 1801 /* 1802 * Drains all per-CPU charge caches for given root_memcg resp. subtree 1803 * of the hierarchy under it. 1804 */ 1805 static void drain_all_stock(struct mem_cgroup *root_memcg) 1806 { 1807 int cpu, curcpu; 1808 1809 /* If someone's already draining, avoid adding running more workers. */ 1810 if (!mutex_trylock(&percpu_charge_mutex)) 1811 return; 1812 /* Notify other cpus that system-wide "drain" is running */ 1813 get_online_cpus(); 1814 curcpu = get_cpu(); 1815 for_each_online_cpu(cpu) { 1816 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 1817 struct mem_cgroup *memcg; 1818 1819 memcg = stock->cached; 1820 if (!memcg || !stock->nr_pages) 1821 continue; 1822 if (!mem_cgroup_is_descendant(memcg, root_memcg)) 1823 continue; 1824 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 1825 if (cpu == curcpu) 1826 drain_local_stock(&stock->work); 1827 else 1828 schedule_work_on(cpu, &stock->work); 1829 } 1830 } 1831 put_cpu(); 1832 put_online_cpus(); 1833 mutex_unlock(&percpu_charge_mutex); 1834 } 1835 1836 static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 1837 unsigned long action, 1838 void *hcpu) 1839 { 1840 int cpu = (unsigned long)hcpu; 1841 struct memcg_stock_pcp *stock; 1842 1843 if (action == CPU_ONLINE) 1844 return NOTIFY_OK; 1845 1846 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 1847 return NOTIFY_OK; 1848 1849 stock = &per_cpu(memcg_stock, cpu); 1850 drain_stock(stock); 1851 return NOTIFY_OK; 1852 } 1853 1854 static void reclaim_high(struct mem_cgroup *memcg, 1855 unsigned int nr_pages, 1856 gfp_t gfp_mask) 1857 { 1858 do { 1859 if (page_counter_read(&memcg->memory) <= memcg->high) 1860 continue; 1861 mem_cgroup_events(memcg, MEMCG_HIGH, 1); 1862 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); 1863 } while ((memcg = parent_mem_cgroup(memcg))); 1864 } 1865 1866 static void high_work_func(struct work_struct *work) 1867 { 1868 struct mem_cgroup *memcg; 1869 1870 memcg = container_of(work, struct mem_cgroup, high_work); 1871 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL); 1872 } 1873 1874 /* 1875 * Scheduled by try_charge() to be executed from the userland return path 1876 * and reclaims memory over the high limit. 1877 */ 1878 void mem_cgroup_handle_over_high(void) 1879 { 1880 unsigned int nr_pages = current->memcg_nr_pages_over_high; 1881 struct mem_cgroup *memcg; 1882 1883 if (likely(!nr_pages)) 1884 return; 1885 1886 memcg = get_mem_cgroup_from_mm(current->mm); 1887 reclaim_high(memcg, nr_pages, GFP_KERNEL); 1888 css_put(&memcg->css); 1889 current->memcg_nr_pages_over_high = 0; 1890 } 1891 1892 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 1893 unsigned int nr_pages) 1894 { 1895 unsigned int batch = max(CHARGE_BATCH, nr_pages); 1896 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 1897 struct mem_cgroup *mem_over_limit; 1898 struct page_counter *counter; 1899 unsigned long nr_reclaimed; 1900 bool may_swap = true; 1901 bool drained = false; 1902 1903 if (mem_cgroup_is_root(memcg)) 1904 return 0; 1905 retry: 1906 if (consume_stock(memcg, nr_pages)) 1907 return 0; 1908 1909 if (!do_memsw_account() || 1910 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 1911 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 1912 goto done_restock; 1913 if (do_memsw_account()) 1914 page_counter_uncharge(&memcg->memsw, batch); 1915 mem_over_limit = mem_cgroup_from_counter(counter, memory); 1916 } else { 1917 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 1918 may_swap = false; 1919 } 1920 1921 if (batch > nr_pages) { 1922 batch = nr_pages; 1923 goto retry; 1924 } 1925 1926 /* 1927 * Unlike in global OOM situations, memcg is not in a physical 1928 * memory shortage. Allow dying and OOM-killed tasks to 1929 * bypass the last charges so that they can exit quickly and 1930 * free their memory. 1931 */ 1932 if (unlikely(test_thread_flag(TIF_MEMDIE) || 1933 fatal_signal_pending(current) || 1934 current->flags & PF_EXITING)) 1935 goto force; 1936 1937 if (unlikely(task_in_memcg_oom(current))) 1938 goto nomem; 1939 1940 if (!gfpflags_allow_blocking(gfp_mask)) 1941 goto nomem; 1942 1943 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1); 1944 1945 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 1946 gfp_mask, may_swap); 1947 1948 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 1949 goto retry; 1950 1951 if (!drained) { 1952 drain_all_stock(mem_over_limit); 1953 drained = true; 1954 goto retry; 1955 } 1956 1957 if (gfp_mask & __GFP_NORETRY) 1958 goto nomem; 1959 /* 1960 * Even though the limit is exceeded at this point, reclaim 1961 * may have been able to free some pages. Retry the charge 1962 * before killing the task. 1963 * 1964 * Only for regular pages, though: huge pages are rather 1965 * unlikely to succeed so close to the limit, and we fall back 1966 * to regular pages anyway in case of failure. 1967 */ 1968 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 1969 goto retry; 1970 /* 1971 * At task move, charge accounts can be doubly counted. So, it's 1972 * better to wait until the end of task_move if something is going on. 1973 */ 1974 if (mem_cgroup_wait_acct_move(mem_over_limit)) 1975 goto retry; 1976 1977 if (nr_retries--) 1978 goto retry; 1979 1980 if (gfp_mask & __GFP_NOFAIL) 1981 goto force; 1982 1983 if (fatal_signal_pending(current)) 1984 goto force; 1985 1986 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1); 1987 1988 mem_cgroup_oom(mem_over_limit, gfp_mask, 1989 get_order(nr_pages * PAGE_SIZE)); 1990 nomem: 1991 if (!(gfp_mask & __GFP_NOFAIL)) 1992 return -ENOMEM; 1993 force: 1994 /* 1995 * The allocation either can't fail or will lead to more memory 1996 * being freed very soon. Allow memory usage go over the limit 1997 * temporarily by force charging it. 1998 */ 1999 page_counter_charge(&memcg->memory, nr_pages); 2000 if (do_memsw_account()) 2001 page_counter_charge(&memcg->memsw, nr_pages); 2002 css_get_many(&memcg->css, nr_pages); 2003 2004 return 0; 2005 2006 done_restock: 2007 css_get_many(&memcg->css, batch); 2008 if (batch > nr_pages) 2009 refill_stock(memcg, batch - nr_pages); 2010 2011 /* 2012 * If the hierarchy is above the normal consumption range, schedule 2013 * reclaim on returning to userland. We can perform reclaim here 2014 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2015 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2016 * not recorded as it most likely matches current's and won't 2017 * change in the meantime. As high limit is checked again before 2018 * reclaim, the cost of mismatch is negligible. 2019 */ 2020 do { 2021 if (page_counter_read(&memcg->memory) > memcg->high) { 2022 /* Don't bother a random interrupted task */ 2023 if (in_interrupt()) { 2024 schedule_work(&memcg->high_work); 2025 break; 2026 } 2027 current->memcg_nr_pages_over_high += batch; 2028 set_notify_resume(current); 2029 break; 2030 } 2031 } while ((memcg = parent_mem_cgroup(memcg))); 2032 2033 return 0; 2034 } 2035 2036 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2037 { 2038 if (mem_cgroup_is_root(memcg)) 2039 return; 2040 2041 page_counter_uncharge(&memcg->memory, nr_pages); 2042 if (do_memsw_account()) 2043 page_counter_uncharge(&memcg->memsw, nr_pages); 2044 2045 css_put_many(&memcg->css, nr_pages); 2046 } 2047 2048 static void lock_page_lru(struct page *page, int *isolated) 2049 { 2050 struct zone *zone = page_zone(page); 2051 2052 spin_lock_irq(zone_lru_lock(zone)); 2053 if (PageLRU(page)) { 2054 struct lruvec *lruvec; 2055 2056 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2057 ClearPageLRU(page); 2058 del_page_from_lru_list(page, lruvec, page_lru(page)); 2059 *isolated = 1; 2060 } else 2061 *isolated = 0; 2062 } 2063 2064 static void unlock_page_lru(struct page *page, int isolated) 2065 { 2066 struct zone *zone = page_zone(page); 2067 2068 if (isolated) { 2069 struct lruvec *lruvec; 2070 2071 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2072 VM_BUG_ON_PAGE(PageLRU(page), page); 2073 SetPageLRU(page); 2074 add_page_to_lru_list(page, lruvec, page_lru(page)); 2075 } 2076 spin_unlock_irq(zone_lru_lock(zone)); 2077 } 2078 2079 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2080 bool lrucare) 2081 { 2082 int isolated; 2083 2084 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2085 2086 /* 2087 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2088 * may already be on some other mem_cgroup's LRU. Take care of it. 2089 */ 2090 if (lrucare) 2091 lock_page_lru(page, &isolated); 2092 2093 /* 2094 * Nobody should be changing or seriously looking at 2095 * page->mem_cgroup at this point: 2096 * 2097 * - the page is uncharged 2098 * 2099 * - the page is off-LRU 2100 * 2101 * - an anonymous fault has exclusive page access, except for 2102 * a locked page table 2103 * 2104 * - a page cache insertion, a swapin fault, or a migration 2105 * have the page locked 2106 */ 2107 page->mem_cgroup = memcg; 2108 2109 if (lrucare) 2110 unlock_page_lru(page, isolated); 2111 } 2112 2113 #ifndef CONFIG_SLOB 2114 static int memcg_alloc_cache_id(void) 2115 { 2116 int id, size; 2117 int err; 2118 2119 id = ida_simple_get(&memcg_cache_ida, 2120 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2121 if (id < 0) 2122 return id; 2123 2124 if (id < memcg_nr_cache_ids) 2125 return id; 2126 2127 /* 2128 * There's no space for the new id in memcg_caches arrays, 2129 * so we have to grow them. 2130 */ 2131 down_write(&memcg_cache_ids_sem); 2132 2133 size = 2 * (id + 1); 2134 if (size < MEMCG_CACHES_MIN_SIZE) 2135 size = MEMCG_CACHES_MIN_SIZE; 2136 else if (size > MEMCG_CACHES_MAX_SIZE) 2137 size = MEMCG_CACHES_MAX_SIZE; 2138 2139 err = memcg_update_all_caches(size); 2140 if (!err) 2141 err = memcg_update_all_list_lrus(size); 2142 if (!err) 2143 memcg_nr_cache_ids = size; 2144 2145 up_write(&memcg_cache_ids_sem); 2146 2147 if (err) { 2148 ida_simple_remove(&memcg_cache_ida, id); 2149 return err; 2150 } 2151 return id; 2152 } 2153 2154 static void memcg_free_cache_id(int id) 2155 { 2156 ida_simple_remove(&memcg_cache_ida, id); 2157 } 2158 2159 struct memcg_kmem_cache_create_work { 2160 struct mem_cgroup *memcg; 2161 struct kmem_cache *cachep; 2162 struct work_struct work; 2163 }; 2164 2165 static void memcg_kmem_cache_create_func(struct work_struct *w) 2166 { 2167 struct memcg_kmem_cache_create_work *cw = 2168 container_of(w, struct memcg_kmem_cache_create_work, work); 2169 struct mem_cgroup *memcg = cw->memcg; 2170 struct kmem_cache *cachep = cw->cachep; 2171 2172 memcg_create_kmem_cache(memcg, cachep); 2173 2174 css_put(&memcg->css); 2175 kfree(cw); 2176 } 2177 2178 /* 2179 * Enqueue the creation of a per-memcg kmem_cache. 2180 */ 2181 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2182 struct kmem_cache *cachep) 2183 { 2184 struct memcg_kmem_cache_create_work *cw; 2185 2186 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2187 if (!cw) 2188 return; 2189 2190 css_get(&memcg->css); 2191 2192 cw->memcg = memcg; 2193 cw->cachep = cachep; 2194 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2195 2196 schedule_work(&cw->work); 2197 } 2198 2199 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2200 struct kmem_cache *cachep) 2201 { 2202 /* 2203 * We need to stop accounting when we kmalloc, because if the 2204 * corresponding kmalloc cache is not yet created, the first allocation 2205 * in __memcg_schedule_kmem_cache_create will recurse. 2206 * 2207 * However, it is better to enclose the whole function. Depending on 2208 * the debugging options enabled, INIT_WORK(), for instance, can 2209 * trigger an allocation. This too, will make us recurse. Because at 2210 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2211 * the safest choice is to do it like this, wrapping the whole function. 2212 */ 2213 current->memcg_kmem_skip_account = 1; 2214 __memcg_schedule_kmem_cache_create(memcg, cachep); 2215 current->memcg_kmem_skip_account = 0; 2216 } 2217 2218 static inline bool memcg_kmem_bypass(void) 2219 { 2220 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2221 return true; 2222 return false; 2223 } 2224 2225 /** 2226 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2227 * @cachep: the original global kmem cache 2228 * 2229 * Return the kmem_cache we're supposed to use for a slab allocation. 2230 * We try to use the current memcg's version of the cache. 2231 * 2232 * If the cache does not exist yet, if we are the first user of it, we 2233 * create it asynchronously in a workqueue and let the current allocation 2234 * go through with the original cache. 2235 * 2236 * This function takes a reference to the cache it returns to assure it 2237 * won't get destroyed while we are working with it. Once the caller is 2238 * done with it, memcg_kmem_put_cache() must be called to release the 2239 * reference. 2240 */ 2241 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2242 { 2243 struct mem_cgroup *memcg; 2244 struct kmem_cache *memcg_cachep; 2245 int kmemcg_id; 2246 2247 VM_BUG_ON(!is_root_cache(cachep)); 2248 2249 if (memcg_kmem_bypass()) 2250 return cachep; 2251 2252 if (current->memcg_kmem_skip_account) 2253 return cachep; 2254 2255 memcg = get_mem_cgroup_from_mm(current->mm); 2256 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2257 if (kmemcg_id < 0) 2258 goto out; 2259 2260 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2261 if (likely(memcg_cachep)) 2262 return memcg_cachep; 2263 2264 /* 2265 * If we are in a safe context (can wait, and not in interrupt 2266 * context), we could be be predictable and return right away. 2267 * This would guarantee that the allocation being performed 2268 * already belongs in the new cache. 2269 * 2270 * However, there are some clashes that can arrive from locking. 2271 * For instance, because we acquire the slab_mutex while doing 2272 * memcg_create_kmem_cache, this means no further allocation 2273 * could happen with the slab_mutex held. So it's better to 2274 * defer everything. 2275 */ 2276 memcg_schedule_kmem_cache_create(memcg, cachep); 2277 out: 2278 css_put(&memcg->css); 2279 return cachep; 2280 } 2281 2282 /** 2283 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2284 * @cachep: the cache returned by memcg_kmem_get_cache 2285 */ 2286 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2287 { 2288 if (!is_root_cache(cachep)) 2289 css_put(&cachep->memcg_params.memcg->css); 2290 } 2291 2292 /** 2293 * memcg_kmem_charge: charge a kmem page 2294 * @page: page to charge 2295 * @gfp: reclaim mode 2296 * @order: allocation order 2297 * @memcg: memory cgroup to charge 2298 * 2299 * Returns 0 on success, an error code on failure. 2300 */ 2301 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2302 struct mem_cgroup *memcg) 2303 { 2304 unsigned int nr_pages = 1 << order; 2305 struct page_counter *counter; 2306 int ret; 2307 2308 ret = try_charge(memcg, gfp, nr_pages); 2309 if (ret) 2310 return ret; 2311 2312 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2313 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2314 cancel_charge(memcg, nr_pages); 2315 return -ENOMEM; 2316 } 2317 2318 page->mem_cgroup = memcg; 2319 2320 return 0; 2321 } 2322 2323 /** 2324 * memcg_kmem_charge: charge a kmem page to the current memory cgroup 2325 * @page: page to charge 2326 * @gfp: reclaim mode 2327 * @order: allocation order 2328 * 2329 * Returns 0 on success, an error code on failure. 2330 */ 2331 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2332 { 2333 struct mem_cgroup *memcg; 2334 int ret = 0; 2335 2336 if (memcg_kmem_bypass()) 2337 return 0; 2338 2339 memcg = get_mem_cgroup_from_mm(current->mm); 2340 if (!mem_cgroup_is_root(memcg)) { 2341 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2342 if (!ret) 2343 __SetPageKmemcg(page); 2344 } 2345 css_put(&memcg->css); 2346 return ret; 2347 } 2348 /** 2349 * memcg_kmem_uncharge: uncharge a kmem page 2350 * @page: page to uncharge 2351 * @order: allocation order 2352 */ 2353 void memcg_kmem_uncharge(struct page *page, int order) 2354 { 2355 struct mem_cgroup *memcg = page->mem_cgroup; 2356 unsigned int nr_pages = 1 << order; 2357 2358 if (!memcg) 2359 return; 2360 2361 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2362 2363 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2364 page_counter_uncharge(&memcg->kmem, nr_pages); 2365 2366 page_counter_uncharge(&memcg->memory, nr_pages); 2367 if (do_memsw_account()) 2368 page_counter_uncharge(&memcg->memsw, nr_pages); 2369 2370 page->mem_cgroup = NULL; 2371 2372 /* slab pages do not have PageKmemcg flag set */ 2373 if (PageKmemcg(page)) 2374 __ClearPageKmemcg(page); 2375 2376 css_put_many(&memcg->css, nr_pages); 2377 } 2378 #endif /* !CONFIG_SLOB */ 2379 2380 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2381 2382 /* 2383 * Because tail pages are not marked as "used", set it. We're under 2384 * zone_lru_lock and migration entries setup in all page mappings. 2385 */ 2386 void mem_cgroup_split_huge_fixup(struct page *head) 2387 { 2388 int i; 2389 2390 if (mem_cgroup_disabled()) 2391 return; 2392 2393 for (i = 1; i < HPAGE_PMD_NR; i++) 2394 head[i].mem_cgroup = head->mem_cgroup; 2395 2396 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 2397 HPAGE_PMD_NR); 2398 } 2399 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2400 2401 #ifdef CONFIG_MEMCG_SWAP 2402 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2403 bool charge) 2404 { 2405 int val = (charge) ? 1 : -1; 2406 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); 2407 } 2408 2409 /** 2410 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2411 * @entry: swap entry to be moved 2412 * @from: mem_cgroup which the entry is moved from 2413 * @to: mem_cgroup which the entry is moved to 2414 * 2415 * It succeeds only when the swap_cgroup's record for this entry is the same 2416 * as the mem_cgroup's id of @from. 2417 * 2418 * Returns 0 on success, -EINVAL on failure. 2419 * 2420 * The caller must have charged to @to, IOW, called page_counter_charge() about 2421 * both res and memsw, and called css_get(). 2422 */ 2423 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2424 struct mem_cgroup *from, struct mem_cgroup *to) 2425 { 2426 unsigned short old_id, new_id; 2427 2428 old_id = mem_cgroup_id(from); 2429 new_id = mem_cgroup_id(to); 2430 2431 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2432 mem_cgroup_swap_statistics(from, false); 2433 mem_cgroup_swap_statistics(to, true); 2434 return 0; 2435 } 2436 return -EINVAL; 2437 } 2438 #else 2439 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2440 struct mem_cgroup *from, struct mem_cgroup *to) 2441 { 2442 return -EINVAL; 2443 } 2444 #endif 2445 2446 static DEFINE_MUTEX(memcg_limit_mutex); 2447 2448 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 2449 unsigned long limit) 2450 { 2451 unsigned long curusage; 2452 unsigned long oldusage; 2453 bool enlarge = false; 2454 int retry_count; 2455 int ret; 2456 2457 /* 2458 * For keeping hierarchical_reclaim simple, how long we should retry 2459 * is depends on callers. We set our retry-count to be function 2460 * of # of children which we should visit in this loop. 2461 */ 2462 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2463 mem_cgroup_count_children(memcg); 2464 2465 oldusage = page_counter_read(&memcg->memory); 2466 2467 do { 2468 if (signal_pending(current)) { 2469 ret = -EINTR; 2470 break; 2471 } 2472 2473 mutex_lock(&memcg_limit_mutex); 2474 if (limit > memcg->memsw.limit) { 2475 mutex_unlock(&memcg_limit_mutex); 2476 ret = -EINVAL; 2477 break; 2478 } 2479 if (limit > memcg->memory.limit) 2480 enlarge = true; 2481 ret = page_counter_limit(&memcg->memory, limit); 2482 mutex_unlock(&memcg_limit_mutex); 2483 2484 if (!ret) 2485 break; 2486 2487 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); 2488 2489 curusage = page_counter_read(&memcg->memory); 2490 /* Usage is reduced ? */ 2491 if (curusage >= oldusage) 2492 retry_count--; 2493 else 2494 oldusage = curusage; 2495 } while (retry_count); 2496 2497 if (!ret && enlarge) 2498 memcg_oom_recover(memcg); 2499 2500 return ret; 2501 } 2502 2503 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 2504 unsigned long limit) 2505 { 2506 unsigned long curusage; 2507 unsigned long oldusage; 2508 bool enlarge = false; 2509 int retry_count; 2510 int ret; 2511 2512 /* see mem_cgroup_resize_res_limit */ 2513 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2514 mem_cgroup_count_children(memcg); 2515 2516 oldusage = page_counter_read(&memcg->memsw); 2517 2518 do { 2519 if (signal_pending(current)) { 2520 ret = -EINTR; 2521 break; 2522 } 2523 2524 mutex_lock(&memcg_limit_mutex); 2525 if (limit < memcg->memory.limit) { 2526 mutex_unlock(&memcg_limit_mutex); 2527 ret = -EINVAL; 2528 break; 2529 } 2530 if (limit > memcg->memsw.limit) 2531 enlarge = true; 2532 ret = page_counter_limit(&memcg->memsw, limit); 2533 mutex_unlock(&memcg_limit_mutex); 2534 2535 if (!ret) 2536 break; 2537 2538 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); 2539 2540 curusage = page_counter_read(&memcg->memsw); 2541 /* Usage is reduced ? */ 2542 if (curusage >= oldusage) 2543 retry_count--; 2544 else 2545 oldusage = curusage; 2546 } while (retry_count); 2547 2548 if (!ret && enlarge) 2549 memcg_oom_recover(memcg); 2550 2551 return ret; 2552 } 2553 2554 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2555 gfp_t gfp_mask, 2556 unsigned long *total_scanned) 2557 { 2558 unsigned long nr_reclaimed = 0; 2559 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2560 unsigned long reclaimed; 2561 int loop = 0; 2562 struct mem_cgroup_tree_per_node *mctz; 2563 unsigned long excess; 2564 unsigned long nr_scanned; 2565 2566 if (order > 0) 2567 return 0; 2568 2569 mctz = soft_limit_tree_node(pgdat->node_id); 2570 2571 /* 2572 * Do not even bother to check the largest node if the root 2573 * is empty. Do it lockless to prevent lock bouncing. Races 2574 * are acceptable as soft limit is best effort anyway. 2575 */ 2576 if (RB_EMPTY_ROOT(&mctz->rb_root)) 2577 return 0; 2578 2579 /* 2580 * This loop can run a while, specially if mem_cgroup's continuously 2581 * keep exceeding their soft limit and putting the system under 2582 * pressure 2583 */ 2584 do { 2585 if (next_mz) 2586 mz = next_mz; 2587 else 2588 mz = mem_cgroup_largest_soft_limit_node(mctz); 2589 if (!mz) 2590 break; 2591 2592 nr_scanned = 0; 2593 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 2594 gfp_mask, &nr_scanned); 2595 nr_reclaimed += reclaimed; 2596 *total_scanned += nr_scanned; 2597 spin_lock_irq(&mctz->lock); 2598 __mem_cgroup_remove_exceeded(mz, mctz); 2599 2600 /* 2601 * If we failed to reclaim anything from this memory cgroup 2602 * it is time to move on to the next cgroup 2603 */ 2604 next_mz = NULL; 2605 if (!reclaimed) 2606 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2607 2608 excess = soft_limit_excess(mz->memcg); 2609 /* 2610 * One school of thought says that we should not add 2611 * back the node to the tree if reclaim returns 0. 2612 * But our reclaim could return 0, simply because due 2613 * to priority we are exposing a smaller subset of 2614 * memory to reclaim from. Consider this as a longer 2615 * term TODO. 2616 */ 2617 /* If excess == 0, no tree ops */ 2618 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2619 spin_unlock_irq(&mctz->lock); 2620 css_put(&mz->memcg->css); 2621 loop++; 2622 /* 2623 * Could not reclaim anything and there are no more 2624 * mem cgroups to try or we seem to be looping without 2625 * reclaiming anything. 2626 */ 2627 if (!nr_reclaimed && 2628 (next_mz == NULL || 2629 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2630 break; 2631 } while (!nr_reclaimed); 2632 if (next_mz) 2633 css_put(&next_mz->memcg->css); 2634 return nr_reclaimed; 2635 } 2636 2637 /* 2638 * Test whether @memcg has children, dead or alive. Note that this 2639 * function doesn't care whether @memcg has use_hierarchy enabled and 2640 * returns %true if there are child csses according to the cgroup 2641 * hierarchy. Testing use_hierarchy is the caller's responsiblity. 2642 */ 2643 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2644 { 2645 bool ret; 2646 2647 rcu_read_lock(); 2648 ret = css_next_child(NULL, &memcg->css); 2649 rcu_read_unlock(); 2650 return ret; 2651 } 2652 2653 /* 2654 * Reclaims as many pages from the given memcg as possible. 2655 * 2656 * Caller is responsible for holding css reference for memcg. 2657 */ 2658 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2659 { 2660 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2661 2662 /* we call try-to-free pages for make this cgroup empty */ 2663 lru_add_drain_all(); 2664 /* try to free all pages in this cgroup */ 2665 while (nr_retries && page_counter_read(&memcg->memory)) { 2666 int progress; 2667 2668 if (signal_pending(current)) 2669 return -EINTR; 2670 2671 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2672 GFP_KERNEL, true); 2673 if (!progress) { 2674 nr_retries--; 2675 /* maybe some writeback is necessary */ 2676 congestion_wait(BLK_RW_ASYNC, HZ/10); 2677 } 2678 2679 } 2680 2681 return 0; 2682 } 2683 2684 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 2685 char *buf, size_t nbytes, 2686 loff_t off) 2687 { 2688 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2689 2690 if (mem_cgroup_is_root(memcg)) 2691 return -EINVAL; 2692 return mem_cgroup_force_empty(memcg) ?: nbytes; 2693 } 2694 2695 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 2696 struct cftype *cft) 2697 { 2698 return mem_cgroup_from_css(css)->use_hierarchy; 2699 } 2700 2701 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 2702 struct cftype *cft, u64 val) 2703 { 2704 int retval = 0; 2705 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2706 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 2707 2708 if (memcg->use_hierarchy == val) 2709 return 0; 2710 2711 /* 2712 * If parent's use_hierarchy is set, we can't make any modifications 2713 * in the child subtrees. If it is unset, then the change can 2714 * occur, provided the current cgroup has no children. 2715 * 2716 * For the root cgroup, parent_mem is NULL, we allow value to be 2717 * set if there are no children. 2718 */ 2719 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2720 (val == 1 || val == 0)) { 2721 if (!memcg_has_children(memcg)) 2722 memcg->use_hierarchy = val; 2723 else 2724 retval = -EBUSY; 2725 } else 2726 retval = -EINVAL; 2727 2728 return retval; 2729 } 2730 2731 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat) 2732 { 2733 struct mem_cgroup *iter; 2734 int i; 2735 2736 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT); 2737 2738 for_each_mem_cgroup_tree(iter, memcg) { 2739 for (i = 0; i < MEMCG_NR_STAT; i++) 2740 stat[i] += mem_cgroup_read_stat(iter, i); 2741 } 2742 } 2743 2744 static void tree_events(struct mem_cgroup *memcg, unsigned long *events) 2745 { 2746 struct mem_cgroup *iter; 2747 int i; 2748 2749 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS); 2750 2751 for_each_mem_cgroup_tree(iter, memcg) { 2752 for (i = 0; i < MEMCG_NR_EVENTS; i++) 2753 events[i] += mem_cgroup_read_events(iter, i); 2754 } 2755 } 2756 2757 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2758 { 2759 unsigned long val = 0; 2760 2761 if (mem_cgroup_is_root(memcg)) { 2762 struct mem_cgroup *iter; 2763 2764 for_each_mem_cgroup_tree(iter, memcg) { 2765 val += mem_cgroup_read_stat(iter, 2766 MEM_CGROUP_STAT_CACHE); 2767 val += mem_cgroup_read_stat(iter, 2768 MEM_CGROUP_STAT_RSS); 2769 if (swap) 2770 val += mem_cgroup_read_stat(iter, 2771 MEM_CGROUP_STAT_SWAP); 2772 } 2773 } else { 2774 if (!swap) 2775 val = page_counter_read(&memcg->memory); 2776 else 2777 val = page_counter_read(&memcg->memsw); 2778 } 2779 return val; 2780 } 2781 2782 enum { 2783 RES_USAGE, 2784 RES_LIMIT, 2785 RES_MAX_USAGE, 2786 RES_FAILCNT, 2787 RES_SOFT_LIMIT, 2788 }; 2789 2790 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2791 struct cftype *cft) 2792 { 2793 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2794 struct page_counter *counter; 2795 2796 switch (MEMFILE_TYPE(cft->private)) { 2797 case _MEM: 2798 counter = &memcg->memory; 2799 break; 2800 case _MEMSWAP: 2801 counter = &memcg->memsw; 2802 break; 2803 case _KMEM: 2804 counter = &memcg->kmem; 2805 break; 2806 case _TCP: 2807 counter = &memcg->tcpmem; 2808 break; 2809 default: 2810 BUG(); 2811 } 2812 2813 switch (MEMFILE_ATTR(cft->private)) { 2814 case RES_USAGE: 2815 if (counter == &memcg->memory) 2816 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2817 if (counter == &memcg->memsw) 2818 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2819 return (u64)page_counter_read(counter) * PAGE_SIZE; 2820 case RES_LIMIT: 2821 return (u64)counter->limit * PAGE_SIZE; 2822 case RES_MAX_USAGE: 2823 return (u64)counter->watermark * PAGE_SIZE; 2824 case RES_FAILCNT: 2825 return counter->failcnt; 2826 case RES_SOFT_LIMIT: 2827 return (u64)memcg->soft_limit * PAGE_SIZE; 2828 default: 2829 BUG(); 2830 } 2831 } 2832 2833 #ifndef CONFIG_SLOB 2834 static int memcg_online_kmem(struct mem_cgroup *memcg) 2835 { 2836 int memcg_id; 2837 2838 if (cgroup_memory_nokmem) 2839 return 0; 2840 2841 BUG_ON(memcg->kmemcg_id >= 0); 2842 BUG_ON(memcg->kmem_state); 2843 2844 memcg_id = memcg_alloc_cache_id(); 2845 if (memcg_id < 0) 2846 return memcg_id; 2847 2848 static_branch_inc(&memcg_kmem_enabled_key); 2849 /* 2850 * A memory cgroup is considered kmem-online as soon as it gets 2851 * kmemcg_id. Setting the id after enabling static branching will 2852 * guarantee no one starts accounting before all call sites are 2853 * patched. 2854 */ 2855 memcg->kmemcg_id = memcg_id; 2856 memcg->kmem_state = KMEM_ONLINE; 2857 2858 return 0; 2859 } 2860 2861 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2862 { 2863 struct cgroup_subsys_state *css; 2864 struct mem_cgroup *parent, *child; 2865 int kmemcg_id; 2866 2867 if (memcg->kmem_state != KMEM_ONLINE) 2868 return; 2869 /* 2870 * Clear the online state before clearing memcg_caches array 2871 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 2872 * guarantees that no cache will be created for this cgroup 2873 * after we are done (see memcg_create_kmem_cache()). 2874 */ 2875 memcg->kmem_state = KMEM_ALLOCATED; 2876 2877 memcg_deactivate_kmem_caches(memcg); 2878 2879 kmemcg_id = memcg->kmemcg_id; 2880 BUG_ON(kmemcg_id < 0); 2881 2882 parent = parent_mem_cgroup(memcg); 2883 if (!parent) 2884 parent = root_mem_cgroup; 2885 2886 /* 2887 * Change kmemcg_id of this cgroup and all its descendants to the 2888 * parent's id, and then move all entries from this cgroup's list_lrus 2889 * to ones of the parent. After we have finished, all list_lrus 2890 * corresponding to this cgroup are guaranteed to remain empty. The 2891 * ordering is imposed by list_lru_node->lock taken by 2892 * memcg_drain_all_list_lrus(). 2893 */ 2894 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 2895 css_for_each_descendant_pre(css, &memcg->css) { 2896 child = mem_cgroup_from_css(css); 2897 BUG_ON(child->kmemcg_id != kmemcg_id); 2898 child->kmemcg_id = parent->kmemcg_id; 2899 if (!memcg->use_hierarchy) 2900 break; 2901 } 2902 rcu_read_unlock(); 2903 2904 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 2905 2906 memcg_free_cache_id(kmemcg_id); 2907 } 2908 2909 static void memcg_free_kmem(struct mem_cgroup *memcg) 2910 { 2911 /* css_alloc() failed, offlining didn't happen */ 2912 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 2913 memcg_offline_kmem(memcg); 2914 2915 if (memcg->kmem_state == KMEM_ALLOCATED) { 2916 memcg_destroy_kmem_caches(memcg); 2917 static_branch_dec(&memcg_kmem_enabled_key); 2918 WARN_ON(page_counter_read(&memcg->kmem)); 2919 } 2920 } 2921 #else 2922 static int memcg_online_kmem(struct mem_cgroup *memcg) 2923 { 2924 return 0; 2925 } 2926 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2927 { 2928 } 2929 static void memcg_free_kmem(struct mem_cgroup *memcg) 2930 { 2931 } 2932 #endif /* !CONFIG_SLOB */ 2933 2934 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2935 unsigned long limit) 2936 { 2937 int ret; 2938 2939 mutex_lock(&memcg_limit_mutex); 2940 ret = page_counter_limit(&memcg->kmem, limit); 2941 mutex_unlock(&memcg_limit_mutex); 2942 return ret; 2943 } 2944 2945 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit) 2946 { 2947 int ret; 2948 2949 mutex_lock(&memcg_limit_mutex); 2950 2951 ret = page_counter_limit(&memcg->tcpmem, limit); 2952 if (ret) 2953 goto out; 2954 2955 if (!memcg->tcpmem_active) { 2956 /* 2957 * The active flag needs to be written after the static_key 2958 * update. This is what guarantees that the socket activation 2959 * function is the last one to run. See sock_update_memcg() for 2960 * details, and note that we don't mark any socket as belonging 2961 * to this memcg until that flag is up. 2962 * 2963 * We need to do this, because static_keys will span multiple 2964 * sites, but we can't control their order. If we mark a socket 2965 * as accounted, but the accounting functions are not patched in 2966 * yet, we'll lose accounting. 2967 * 2968 * We never race with the readers in sock_update_memcg(), 2969 * because when this value change, the code to process it is not 2970 * patched in yet. 2971 */ 2972 static_branch_inc(&memcg_sockets_enabled_key); 2973 memcg->tcpmem_active = true; 2974 } 2975 out: 2976 mutex_unlock(&memcg_limit_mutex); 2977 return ret; 2978 } 2979 2980 /* 2981 * The user of this function is... 2982 * RES_LIMIT. 2983 */ 2984 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2985 char *buf, size_t nbytes, loff_t off) 2986 { 2987 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2988 unsigned long nr_pages; 2989 int ret; 2990 2991 buf = strstrip(buf); 2992 ret = page_counter_memparse(buf, "-1", &nr_pages); 2993 if (ret) 2994 return ret; 2995 2996 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2997 case RES_LIMIT: 2998 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 2999 ret = -EINVAL; 3000 break; 3001 } 3002 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3003 case _MEM: 3004 ret = mem_cgroup_resize_limit(memcg, nr_pages); 3005 break; 3006 case _MEMSWAP: 3007 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 3008 break; 3009 case _KMEM: 3010 ret = memcg_update_kmem_limit(memcg, nr_pages); 3011 break; 3012 case _TCP: 3013 ret = memcg_update_tcp_limit(memcg, nr_pages); 3014 break; 3015 } 3016 break; 3017 case RES_SOFT_LIMIT: 3018 memcg->soft_limit = nr_pages; 3019 ret = 0; 3020 break; 3021 } 3022 return ret ?: nbytes; 3023 } 3024 3025 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3026 size_t nbytes, loff_t off) 3027 { 3028 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3029 struct page_counter *counter; 3030 3031 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3032 case _MEM: 3033 counter = &memcg->memory; 3034 break; 3035 case _MEMSWAP: 3036 counter = &memcg->memsw; 3037 break; 3038 case _KMEM: 3039 counter = &memcg->kmem; 3040 break; 3041 case _TCP: 3042 counter = &memcg->tcpmem; 3043 break; 3044 default: 3045 BUG(); 3046 } 3047 3048 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3049 case RES_MAX_USAGE: 3050 page_counter_reset_watermark(counter); 3051 break; 3052 case RES_FAILCNT: 3053 counter->failcnt = 0; 3054 break; 3055 default: 3056 BUG(); 3057 } 3058 3059 return nbytes; 3060 } 3061 3062 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3063 struct cftype *cft) 3064 { 3065 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3066 } 3067 3068 #ifdef CONFIG_MMU 3069 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3070 struct cftype *cft, u64 val) 3071 { 3072 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3073 3074 if (val & ~MOVE_MASK) 3075 return -EINVAL; 3076 3077 /* 3078 * No kind of locking is needed in here, because ->can_attach() will 3079 * check this value once in the beginning of the process, and then carry 3080 * on with stale data. This means that changes to this value will only 3081 * affect task migrations starting after the change. 3082 */ 3083 memcg->move_charge_at_immigrate = val; 3084 return 0; 3085 } 3086 #else 3087 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3088 struct cftype *cft, u64 val) 3089 { 3090 return -ENOSYS; 3091 } 3092 #endif 3093 3094 #ifdef CONFIG_NUMA 3095 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3096 { 3097 struct numa_stat { 3098 const char *name; 3099 unsigned int lru_mask; 3100 }; 3101 3102 static const struct numa_stat stats[] = { 3103 { "total", LRU_ALL }, 3104 { "file", LRU_ALL_FILE }, 3105 { "anon", LRU_ALL_ANON }, 3106 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3107 }; 3108 const struct numa_stat *stat; 3109 int nid; 3110 unsigned long nr; 3111 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3112 3113 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3114 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3115 seq_printf(m, "%s=%lu", stat->name, nr); 3116 for_each_node_state(nid, N_MEMORY) { 3117 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3118 stat->lru_mask); 3119 seq_printf(m, " N%d=%lu", nid, nr); 3120 } 3121 seq_putc(m, '\n'); 3122 } 3123 3124 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3125 struct mem_cgroup *iter; 3126 3127 nr = 0; 3128 for_each_mem_cgroup_tree(iter, memcg) 3129 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3130 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3131 for_each_node_state(nid, N_MEMORY) { 3132 nr = 0; 3133 for_each_mem_cgroup_tree(iter, memcg) 3134 nr += mem_cgroup_node_nr_lru_pages( 3135 iter, nid, stat->lru_mask); 3136 seq_printf(m, " N%d=%lu", nid, nr); 3137 } 3138 seq_putc(m, '\n'); 3139 } 3140 3141 return 0; 3142 } 3143 #endif /* CONFIG_NUMA */ 3144 3145 static int memcg_stat_show(struct seq_file *m, void *v) 3146 { 3147 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3148 unsigned long memory, memsw; 3149 struct mem_cgroup *mi; 3150 unsigned int i; 3151 3152 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) != 3153 MEM_CGROUP_STAT_NSTATS); 3154 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) != 3155 MEM_CGROUP_EVENTS_NSTATS); 3156 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3157 3158 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3159 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) 3160 continue; 3161 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], 3162 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3163 } 3164 3165 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 3166 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 3167 mem_cgroup_read_events(memcg, i)); 3168 3169 for (i = 0; i < NR_LRU_LISTS; i++) 3170 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3171 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3172 3173 /* Hierarchical information */ 3174 memory = memsw = PAGE_COUNTER_MAX; 3175 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3176 memory = min(memory, mi->memory.limit); 3177 memsw = min(memsw, mi->memsw.limit); 3178 } 3179 seq_printf(m, "hierarchical_memory_limit %llu\n", 3180 (u64)memory * PAGE_SIZE); 3181 if (do_memsw_account()) 3182 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3183 (u64)memsw * PAGE_SIZE); 3184 3185 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3186 unsigned long long val = 0; 3187 3188 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) 3189 continue; 3190 for_each_mem_cgroup_tree(mi, memcg) 3191 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3192 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); 3193 } 3194 3195 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 3196 unsigned long long val = 0; 3197 3198 for_each_mem_cgroup_tree(mi, memcg) 3199 val += mem_cgroup_read_events(mi, i); 3200 seq_printf(m, "total_%s %llu\n", 3201 mem_cgroup_events_names[i], val); 3202 } 3203 3204 for (i = 0; i < NR_LRU_LISTS; i++) { 3205 unsigned long long val = 0; 3206 3207 for_each_mem_cgroup_tree(mi, memcg) 3208 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3209 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3210 } 3211 3212 #ifdef CONFIG_DEBUG_VM 3213 { 3214 pg_data_t *pgdat; 3215 struct mem_cgroup_per_node *mz; 3216 struct zone_reclaim_stat *rstat; 3217 unsigned long recent_rotated[2] = {0, 0}; 3218 unsigned long recent_scanned[2] = {0, 0}; 3219 3220 for_each_online_pgdat(pgdat) { 3221 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3222 rstat = &mz->lruvec.reclaim_stat; 3223 3224 recent_rotated[0] += rstat->recent_rotated[0]; 3225 recent_rotated[1] += rstat->recent_rotated[1]; 3226 recent_scanned[0] += rstat->recent_scanned[0]; 3227 recent_scanned[1] += rstat->recent_scanned[1]; 3228 } 3229 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3230 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3231 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3232 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3233 } 3234 #endif 3235 3236 return 0; 3237 } 3238 3239 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3240 struct cftype *cft) 3241 { 3242 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3243 3244 return mem_cgroup_swappiness(memcg); 3245 } 3246 3247 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3248 struct cftype *cft, u64 val) 3249 { 3250 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3251 3252 if (val > 100) 3253 return -EINVAL; 3254 3255 if (css->parent) 3256 memcg->swappiness = val; 3257 else 3258 vm_swappiness = val; 3259 3260 return 0; 3261 } 3262 3263 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3264 { 3265 struct mem_cgroup_threshold_ary *t; 3266 unsigned long usage; 3267 int i; 3268 3269 rcu_read_lock(); 3270 if (!swap) 3271 t = rcu_dereference(memcg->thresholds.primary); 3272 else 3273 t = rcu_dereference(memcg->memsw_thresholds.primary); 3274 3275 if (!t) 3276 goto unlock; 3277 3278 usage = mem_cgroup_usage(memcg, swap); 3279 3280 /* 3281 * current_threshold points to threshold just below or equal to usage. 3282 * If it's not true, a threshold was crossed after last 3283 * call of __mem_cgroup_threshold(). 3284 */ 3285 i = t->current_threshold; 3286 3287 /* 3288 * Iterate backward over array of thresholds starting from 3289 * current_threshold and check if a threshold is crossed. 3290 * If none of thresholds below usage is crossed, we read 3291 * only one element of the array here. 3292 */ 3293 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3294 eventfd_signal(t->entries[i].eventfd, 1); 3295 3296 /* i = current_threshold + 1 */ 3297 i++; 3298 3299 /* 3300 * Iterate forward over array of thresholds starting from 3301 * current_threshold+1 and check if a threshold is crossed. 3302 * If none of thresholds above usage is crossed, we read 3303 * only one element of the array here. 3304 */ 3305 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3306 eventfd_signal(t->entries[i].eventfd, 1); 3307 3308 /* Update current_threshold */ 3309 t->current_threshold = i - 1; 3310 unlock: 3311 rcu_read_unlock(); 3312 } 3313 3314 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3315 { 3316 while (memcg) { 3317 __mem_cgroup_threshold(memcg, false); 3318 if (do_memsw_account()) 3319 __mem_cgroup_threshold(memcg, true); 3320 3321 memcg = parent_mem_cgroup(memcg); 3322 } 3323 } 3324 3325 static int compare_thresholds(const void *a, const void *b) 3326 { 3327 const struct mem_cgroup_threshold *_a = a; 3328 const struct mem_cgroup_threshold *_b = b; 3329 3330 if (_a->threshold > _b->threshold) 3331 return 1; 3332 3333 if (_a->threshold < _b->threshold) 3334 return -1; 3335 3336 return 0; 3337 } 3338 3339 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3340 { 3341 struct mem_cgroup_eventfd_list *ev; 3342 3343 spin_lock(&memcg_oom_lock); 3344 3345 list_for_each_entry(ev, &memcg->oom_notify, list) 3346 eventfd_signal(ev->eventfd, 1); 3347 3348 spin_unlock(&memcg_oom_lock); 3349 return 0; 3350 } 3351 3352 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3353 { 3354 struct mem_cgroup *iter; 3355 3356 for_each_mem_cgroup_tree(iter, memcg) 3357 mem_cgroup_oom_notify_cb(iter); 3358 } 3359 3360 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3361 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3362 { 3363 struct mem_cgroup_thresholds *thresholds; 3364 struct mem_cgroup_threshold_ary *new; 3365 unsigned long threshold; 3366 unsigned long usage; 3367 int i, size, ret; 3368 3369 ret = page_counter_memparse(args, "-1", &threshold); 3370 if (ret) 3371 return ret; 3372 3373 mutex_lock(&memcg->thresholds_lock); 3374 3375 if (type == _MEM) { 3376 thresholds = &memcg->thresholds; 3377 usage = mem_cgroup_usage(memcg, false); 3378 } else if (type == _MEMSWAP) { 3379 thresholds = &memcg->memsw_thresholds; 3380 usage = mem_cgroup_usage(memcg, true); 3381 } else 3382 BUG(); 3383 3384 /* Check if a threshold crossed before adding a new one */ 3385 if (thresholds->primary) 3386 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3387 3388 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3389 3390 /* Allocate memory for new array of thresholds */ 3391 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3392 GFP_KERNEL); 3393 if (!new) { 3394 ret = -ENOMEM; 3395 goto unlock; 3396 } 3397 new->size = size; 3398 3399 /* Copy thresholds (if any) to new array */ 3400 if (thresholds->primary) { 3401 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3402 sizeof(struct mem_cgroup_threshold)); 3403 } 3404 3405 /* Add new threshold */ 3406 new->entries[size - 1].eventfd = eventfd; 3407 new->entries[size - 1].threshold = threshold; 3408 3409 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3410 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3411 compare_thresholds, NULL); 3412 3413 /* Find current threshold */ 3414 new->current_threshold = -1; 3415 for (i = 0; i < size; i++) { 3416 if (new->entries[i].threshold <= usage) { 3417 /* 3418 * new->current_threshold will not be used until 3419 * rcu_assign_pointer(), so it's safe to increment 3420 * it here. 3421 */ 3422 ++new->current_threshold; 3423 } else 3424 break; 3425 } 3426 3427 /* Free old spare buffer and save old primary buffer as spare */ 3428 kfree(thresholds->spare); 3429 thresholds->spare = thresholds->primary; 3430 3431 rcu_assign_pointer(thresholds->primary, new); 3432 3433 /* To be sure that nobody uses thresholds */ 3434 synchronize_rcu(); 3435 3436 unlock: 3437 mutex_unlock(&memcg->thresholds_lock); 3438 3439 return ret; 3440 } 3441 3442 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3443 struct eventfd_ctx *eventfd, const char *args) 3444 { 3445 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3446 } 3447 3448 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3449 struct eventfd_ctx *eventfd, const char *args) 3450 { 3451 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3452 } 3453 3454 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3455 struct eventfd_ctx *eventfd, enum res_type type) 3456 { 3457 struct mem_cgroup_thresholds *thresholds; 3458 struct mem_cgroup_threshold_ary *new; 3459 unsigned long usage; 3460 int i, j, size; 3461 3462 mutex_lock(&memcg->thresholds_lock); 3463 3464 if (type == _MEM) { 3465 thresholds = &memcg->thresholds; 3466 usage = mem_cgroup_usage(memcg, false); 3467 } else if (type == _MEMSWAP) { 3468 thresholds = &memcg->memsw_thresholds; 3469 usage = mem_cgroup_usage(memcg, true); 3470 } else 3471 BUG(); 3472 3473 if (!thresholds->primary) 3474 goto unlock; 3475 3476 /* Check if a threshold crossed before removing */ 3477 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3478 3479 /* Calculate new number of threshold */ 3480 size = 0; 3481 for (i = 0; i < thresholds->primary->size; i++) { 3482 if (thresholds->primary->entries[i].eventfd != eventfd) 3483 size++; 3484 } 3485 3486 new = thresholds->spare; 3487 3488 /* Set thresholds array to NULL if we don't have thresholds */ 3489 if (!size) { 3490 kfree(new); 3491 new = NULL; 3492 goto swap_buffers; 3493 } 3494 3495 new->size = size; 3496 3497 /* Copy thresholds and find current threshold */ 3498 new->current_threshold = -1; 3499 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3500 if (thresholds->primary->entries[i].eventfd == eventfd) 3501 continue; 3502 3503 new->entries[j] = thresholds->primary->entries[i]; 3504 if (new->entries[j].threshold <= usage) { 3505 /* 3506 * new->current_threshold will not be used 3507 * until rcu_assign_pointer(), so it's safe to increment 3508 * it here. 3509 */ 3510 ++new->current_threshold; 3511 } 3512 j++; 3513 } 3514 3515 swap_buffers: 3516 /* Swap primary and spare array */ 3517 thresholds->spare = thresholds->primary; 3518 3519 rcu_assign_pointer(thresholds->primary, new); 3520 3521 /* To be sure that nobody uses thresholds */ 3522 synchronize_rcu(); 3523 3524 /* If all events are unregistered, free the spare array */ 3525 if (!new) { 3526 kfree(thresholds->spare); 3527 thresholds->spare = NULL; 3528 } 3529 unlock: 3530 mutex_unlock(&memcg->thresholds_lock); 3531 } 3532 3533 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3534 struct eventfd_ctx *eventfd) 3535 { 3536 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3537 } 3538 3539 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3540 struct eventfd_ctx *eventfd) 3541 { 3542 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3543 } 3544 3545 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3546 struct eventfd_ctx *eventfd, const char *args) 3547 { 3548 struct mem_cgroup_eventfd_list *event; 3549 3550 event = kmalloc(sizeof(*event), GFP_KERNEL); 3551 if (!event) 3552 return -ENOMEM; 3553 3554 spin_lock(&memcg_oom_lock); 3555 3556 event->eventfd = eventfd; 3557 list_add(&event->list, &memcg->oom_notify); 3558 3559 /* already in OOM ? */ 3560 if (memcg->under_oom) 3561 eventfd_signal(eventfd, 1); 3562 spin_unlock(&memcg_oom_lock); 3563 3564 return 0; 3565 } 3566 3567 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3568 struct eventfd_ctx *eventfd) 3569 { 3570 struct mem_cgroup_eventfd_list *ev, *tmp; 3571 3572 spin_lock(&memcg_oom_lock); 3573 3574 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3575 if (ev->eventfd == eventfd) { 3576 list_del(&ev->list); 3577 kfree(ev); 3578 } 3579 } 3580 3581 spin_unlock(&memcg_oom_lock); 3582 } 3583 3584 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3585 { 3586 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3587 3588 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3589 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3590 return 0; 3591 } 3592 3593 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3594 struct cftype *cft, u64 val) 3595 { 3596 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3597 3598 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3599 if (!css->parent || !((val == 0) || (val == 1))) 3600 return -EINVAL; 3601 3602 memcg->oom_kill_disable = val; 3603 if (!val) 3604 memcg_oom_recover(memcg); 3605 3606 return 0; 3607 } 3608 3609 #ifdef CONFIG_CGROUP_WRITEBACK 3610 3611 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) 3612 { 3613 return &memcg->cgwb_list; 3614 } 3615 3616 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3617 { 3618 return wb_domain_init(&memcg->cgwb_domain, gfp); 3619 } 3620 3621 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3622 { 3623 wb_domain_exit(&memcg->cgwb_domain); 3624 } 3625 3626 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3627 { 3628 wb_domain_size_changed(&memcg->cgwb_domain); 3629 } 3630 3631 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3632 { 3633 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3634 3635 if (!memcg->css.parent) 3636 return NULL; 3637 3638 return &memcg->cgwb_domain; 3639 } 3640 3641 /** 3642 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3643 * @wb: bdi_writeback in question 3644 * @pfilepages: out parameter for number of file pages 3645 * @pheadroom: out parameter for number of allocatable pages according to memcg 3646 * @pdirty: out parameter for number of dirty pages 3647 * @pwriteback: out parameter for number of pages under writeback 3648 * 3649 * Determine the numbers of file, headroom, dirty, and writeback pages in 3650 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3651 * is a bit more involved. 3652 * 3653 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3654 * headroom is calculated as the lowest headroom of itself and the 3655 * ancestors. Note that this doesn't consider the actual amount of 3656 * available memory in the system. The caller should further cap 3657 * *@pheadroom accordingly. 3658 */ 3659 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3660 unsigned long *pheadroom, unsigned long *pdirty, 3661 unsigned long *pwriteback) 3662 { 3663 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3664 struct mem_cgroup *parent; 3665 3666 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3667 3668 /* this should eventually include NR_UNSTABLE_NFS */ 3669 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3670 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3671 (1 << LRU_ACTIVE_FILE)); 3672 *pheadroom = PAGE_COUNTER_MAX; 3673 3674 while ((parent = parent_mem_cgroup(memcg))) { 3675 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3676 unsigned long used = page_counter_read(&memcg->memory); 3677 3678 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3679 memcg = parent; 3680 } 3681 } 3682 3683 #else /* CONFIG_CGROUP_WRITEBACK */ 3684 3685 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3686 { 3687 return 0; 3688 } 3689 3690 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3691 { 3692 } 3693 3694 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3695 { 3696 } 3697 3698 #endif /* CONFIG_CGROUP_WRITEBACK */ 3699 3700 /* 3701 * DO NOT USE IN NEW FILES. 3702 * 3703 * "cgroup.event_control" implementation. 3704 * 3705 * This is way over-engineered. It tries to support fully configurable 3706 * events for each user. Such level of flexibility is completely 3707 * unnecessary especially in the light of the planned unified hierarchy. 3708 * 3709 * Please deprecate this and replace with something simpler if at all 3710 * possible. 3711 */ 3712 3713 /* 3714 * Unregister event and free resources. 3715 * 3716 * Gets called from workqueue. 3717 */ 3718 static void memcg_event_remove(struct work_struct *work) 3719 { 3720 struct mem_cgroup_event *event = 3721 container_of(work, struct mem_cgroup_event, remove); 3722 struct mem_cgroup *memcg = event->memcg; 3723 3724 remove_wait_queue(event->wqh, &event->wait); 3725 3726 event->unregister_event(memcg, event->eventfd); 3727 3728 /* Notify userspace the event is going away. */ 3729 eventfd_signal(event->eventfd, 1); 3730 3731 eventfd_ctx_put(event->eventfd); 3732 kfree(event); 3733 css_put(&memcg->css); 3734 } 3735 3736 /* 3737 * Gets called on POLLHUP on eventfd when user closes it. 3738 * 3739 * Called with wqh->lock held and interrupts disabled. 3740 */ 3741 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 3742 int sync, void *key) 3743 { 3744 struct mem_cgroup_event *event = 3745 container_of(wait, struct mem_cgroup_event, wait); 3746 struct mem_cgroup *memcg = event->memcg; 3747 unsigned long flags = (unsigned long)key; 3748 3749 if (flags & POLLHUP) { 3750 /* 3751 * If the event has been detached at cgroup removal, we 3752 * can simply return knowing the other side will cleanup 3753 * for us. 3754 * 3755 * We can't race against event freeing since the other 3756 * side will require wqh->lock via remove_wait_queue(), 3757 * which we hold. 3758 */ 3759 spin_lock(&memcg->event_list_lock); 3760 if (!list_empty(&event->list)) { 3761 list_del_init(&event->list); 3762 /* 3763 * We are in atomic context, but cgroup_event_remove() 3764 * may sleep, so we have to call it in workqueue. 3765 */ 3766 schedule_work(&event->remove); 3767 } 3768 spin_unlock(&memcg->event_list_lock); 3769 } 3770 3771 return 0; 3772 } 3773 3774 static void memcg_event_ptable_queue_proc(struct file *file, 3775 wait_queue_head_t *wqh, poll_table *pt) 3776 { 3777 struct mem_cgroup_event *event = 3778 container_of(pt, struct mem_cgroup_event, pt); 3779 3780 event->wqh = wqh; 3781 add_wait_queue(wqh, &event->wait); 3782 } 3783 3784 /* 3785 * DO NOT USE IN NEW FILES. 3786 * 3787 * Parse input and register new cgroup event handler. 3788 * 3789 * Input must be in format '<event_fd> <control_fd> <args>'. 3790 * Interpretation of args is defined by control file implementation. 3791 */ 3792 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3793 char *buf, size_t nbytes, loff_t off) 3794 { 3795 struct cgroup_subsys_state *css = of_css(of); 3796 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3797 struct mem_cgroup_event *event; 3798 struct cgroup_subsys_state *cfile_css; 3799 unsigned int efd, cfd; 3800 struct fd efile; 3801 struct fd cfile; 3802 const char *name; 3803 char *endp; 3804 int ret; 3805 3806 buf = strstrip(buf); 3807 3808 efd = simple_strtoul(buf, &endp, 10); 3809 if (*endp != ' ') 3810 return -EINVAL; 3811 buf = endp + 1; 3812 3813 cfd = simple_strtoul(buf, &endp, 10); 3814 if ((*endp != ' ') && (*endp != '\0')) 3815 return -EINVAL; 3816 buf = endp + 1; 3817 3818 event = kzalloc(sizeof(*event), GFP_KERNEL); 3819 if (!event) 3820 return -ENOMEM; 3821 3822 event->memcg = memcg; 3823 INIT_LIST_HEAD(&event->list); 3824 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3825 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3826 INIT_WORK(&event->remove, memcg_event_remove); 3827 3828 efile = fdget(efd); 3829 if (!efile.file) { 3830 ret = -EBADF; 3831 goto out_kfree; 3832 } 3833 3834 event->eventfd = eventfd_ctx_fileget(efile.file); 3835 if (IS_ERR(event->eventfd)) { 3836 ret = PTR_ERR(event->eventfd); 3837 goto out_put_efile; 3838 } 3839 3840 cfile = fdget(cfd); 3841 if (!cfile.file) { 3842 ret = -EBADF; 3843 goto out_put_eventfd; 3844 } 3845 3846 /* the process need read permission on control file */ 3847 /* AV: shouldn't we check that it's been opened for read instead? */ 3848 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3849 if (ret < 0) 3850 goto out_put_cfile; 3851 3852 /* 3853 * Determine the event callbacks and set them in @event. This used 3854 * to be done via struct cftype but cgroup core no longer knows 3855 * about these events. The following is crude but the whole thing 3856 * is for compatibility anyway. 3857 * 3858 * DO NOT ADD NEW FILES. 3859 */ 3860 name = cfile.file->f_path.dentry->d_name.name; 3861 3862 if (!strcmp(name, "memory.usage_in_bytes")) { 3863 event->register_event = mem_cgroup_usage_register_event; 3864 event->unregister_event = mem_cgroup_usage_unregister_event; 3865 } else if (!strcmp(name, "memory.oom_control")) { 3866 event->register_event = mem_cgroup_oom_register_event; 3867 event->unregister_event = mem_cgroup_oom_unregister_event; 3868 } else if (!strcmp(name, "memory.pressure_level")) { 3869 event->register_event = vmpressure_register_event; 3870 event->unregister_event = vmpressure_unregister_event; 3871 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3872 event->register_event = memsw_cgroup_usage_register_event; 3873 event->unregister_event = memsw_cgroup_usage_unregister_event; 3874 } else { 3875 ret = -EINVAL; 3876 goto out_put_cfile; 3877 } 3878 3879 /* 3880 * Verify @cfile should belong to @css. Also, remaining events are 3881 * automatically removed on cgroup destruction but the removal is 3882 * asynchronous, so take an extra ref on @css. 3883 */ 3884 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3885 &memory_cgrp_subsys); 3886 ret = -EINVAL; 3887 if (IS_ERR(cfile_css)) 3888 goto out_put_cfile; 3889 if (cfile_css != css) { 3890 css_put(cfile_css); 3891 goto out_put_cfile; 3892 } 3893 3894 ret = event->register_event(memcg, event->eventfd, buf); 3895 if (ret) 3896 goto out_put_css; 3897 3898 efile.file->f_op->poll(efile.file, &event->pt); 3899 3900 spin_lock(&memcg->event_list_lock); 3901 list_add(&event->list, &memcg->event_list); 3902 spin_unlock(&memcg->event_list_lock); 3903 3904 fdput(cfile); 3905 fdput(efile); 3906 3907 return nbytes; 3908 3909 out_put_css: 3910 css_put(css); 3911 out_put_cfile: 3912 fdput(cfile); 3913 out_put_eventfd: 3914 eventfd_ctx_put(event->eventfd); 3915 out_put_efile: 3916 fdput(efile); 3917 out_kfree: 3918 kfree(event); 3919 3920 return ret; 3921 } 3922 3923 static struct cftype mem_cgroup_legacy_files[] = { 3924 { 3925 .name = "usage_in_bytes", 3926 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3927 .read_u64 = mem_cgroup_read_u64, 3928 }, 3929 { 3930 .name = "max_usage_in_bytes", 3931 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 3932 .write = mem_cgroup_reset, 3933 .read_u64 = mem_cgroup_read_u64, 3934 }, 3935 { 3936 .name = "limit_in_bytes", 3937 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 3938 .write = mem_cgroup_write, 3939 .read_u64 = mem_cgroup_read_u64, 3940 }, 3941 { 3942 .name = "soft_limit_in_bytes", 3943 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 3944 .write = mem_cgroup_write, 3945 .read_u64 = mem_cgroup_read_u64, 3946 }, 3947 { 3948 .name = "failcnt", 3949 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 3950 .write = mem_cgroup_reset, 3951 .read_u64 = mem_cgroup_read_u64, 3952 }, 3953 { 3954 .name = "stat", 3955 .seq_show = memcg_stat_show, 3956 }, 3957 { 3958 .name = "force_empty", 3959 .write = mem_cgroup_force_empty_write, 3960 }, 3961 { 3962 .name = "use_hierarchy", 3963 .write_u64 = mem_cgroup_hierarchy_write, 3964 .read_u64 = mem_cgroup_hierarchy_read, 3965 }, 3966 { 3967 .name = "cgroup.event_control", /* XXX: for compat */ 3968 .write = memcg_write_event_control, 3969 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 3970 }, 3971 { 3972 .name = "swappiness", 3973 .read_u64 = mem_cgroup_swappiness_read, 3974 .write_u64 = mem_cgroup_swappiness_write, 3975 }, 3976 { 3977 .name = "move_charge_at_immigrate", 3978 .read_u64 = mem_cgroup_move_charge_read, 3979 .write_u64 = mem_cgroup_move_charge_write, 3980 }, 3981 { 3982 .name = "oom_control", 3983 .seq_show = mem_cgroup_oom_control_read, 3984 .write_u64 = mem_cgroup_oom_control_write, 3985 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 3986 }, 3987 { 3988 .name = "pressure_level", 3989 }, 3990 #ifdef CONFIG_NUMA 3991 { 3992 .name = "numa_stat", 3993 .seq_show = memcg_numa_stat_show, 3994 }, 3995 #endif 3996 { 3997 .name = "kmem.limit_in_bytes", 3998 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 3999 .write = mem_cgroup_write, 4000 .read_u64 = mem_cgroup_read_u64, 4001 }, 4002 { 4003 .name = "kmem.usage_in_bytes", 4004 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4005 .read_u64 = mem_cgroup_read_u64, 4006 }, 4007 { 4008 .name = "kmem.failcnt", 4009 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4010 .write = mem_cgroup_reset, 4011 .read_u64 = mem_cgroup_read_u64, 4012 }, 4013 { 4014 .name = "kmem.max_usage_in_bytes", 4015 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4016 .write = mem_cgroup_reset, 4017 .read_u64 = mem_cgroup_read_u64, 4018 }, 4019 #ifdef CONFIG_SLABINFO 4020 { 4021 .name = "kmem.slabinfo", 4022 .seq_start = slab_start, 4023 .seq_next = slab_next, 4024 .seq_stop = slab_stop, 4025 .seq_show = memcg_slab_show, 4026 }, 4027 #endif 4028 { 4029 .name = "kmem.tcp.limit_in_bytes", 4030 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4031 .write = mem_cgroup_write, 4032 .read_u64 = mem_cgroup_read_u64, 4033 }, 4034 { 4035 .name = "kmem.tcp.usage_in_bytes", 4036 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4037 .read_u64 = mem_cgroup_read_u64, 4038 }, 4039 { 4040 .name = "kmem.tcp.failcnt", 4041 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4042 .write = mem_cgroup_reset, 4043 .read_u64 = mem_cgroup_read_u64, 4044 }, 4045 { 4046 .name = "kmem.tcp.max_usage_in_bytes", 4047 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4048 .write = mem_cgroup_reset, 4049 .read_u64 = mem_cgroup_read_u64, 4050 }, 4051 { }, /* terminate */ 4052 }; 4053 4054 /* 4055 * Private memory cgroup IDR 4056 * 4057 * Swap-out records and page cache shadow entries need to store memcg 4058 * references in constrained space, so we maintain an ID space that is 4059 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4060 * memory-controlled cgroups to 64k. 4061 * 4062 * However, there usually are many references to the oflline CSS after 4063 * the cgroup has been destroyed, such as page cache or reclaimable 4064 * slab objects, that don't need to hang on to the ID. We want to keep 4065 * those dead CSS from occupying IDs, or we might quickly exhaust the 4066 * relatively small ID space and prevent the creation of new cgroups 4067 * even when there are much fewer than 64k cgroups - possibly none. 4068 * 4069 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4070 * be freed and recycled when it's no longer needed, which is usually 4071 * when the CSS is offlined. 4072 * 4073 * The only exception to that are records of swapped out tmpfs/shmem 4074 * pages that need to be attributed to live ancestors on swapin. But 4075 * those references are manageable from userspace. 4076 */ 4077 4078 static DEFINE_IDR(mem_cgroup_idr); 4079 4080 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4081 { 4082 atomic_add(n, &memcg->id.ref); 4083 } 4084 4085 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 4086 { 4087 while (!atomic_inc_not_zero(&memcg->id.ref)) { 4088 /* 4089 * The root cgroup cannot be destroyed, so it's refcount must 4090 * always be >= 1. 4091 */ 4092 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 4093 VM_BUG_ON(1); 4094 break; 4095 } 4096 memcg = parent_mem_cgroup(memcg); 4097 if (!memcg) 4098 memcg = root_mem_cgroup; 4099 } 4100 return memcg; 4101 } 4102 4103 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4104 { 4105 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4106 idr_remove(&mem_cgroup_idr, memcg->id.id); 4107 memcg->id.id = 0; 4108 4109 /* Memcg ID pins CSS */ 4110 css_put(&memcg->css); 4111 } 4112 } 4113 4114 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4115 { 4116 mem_cgroup_id_get_many(memcg, 1); 4117 } 4118 4119 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4120 { 4121 mem_cgroup_id_put_many(memcg, 1); 4122 } 4123 4124 /** 4125 * mem_cgroup_from_id - look up a memcg from a memcg id 4126 * @id: the memcg id to look up 4127 * 4128 * Caller must hold rcu_read_lock(). 4129 */ 4130 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4131 { 4132 WARN_ON_ONCE(!rcu_read_lock_held()); 4133 return idr_find(&mem_cgroup_idr, id); 4134 } 4135 4136 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4137 { 4138 struct mem_cgroup_per_node *pn; 4139 int tmp = node; 4140 /* 4141 * This routine is called against possible nodes. 4142 * But it's BUG to call kmalloc() against offline node. 4143 * 4144 * TODO: this routine can waste much memory for nodes which will 4145 * never be onlined. It's better to use memory hotplug callback 4146 * function. 4147 */ 4148 if (!node_state(node, N_NORMAL_MEMORY)) 4149 tmp = -1; 4150 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4151 if (!pn) 4152 return 1; 4153 4154 lruvec_init(&pn->lruvec); 4155 pn->usage_in_excess = 0; 4156 pn->on_tree = false; 4157 pn->memcg = memcg; 4158 4159 memcg->nodeinfo[node] = pn; 4160 return 0; 4161 } 4162 4163 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4164 { 4165 kfree(memcg->nodeinfo[node]); 4166 } 4167 4168 static void mem_cgroup_free(struct mem_cgroup *memcg) 4169 { 4170 int node; 4171 4172 memcg_wb_domain_exit(memcg); 4173 for_each_node(node) 4174 free_mem_cgroup_per_node_info(memcg, node); 4175 free_percpu(memcg->stat); 4176 kfree(memcg); 4177 } 4178 4179 static struct mem_cgroup *mem_cgroup_alloc(void) 4180 { 4181 struct mem_cgroup *memcg; 4182 size_t size; 4183 int node; 4184 4185 size = sizeof(struct mem_cgroup); 4186 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4187 4188 memcg = kzalloc(size, GFP_KERNEL); 4189 if (!memcg) 4190 return NULL; 4191 4192 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4193 1, MEM_CGROUP_ID_MAX, 4194 GFP_KERNEL); 4195 if (memcg->id.id < 0) 4196 goto fail; 4197 4198 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4199 if (!memcg->stat) 4200 goto fail; 4201 4202 for_each_node(node) 4203 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4204 goto fail; 4205 4206 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4207 goto fail; 4208 4209 INIT_WORK(&memcg->high_work, high_work_func); 4210 memcg->last_scanned_node = MAX_NUMNODES; 4211 INIT_LIST_HEAD(&memcg->oom_notify); 4212 mutex_init(&memcg->thresholds_lock); 4213 spin_lock_init(&memcg->move_lock); 4214 vmpressure_init(&memcg->vmpressure); 4215 INIT_LIST_HEAD(&memcg->event_list); 4216 spin_lock_init(&memcg->event_list_lock); 4217 memcg->socket_pressure = jiffies; 4218 #ifndef CONFIG_SLOB 4219 memcg->kmemcg_id = -1; 4220 #endif 4221 #ifdef CONFIG_CGROUP_WRITEBACK 4222 INIT_LIST_HEAD(&memcg->cgwb_list); 4223 #endif 4224 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4225 return memcg; 4226 fail: 4227 if (memcg->id.id > 0) 4228 idr_remove(&mem_cgroup_idr, memcg->id.id); 4229 mem_cgroup_free(memcg); 4230 return NULL; 4231 } 4232 4233 static struct cgroup_subsys_state * __ref 4234 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4235 { 4236 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4237 struct mem_cgroup *memcg; 4238 long error = -ENOMEM; 4239 4240 memcg = mem_cgroup_alloc(); 4241 if (!memcg) 4242 return ERR_PTR(error); 4243 4244 memcg->high = PAGE_COUNTER_MAX; 4245 memcg->soft_limit = PAGE_COUNTER_MAX; 4246 if (parent) { 4247 memcg->swappiness = mem_cgroup_swappiness(parent); 4248 memcg->oom_kill_disable = parent->oom_kill_disable; 4249 } 4250 if (parent && parent->use_hierarchy) { 4251 memcg->use_hierarchy = true; 4252 page_counter_init(&memcg->memory, &parent->memory); 4253 page_counter_init(&memcg->swap, &parent->swap); 4254 page_counter_init(&memcg->memsw, &parent->memsw); 4255 page_counter_init(&memcg->kmem, &parent->kmem); 4256 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4257 } else { 4258 page_counter_init(&memcg->memory, NULL); 4259 page_counter_init(&memcg->swap, NULL); 4260 page_counter_init(&memcg->memsw, NULL); 4261 page_counter_init(&memcg->kmem, NULL); 4262 page_counter_init(&memcg->tcpmem, NULL); 4263 /* 4264 * Deeper hierachy with use_hierarchy == false doesn't make 4265 * much sense so let cgroup subsystem know about this 4266 * unfortunate state in our controller. 4267 */ 4268 if (parent != root_mem_cgroup) 4269 memory_cgrp_subsys.broken_hierarchy = true; 4270 } 4271 4272 /* The following stuff does not apply to the root */ 4273 if (!parent) { 4274 root_mem_cgroup = memcg; 4275 return &memcg->css; 4276 } 4277 4278 error = memcg_online_kmem(memcg); 4279 if (error) 4280 goto fail; 4281 4282 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4283 static_branch_inc(&memcg_sockets_enabled_key); 4284 4285 return &memcg->css; 4286 fail: 4287 mem_cgroup_free(memcg); 4288 return ERR_PTR(-ENOMEM); 4289 } 4290 4291 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4292 { 4293 /* Online state pins memcg ID, memcg ID pins CSS */ 4294 mem_cgroup_id_get(mem_cgroup_from_css(css)); 4295 css_get(css); 4296 return 0; 4297 } 4298 4299 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4300 { 4301 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4302 struct mem_cgroup_event *event, *tmp; 4303 4304 /* 4305 * Unregister events and notify userspace. 4306 * Notify userspace about cgroup removing only after rmdir of cgroup 4307 * directory to avoid race between userspace and kernelspace. 4308 */ 4309 spin_lock(&memcg->event_list_lock); 4310 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4311 list_del_init(&event->list); 4312 schedule_work(&event->remove); 4313 } 4314 spin_unlock(&memcg->event_list_lock); 4315 4316 memcg_offline_kmem(memcg); 4317 wb_memcg_offline(memcg); 4318 4319 mem_cgroup_id_put(memcg); 4320 } 4321 4322 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4323 { 4324 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4325 4326 invalidate_reclaim_iterators(memcg); 4327 } 4328 4329 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4330 { 4331 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4332 4333 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4334 static_branch_dec(&memcg_sockets_enabled_key); 4335 4336 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4337 static_branch_dec(&memcg_sockets_enabled_key); 4338 4339 vmpressure_cleanup(&memcg->vmpressure); 4340 cancel_work_sync(&memcg->high_work); 4341 mem_cgroup_remove_from_trees(memcg); 4342 memcg_free_kmem(memcg); 4343 mem_cgroup_free(memcg); 4344 } 4345 4346 /** 4347 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4348 * @css: the target css 4349 * 4350 * Reset the states of the mem_cgroup associated with @css. This is 4351 * invoked when the userland requests disabling on the default hierarchy 4352 * but the memcg is pinned through dependency. The memcg should stop 4353 * applying policies and should revert to the vanilla state as it may be 4354 * made visible again. 4355 * 4356 * The current implementation only resets the essential configurations. 4357 * This needs to be expanded to cover all the visible parts. 4358 */ 4359 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4360 { 4361 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4362 4363 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX); 4364 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX); 4365 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX); 4366 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX); 4367 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX); 4368 memcg->low = 0; 4369 memcg->high = PAGE_COUNTER_MAX; 4370 memcg->soft_limit = PAGE_COUNTER_MAX; 4371 memcg_wb_domain_size_changed(memcg); 4372 } 4373 4374 #ifdef CONFIG_MMU 4375 /* Handlers for move charge at task migration. */ 4376 static int mem_cgroup_do_precharge(unsigned long count) 4377 { 4378 int ret; 4379 4380 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4381 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4382 if (!ret) { 4383 mc.precharge += count; 4384 return ret; 4385 } 4386 4387 /* Try charges one by one with reclaim */ 4388 while (count--) { 4389 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4390 if (ret) 4391 return ret; 4392 mc.precharge++; 4393 cond_resched(); 4394 } 4395 return 0; 4396 } 4397 4398 union mc_target { 4399 struct page *page; 4400 swp_entry_t ent; 4401 }; 4402 4403 enum mc_target_type { 4404 MC_TARGET_NONE = 0, 4405 MC_TARGET_PAGE, 4406 MC_TARGET_SWAP, 4407 }; 4408 4409 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4410 unsigned long addr, pte_t ptent) 4411 { 4412 struct page *page = vm_normal_page(vma, addr, ptent); 4413 4414 if (!page || !page_mapped(page)) 4415 return NULL; 4416 if (PageAnon(page)) { 4417 if (!(mc.flags & MOVE_ANON)) 4418 return NULL; 4419 } else { 4420 if (!(mc.flags & MOVE_FILE)) 4421 return NULL; 4422 } 4423 if (!get_page_unless_zero(page)) 4424 return NULL; 4425 4426 return page; 4427 } 4428 4429 #ifdef CONFIG_SWAP 4430 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4431 pte_t ptent, swp_entry_t *entry) 4432 { 4433 struct page *page = NULL; 4434 swp_entry_t ent = pte_to_swp_entry(ptent); 4435 4436 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4437 return NULL; 4438 /* 4439 * Because lookup_swap_cache() updates some statistics counter, 4440 * we call find_get_page() with swapper_space directly. 4441 */ 4442 page = find_get_page(swap_address_space(ent), ent.val); 4443 if (do_memsw_account()) 4444 entry->val = ent.val; 4445 4446 return page; 4447 } 4448 #else 4449 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4450 pte_t ptent, swp_entry_t *entry) 4451 { 4452 return NULL; 4453 } 4454 #endif 4455 4456 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4457 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4458 { 4459 struct page *page = NULL; 4460 struct address_space *mapping; 4461 pgoff_t pgoff; 4462 4463 if (!vma->vm_file) /* anonymous vma */ 4464 return NULL; 4465 if (!(mc.flags & MOVE_FILE)) 4466 return NULL; 4467 4468 mapping = vma->vm_file->f_mapping; 4469 pgoff = linear_page_index(vma, addr); 4470 4471 /* page is moved even if it's not RSS of this task(page-faulted). */ 4472 #ifdef CONFIG_SWAP 4473 /* shmem/tmpfs may report page out on swap: account for that too. */ 4474 if (shmem_mapping(mapping)) { 4475 page = find_get_entry(mapping, pgoff); 4476 if (radix_tree_exceptional_entry(page)) { 4477 swp_entry_t swp = radix_to_swp_entry(page); 4478 if (do_memsw_account()) 4479 *entry = swp; 4480 page = find_get_page(swap_address_space(swp), swp.val); 4481 } 4482 } else 4483 page = find_get_page(mapping, pgoff); 4484 #else 4485 page = find_get_page(mapping, pgoff); 4486 #endif 4487 return page; 4488 } 4489 4490 /** 4491 * mem_cgroup_move_account - move account of the page 4492 * @page: the page 4493 * @compound: charge the page as compound or small page 4494 * @from: mem_cgroup which the page is moved from. 4495 * @to: mem_cgroup which the page is moved to. @from != @to. 4496 * 4497 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 4498 * 4499 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4500 * from old cgroup. 4501 */ 4502 static int mem_cgroup_move_account(struct page *page, 4503 bool compound, 4504 struct mem_cgroup *from, 4505 struct mem_cgroup *to) 4506 { 4507 unsigned long flags; 4508 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4509 int ret; 4510 bool anon; 4511 4512 VM_BUG_ON(from == to); 4513 VM_BUG_ON_PAGE(PageLRU(page), page); 4514 VM_BUG_ON(compound && !PageTransHuge(page)); 4515 4516 /* 4517 * Prevent mem_cgroup_migrate() from looking at 4518 * page->mem_cgroup of its source page while we change it. 4519 */ 4520 ret = -EBUSY; 4521 if (!trylock_page(page)) 4522 goto out; 4523 4524 ret = -EINVAL; 4525 if (page->mem_cgroup != from) 4526 goto out_unlock; 4527 4528 anon = PageAnon(page); 4529 4530 spin_lock_irqsave(&from->move_lock, flags); 4531 4532 if (!anon && page_mapped(page)) { 4533 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4534 nr_pages); 4535 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4536 nr_pages); 4537 } 4538 4539 /* 4540 * move_lock grabbed above and caller set from->moving_account, so 4541 * mem_cgroup_update_page_stat() will serialize updates to PageDirty. 4542 * So mapping should be stable for dirty pages. 4543 */ 4544 if (!anon && PageDirty(page)) { 4545 struct address_space *mapping = page_mapping(page); 4546 4547 if (mapping_cap_account_dirty(mapping)) { 4548 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY], 4549 nr_pages); 4550 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY], 4551 nr_pages); 4552 } 4553 } 4554 4555 if (PageWriteback(page)) { 4556 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4557 nr_pages); 4558 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4559 nr_pages); 4560 } 4561 4562 /* 4563 * It is safe to change page->mem_cgroup here because the page 4564 * is referenced, charged, and isolated - we can't race with 4565 * uncharging, charging, migration, or LRU putback. 4566 */ 4567 4568 /* caller should have done css_get */ 4569 page->mem_cgroup = to; 4570 spin_unlock_irqrestore(&from->move_lock, flags); 4571 4572 ret = 0; 4573 4574 local_irq_disable(); 4575 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4576 memcg_check_events(to, page); 4577 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4578 memcg_check_events(from, page); 4579 local_irq_enable(); 4580 out_unlock: 4581 unlock_page(page); 4582 out: 4583 return ret; 4584 } 4585 4586 /** 4587 * get_mctgt_type - get target type of moving charge 4588 * @vma: the vma the pte to be checked belongs 4589 * @addr: the address corresponding to the pte to be checked 4590 * @ptent: the pte to be checked 4591 * @target: the pointer the target page or swap ent will be stored(can be NULL) 4592 * 4593 * Returns 4594 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4595 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4596 * move charge. if @target is not NULL, the page is stored in target->page 4597 * with extra refcnt got(Callers should handle it). 4598 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4599 * target for charge migration. if @target is not NULL, the entry is stored 4600 * in target->ent. 4601 * 4602 * Called with pte lock held. 4603 */ 4604 4605 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4606 unsigned long addr, pte_t ptent, union mc_target *target) 4607 { 4608 struct page *page = NULL; 4609 enum mc_target_type ret = MC_TARGET_NONE; 4610 swp_entry_t ent = { .val = 0 }; 4611 4612 if (pte_present(ptent)) 4613 page = mc_handle_present_pte(vma, addr, ptent); 4614 else if (is_swap_pte(ptent)) 4615 page = mc_handle_swap_pte(vma, ptent, &ent); 4616 else if (pte_none(ptent)) 4617 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4618 4619 if (!page && !ent.val) 4620 return ret; 4621 if (page) { 4622 /* 4623 * Do only loose check w/o serialization. 4624 * mem_cgroup_move_account() checks the page is valid or 4625 * not under LRU exclusion. 4626 */ 4627 if (page->mem_cgroup == mc.from) { 4628 ret = MC_TARGET_PAGE; 4629 if (target) 4630 target->page = page; 4631 } 4632 if (!ret || !target) 4633 put_page(page); 4634 } 4635 /* There is a swap entry and a page doesn't exist or isn't charged */ 4636 if (ent.val && !ret && 4637 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4638 ret = MC_TARGET_SWAP; 4639 if (target) 4640 target->ent = ent; 4641 } 4642 return ret; 4643 } 4644 4645 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4646 /* 4647 * We don't consider swapping or file mapped pages because THP does not 4648 * support them for now. 4649 * Caller should make sure that pmd_trans_huge(pmd) is true. 4650 */ 4651 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4652 unsigned long addr, pmd_t pmd, union mc_target *target) 4653 { 4654 struct page *page = NULL; 4655 enum mc_target_type ret = MC_TARGET_NONE; 4656 4657 page = pmd_page(pmd); 4658 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4659 if (!(mc.flags & MOVE_ANON)) 4660 return ret; 4661 if (page->mem_cgroup == mc.from) { 4662 ret = MC_TARGET_PAGE; 4663 if (target) { 4664 get_page(page); 4665 target->page = page; 4666 } 4667 } 4668 return ret; 4669 } 4670 #else 4671 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4672 unsigned long addr, pmd_t pmd, union mc_target *target) 4673 { 4674 return MC_TARGET_NONE; 4675 } 4676 #endif 4677 4678 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4679 unsigned long addr, unsigned long end, 4680 struct mm_walk *walk) 4681 { 4682 struct vm_area_struct *vma = walk->vma; 4683 pte_t *pte; 4684 spinlock_t *ptl; 4685 4686 ptl = pmd_trans_huge_lock(pmd, vma); 4687 if (ptl) { 4688 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4689 mc.precharge += HPAGE_PMD_NR; 4690 spin_unlock(ptl); 4691 return 0; 4692 } 4693 4694 if (pmd_trans_unstable(pmd)) 4695 return 0; 4696 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4697 for (; addr != end; pte++, addr += PAGE_SIZE) 4698 if (get_mctgt_type(vma, addr, *pte, NULL)) 4699 mc.precharge++; /* increment precharge temporarily */ 4700 pte_unmap_unlock(pte - 1, ptl); 4701 cond_resched(); 4702 4703 return 0; 4704 } 4705 4706 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4707 { 4708 unsigned long precharge; 4709 4710 struct mm_walk mem_cgroup_count_precharge_walk = { 4711 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4712 .mm = mm, 4713 }; 4714 down_read(&mm->mmap_sem); 4715 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk); 4716 up_read(&mm->mmap_sem); 4717 4718 precharge = mc.precharge; 4719 mc.precharge = 0; 4720 4721 return precharge; 4722 } 4723 4724 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4725 { 4726 unsigned long precharge = mem_cgroup_count_precharge(mm); 4727 4728 VM_BUG_ON(mc.moving_task); 4729 mc.moving_task = current; 4730 return mem_cgroup_do_precharge(precharge); 4731 } 4732 4733 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4734 static void __mem_cgroup_clear_mc(void) 4735 { 4736 struct mem_cgroup *from = mc.from; 4737 struct mem_cgroup *to = mc.to; 4738 4739 /* we must uncharge all the leftover precharges from mc.to */ 4740 if (mc.precharge) { 4741 cancel_charge(mc.to, mc.precharge); 4742 mc.precharge = 0; 4743 } 4744 /* 4745 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4746 * we must uncharge here. 4747 */ 4748 if (mc.moved_charge) { 4749 cancel_charge(mc.from, mc.moved_charge); 4750 mc.moved_charge = 0; 4751 } 4752 /* we must fixup refcnts and charges */ 4753 if (mc.moved_swap) { 4754 /* uncharge swap account from the old cgroup */ 4755 if (!mem_cgroup_is_root(mc.from)) 4756 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4757 4758 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 4759 4760 /* 4761 * we charged both to->memory and to->memsw, so we 4762 * should uncharge to->memory. 4763 */ 4764 if (!mem_cgroup_is_root(mc.to)) 4765 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4766 4767 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 4768 css_put_many(&mc.to->css, mc.moved_swap); 4769 4770 mc.moved_swap = 0; 4771 } 4772 memcg_oom_recover(from); 4773 memcg_oom_recover(to); 4774 wake_up_all(&mc.waitq); 4775 } 4776 4777 static void mem_cgroup_clear_mc(void) 4778 { 4779 struct mm_struct *mm = mc.mm; 4780 4781 /* 4782 * we must clear moving_task before waking up waiters at the end of 4783 * task migration. 4784 */ 4785 mc.moving_task = NULL; 4786 __mem_cgroup_clear_mc(); 4787 spin_lock(&mc.lock); 4788 mc.from = NULL; 4789 mc.to = NULL; 4790 mc.mm = NULL; 4791 spin_unlock(&mc.lock); 4792 4793 mmput(mm); 4794 } 4795 4796 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4797 { 4798 struct cgroup_subsys_state *css; 4799 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 4800 struct mem_cgroup *from; 4801 struct task_struct *leader, *p; 4802 struct mm_struct *mm; 4803 unsigned long move_flags; 4804 int ret = 0; 4805 4806 /* charge immigration isn't supported on the default hierarchy */ 4807 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4808 return 0; 4809 4810 /* 4811 * Multi-process migrations only happen on the default hierarchy 4812 * where charge immigration is not used. Perform charge 4813 * immigration if @tset contains a leader and whine if there are 4814 * multiple. 4815 */ 4816 p = NULL; 4817 cgroup_taskset_for_each_leader(leader, css, tset) { 4818 WARN_ON_ONCE(p); 4819 p = leader; 4820 memcg = mem_cgroup_from_css(css); 4821 } 4822 if (!p) 4823 return 0; 4824 4825 /* 4826 * We are now commited to this value whatever it is. Changes in this 4827 * tunable will only affect upcoming migrations, not the current one. 4828 * So we need to save it, and keep it going. 4829 */ 4830 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4831 if (!move_flags) 4832 return 0; 4833 4834 from = mem_cgroup_from_task(p); 4835 4836 VM_BUG_ON(from == memcg); 4837 4838 mm = get_task_mm(p); 4839 if (!mm) 4840 return 0; 4841 /* We move charges only when we move a owner of the mm */ 4842 if (mm->owner == p) { 4843 VM_BUG_ON(mc.from); 4844 VM_BUG_ON(mc.to); 4845 VM_BUG_ON(mc.precharge); 4846 VM_BUG_ON(mc.moved_charge); 4847 VM_BUG_ON(mc.moved_swap); 4848 4849 spin_lock(&mc.lock); 4850 mc.mm = mm; 4851 mc.from = from; 4852 mc.to = memcg; 4853 mc.flags = move_flags; 4854 spin_unlock(&mc.lock); 4855 /* We set mc.moving_task later */ 4856 4857 ret = mem_cgroup_precharge_mc(mm); 4858 if (ret) 4859 mem_cgroup_clear_mc(); 4860 } else { 4861 mmput(mm); 4862 } 4863 return ret; 4864 } 4865 4866 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4867 { 4868 if (mc.to) 4869 mem_cgroup_clear_mc(); 4870 } 4871 4872 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4873 unsigned long addr, unsigned long end, 4874 struct mm_walk *walk) 4875 { 4876 int ret = 0; 4877 struct vm_area_struct *vma = walk->vma; 4878 pte_t *pte; 4879 spinlock_t *ptl; 4880 enum mc_target_type target_type; 4881 union mc_target target; 4882 struct page *page; 4883 4884 ptl = pmd_trans_huge_lock(pmd, vma); 4885 if (ptl) { 4886 if (mc.precharge < HPAGE_PMD_NR) { 4887 spin_unlock(ptl); 4888 return 0; 4889 } 4890 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4891 if (target_type == MC_TARGET_PAGE) { 4892 page = target.page; 4893 if (!isolate_lru_page(page)) { 4894 if (!mem_cgroup_move_account(page, true, 4895 mc.from, mc.to)) { 4896 mc.precharge -= HPAGE_PMD_NR; 4897 mc.moved_charge += HPAGE_PMD_NR; 4898 } 4899 putback_lru_page(page); 4900 } 4901 put_page(page); 4902 } 4903 spin_unlock(ptl); 4904 return 0; 4905 } 4906 4907 if (pmd_trans_unstable(pmd)) 4908 return 0; 4909 retry: 4910 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4911 for (; addr != end; addr += PAGE_SIZE) { 4912 pte_t ptent = *(pte++); 4913 swp_entry_t ent; 4914 4915 if (!mc.precharge) 4916 break; 4917 4918 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4919 case MC_TARGET_PAGE: 4920 page = target.page; 4921 /* 4922 * We can have a part of the split pmd here. Moving it 4923 * can be done but it would be too convoluted so simply 4924 * ignore such a partial THP and keep it in original 4925 * memcg. There should be somebody mapping the head. 4926 */ 4927 if (PageTransCompound(page)) 4928 goto put; 4929 if (isolate_lru_page(page)) 4930 goto put; 4931 if (!mem_cgroup_move_account(page, false, 4932 mc.from, mc.to)) { 4933 mc.precharge--; 4934 /* we uncharge from mc.from later. */ 4935 mc.moved_charge++; 4936 } 4937 putback_lru_page(page); 4938 put: /* get_mctgt_type() gets the page */ 4939 put_page(page); 4940 break; 4941 case MC_TARGET_SWAP: 4942 ent = target.ent; 4943 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4944 mc.precharge--; 4945 /* we fixup refcnts and charges later. */ 4946 mc.moved_swap++; 4947 } 4948 break; 4949 default: 4950 break; 4951 } 4952 } 4953 pte_unmap_unlock(pte - 1, ptl); 4954 cond_resched(); 4955 4956 if (addr != end) { 4957 /* 4958 * We have consumed all precharges we got in can_attach(). 4959 * We try charge one by one, but don't do any additional 4960 * charges to mc.to if we have failed in charge once in attach() 4961 * phase. 4962 */ 4963 ret = mem_cgroup_do_precharge(1); 4964 if (!ret) 4965 goto retry; 4966 } 4967 4968 return ret; 4969 } 4970 4971 static void mem_cgroup_move_charge(void) 4972 { 4973 struct mm_walk mem_cgroup_move_charge_walk = { 4974 .pmd_entry = mem_cgroup_move_charge_pte_range, 4975 .mm = mc.mm, 4976 }; 4977 4978 lru_add_drain_all(); 4979 /* 4980 * Signal lock_page_memcg() to take the memcg's move_lock 4981 * while we're moving its pages to another memcg. Then wait 4982 * for already started RCU-only updates to finish. 4983 */ 4984 atomic_inc(&mc.from->moving_account); 4985 synchronize_rcu(); 4986 retry: 4987 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 4988 /* 4989 * Someone who are holding the mmap_sem might be waiting in 4990 * waitq. So we cancel all extra charges, wake up all waiters, 4991 * and retry. Because we cancel precharges, we might not be able 4992 * to move enough charges, but moving charge is a best-effort 4993 * feature anyway, so it wouldn't be a big problem. 4994 */ 4995 __mem_cgroup_clear_mc(); 4996 cond_resched(); 4997 goto retry; 4998 } 4999 /* 5000 * When we have consumed all precharges and failed in doing 5001 * additional charge, the page walk just aborts. 5002 */ 5003 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 5004 up_read(&mc.mm->mmap_sem); 5005 atomic_dec(&mc.from->moving_account); 5006 } 5007 5008 static void mem_cgroup_move_task(void) 5009 { 5010 if (mc.to) { 5011 mem_cgroup_move_charge(); 5012 mem_cgroup_clear_mc(); 5013 } 5014 } 5015 #else /* !CONFIG_MMU */ 5016 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5017 { 5018 return 0; 5019 } 5020 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5021 { 5022 } 5023 static void mem_cgroup_move_task(void) 5024 { 5025 } 5026 #endif 5027 5028 /* 5029 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5030 * to verify whether we're attached to the default hierarchy on each mount 5031 * attempt. 5032 */ 5033 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5034 { 5035 /* 5036 * use_hierarchy is forced on the default hierarchy. cgroup core 5037 * guarantees that @root doesn't have any children, so turning it 5038 * on for the root memcg is enough. 5039 */ 5040 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5041 root_mem_cgroup->use_hierarchy = true; 5042 else 5043 root_mem_cgroup->use_hierarchy = false; 5044 } 5045 5046 static u64 memory_current_read(struct cgroup_subsys_state *css, 5047 struct cftype *cft) 5048 { 5049 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5050 5051 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5052 } 5053 5054 static int memory_low_show(struct seq_file *m, void *v) 5055 { 5056 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5057 unsigned long low = READ_ONCE(memcg->low); 5058 5059 if (low == PAGE_COUNTER_MAX) 5060 seq_puts(m, "max\n"); 5061 else 5062 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5063 5064 return 0; 5065 } 5066 5067 static ssize_t memory_low_write(struct kernfs_open_file *of, 5068 char *buf, size_t nbytes, loff_t off) 5069 { 5070 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5071 unsigned long low; 5072 int err; 5073 5074 buf = strstrip(buf); 5075 err = page_counter_memparse(buf, "max", &low); 5076 if (err) 5077 return err; 5078 5079 memcg->low = low; 5080 5081 return nbytes; 5082 } 5083 5084 static int memory_high_show(struct seq_file *m, void *v) 5085 { 5086 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5087 unsigned long high = READ_ONCE(memcg->high); 5088 5089 if (high == PAGE_COUNTER_MAX) 5090 seq_puts(m, "max\n"); 5091 else 5092 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5093 5094 return 0; 5095 } 5096 5097 static ssize_t memory_high_write(struct kernfs_open_file *of, 5098 char *buf, size_t nbytes, loff_t off) 5099 { 5100 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5101 unsigned long nr_pages; 5102 unsigned long high; 5103 int err; 5104 5105 buf = strstrip(buf); 5106 err = page_counter_memparse(buf, "max", &high); 5107 if (err) 5108 return err; 5109 5110 memcg->high = high; 5111 5112 nr_pages = page_counter_read(&memcg->memory); 5113 if (nr_pages > high) 5114 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5115 GFP_KERNEL, true); 5116 5117 memcg_wb_domain_size_changed(memcg); 5118 return nbytes; 5119 } 5120 5121 static int memory_max_show(struct seq_file *m, void *v) 5122 { 5123 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5124 unsigned long max = READ_ONCE(memcg->memory.limit); 5125 5126 if (max == PAGE_COUNTER_MAX) 5127 seq_puts(m, "max\n"); 5128 else 5129 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5130 5131 return 0; 5132 } 5133 5134 static ssize_t memory_max_write(struct kernfs_open_file *of, 5135 char *buf, size_t nbytes, loff_t off) 5136 { 5137 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5138 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5139 bool drained = false; 5140 unsigned long max; 5141 int err; 5142 5143 buf = strstrip(buf); 5144 err = page_counter_memparse(buf, "max", &max); 5145 if (err) 5146 return err; 5147 5148 xchg(&memcg->memory.limit, max); 5149 5150 for (;;) { 5151 unsigned long nr_pages = page_counter_read(&memcg->memory); 5152 5153 if (nr_pages <= max) 5154 break; 5155 5156 if (signal_pending(current)) { 5157 err = -EINTR; 5158 break; 5159 } 5160 5161 if (!drained) { 5162 drain_all_stock(memcg); 5163 drained = true; 5164 continue; 5165 } 5166 5167 if (nr_reclaims) { 5168 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5169 GFP_KERNEL, true)) 5170 nr_reclaims--; 5171 continue; 5172 } 5173 5174 mem_cgroup_events(memcg, MEMCG_OOM, 1); 5175 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5176 break; 5177 } 5178 5179 memcg_wb_domain_size_changed(memcg); 5180 return nbytes; 5181 } 5182 5183 static int memory_events_show(struct seq_file *m, void *v) 5184 { 5185 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5186 5187 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5188 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5189 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5190 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5191 5192 return 0; 5193 } 5194 5195 static int memory_stat_show(struct seq_file *m, void *v) 5196 { 5197 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5198 unsigned long stat[MEMCG_NR_STAT]; 5199 unsigned long events[MEMCG_NR_EVENTS]; 5200 int i; 5201 5202 /* 5203 * Provide statistics on the state of the memory subsystem as 5204 * well as cumulative event counters that show past behavior. 5205 * 5206 * This list is ordered following a combination of these gradients: 5207 * 1) generic big picture -> specifics and details 5208 * 2) reflecting userspace activity -> reflecting kernel heuristics 5209 * 5210 * Current memory state: 5211 */ 5212 5213 tree_stat(memcg, stat); 5214 tree_events(memcg, events); 5215 5216 seq_printf(m, "anon %llu\n", 5217 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE); 5218 seq_printf(m, "file %llu\n", 5219 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE); 5220 seq_printf(m, "kernel_stack %llu\n", 5221 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024); 5222 seq_printf(m, "slab %llu\n", 5223 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] + 5224 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5225 seq_printf(m, "sock %llu\n", 5226 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5227 5228 seq_printf(m, "file_mapped %llu\n", 5229 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE); 5230 seq_printf(m, "file_dirty %llu\n", 5231 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE); 5232 seq_printf(m, "file_writeback %llu\n", 5233 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE); 5234 5235 for (i = 0; i < NR_LRU_LISTS; i++) { 5236 struct mem_cgroup *mi; 5237 unsigned long val = 0; 5238 5239 for_each_mem_cgroup_tree(mi, memcg) 5240 val += mem_cgroup_nr_lru_pages(mi, BIT(i)); 5241 seq_printf(m, "%s %llu\n", 5242 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5243 } 5244 5245 seq_printf(m, "slab_reclaimable %llu\n", 5246 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE); 5247 seq_printf(m, "slab_unreclaimable %llu\n", 5248 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5249 5250 /* Accumulated memory events */ 5251 5252 seq_printf(m, "pgfault %lu\n", 5253 events[MEM_CGROUP_EVENTS_PGFAULT]); 5254 seq_printf(m, "pgmajfault %lu\n", 5255 events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 5256 5257 return 0; 5258 } 5259 5260 static struct cftype memory_files[] = { 5261 { 5262 .name = "current", 5263 .flags = CFTYPE_NOT_ON_ROOT, 5264 .read_u64 = memory_current_read, 5265 }, 5266 { 5267 .name = "low", 5268 .flags = CFTYPE_NOT_ON_ROOT, 5269 .seq_show = memory_low_show, 5270 .write = memory_low_write, 5271 }, 5272 { 5273 .name = "high", 5274 .flags = CFTYPE_NOT_ON_ROOT, 5275 .seq_show = memory_high_show, 5276 .write = memory_high_write, 5277 }, 5278 { 5279 .name = "max", 5280 .flags = CFTYPE_NOT_ON_ROOT, 5281 .seq_show = memory_max_show, 5282 .write = memory_max_write, 5283 }, 5284 { 5285 .name = "events", 5286 .flags = CFTYPE_NOT_ON_ROOT, 5287 .file_offset = offsetof(struct mem_cgroup, events_file), 5288 .seq_show = memory_events_show, 5289 }, 5290 { 5291 .name = "stat", 5292 .flags = CFTYPE_NOT_ON_ROOT, 5293 .seq_show = memory_stat_show, 5294 }, 5295 { } /* terminate */ 5296 }; 5297 5298 struct cgroup_subsys memory_cgrp_subsys = { 5299 .css_alloc = mem_cgroup_css_alloc, 5300 .css_online = mem_cgroup_css_online, 5301 .css_offline = mem_cgroup_css_offline, 5302 .css_released = mem_cgroup_css_released, 5303 .css_free = mem_cgroup_css_free, 5304 .css_reset = mem_cgroup_css_reset, 5305 .can_attach = mem_cgroup_can_attach, 5306 .cancel_attach = mem_cgroup_cancel_attach, 5307 .post_attach = mem_cgroup_move_task, 5308 .bind = mem_cgroup_bind, 5309 .dfl_cftypes = memory_files, 5310 .legacy_cftypes = mem_cgroup_legacy_files, 5311 .early_init = 0, 5312 }; 5313 5314 /** 5315 * mem_cgroup_low - check if memory consumption is below the normal range 5316 * @root: the highest ancestor to consider 5317 * @memcg: the memory cgroup to check 5318 * 5319 * Returns %true if memory consumption of @memcg, and that of all 5320 * configurable ancestors up to @root, is below the normal range. 5321 */ 5322 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5323 { 5324 if (mem_cgroup_disabled()) 5325 return false; 5326 5327 /* 5328 * The toplevel group doesn't have a configurable range, so 5329 * it's never low when looked at directly, and it is not 5330 * considered an ancestor when assessing the hierarchy. 5331 */ 5332 5333 if (memcg == root_mem_cgroup) 5334 return false; 5335 5336 if (page_counter_read(&memcg->memory) >= memcg->low) 5337 return false; 5338 5339 while (memcg != root) { 5340 memcg = parent_mem_cgroup(memcg); 5341 5342 if (memcg == root_mem_cgroup) 5343 break; 5344 5345 if (page_counter_read(&memcg->memory) >= memcg->low) 5346 return false; 5347 } 5348 return true; 5349 } 5350 5351 /** 5352 * mem_cgroup_try_charge - try charging a page 5353 * @page: page to charge 5354 * @mm: mm context of the victim 5355 * @gfp_mask: reclaim mode 5356 * @memcgp: charged memcg return 5357 * @compound: charge the page as compound or small page 5358 * 5359 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5360 * pages according to @gfp_mask if necessary. 5361 * 5362 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5363 * Otherwise, an error code is returned. 5364 * 5365 * After page->mapping has been set up, the caller must finalize the 5366 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5367 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5368 */ 5369 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5370 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5371 bool compound) 5372 { 5373 struct mem_cgroup *memcg = NULL; 5374 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5375 int ret = 0; 5376 5377 if (mem_cgroup_disabled()) 5378 goto out; 5379 5380 if (PageSwapCache(page)) { 5381 /* 5382 * Every swap fault against a single page tries to charge the 5383 * page, bail as early as possible. shmem_unuse() encounters 5384 * already charged pages, too. The USED bit is protected by 5385 * the page lock, which serializes swap cache removal, which 5386 * in turn serializes uncharging. 5387 */ 5388 VM_BUG_ON_PAGE(!PageLocked(page), page); 5389 if (page->mem_cgroup) 5390 goto out; 5391 5392 if (do_swap_account) { 5393 swp_entry_t ent = { .val = page_private(page), }; 5394 unsigned short id = lookup_swap_cgroup_id(ent); 5395 5396 rcu_read_lock(); 5397 memcg = mem_cgroup_from_id(id); 5398 if (memcg && !css_tryget_online(&memcg->css)) 5399 memcg = NULL; 5400 rcu_read_unlock(); 5401 } 5402 } 5403 5404 if (!memcg) 5405 memcg = get_mem_cgroup_from_mm(mm); 5406 5407 ret = try_charge(memcg, gfp_mask, nr_pages); 5408 5409 css_put(&memcg->css); 5410 out: 5411 *memcgp = memcg; 5412 return ret; 5413 } 5414 5415 /** 5416 * mem_cgroup_commit_charge - commit a page charge 5417 * @page: page to charge 5418 * @memcg: memcg to charge the page to 5419 * @lrucare: page might be on LRU already 5420 * @compound: charge the page as compound or small page 5421 * 5422 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5423 * after page->mapping has been set up. This must happen atomically 5424 * as part of the page instantiation, i.e. under the page table lock 5425 * for anonymous pages, under the page lock for page and swap cache. 5426 * 5427 * In addition, the page must not be on the LRU during the commit, to 5428 * prevent racing with task migration. If it might be, use @lrucare. 5429 * 5430 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5431 */ 5432 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5433 bool lrucare, bool compound) 5434 { 5435 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5436 5437 VM_BUG_ON_PAGE(!page->mapping, page); 5438 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5439 5440 if (mem_cgroup_disabled()) 5441 return; 5442 /* 5443 * Swap faults will attempt to charge the same page multiple 5444 * times. But reuse_swap_page() might have removed the page 5445 * from swapcache already, so we can't check PageSwapCache(). 5446 */ 5447 if (!memcg) 5448 return; 5449 5450 commit_charge(page, memcg, lrucare); 5451 5452 local_irq_disable(); 5453 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 5454 memcg_check_events(memcg, page); 5455 local_irq_enable(); 5456 5457 if (do_memsw_account() && PageSwapCache(page)) { 5458 swp_entry_t entry = { .val = page_private(page) }; 5459 /* 5460 * The swap entry might not get freed for a long time, 5461 * let's not wait for it. The page already received a 5462 * memory+swap charge, drop the swap entry duplicate. 5463 */ 5464 mem_cgroup_uncharge_swap(entry); 5465 } 5466 } 5467 5468 /** 5469 * mem_cgroup_cancel_charge - cancel a page charge 5470 * @page: page to charge 5471 * @memcg: memcg to charge the page to 5472 * @compound: charge the page as compound or small page 5473 * 5474 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5475 */ 5476 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 5477 bool compound) 5478 { 5479 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5480 5481 if (mem_cgroup_disabled()) 5482 return; 5483 /* 5484 * Swap faults will attempt to charge the same page multiple 5485 * times. But reuse_swap_page() might have removed the page 5486 * from swapcache already, so we can't check PageSwapCache(). 5487 */ 5488 if (!memcg) 5489 return; 5490 5491 cancel_charge(memcg, nr_pages); 5492 } 5493 5494 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5495 unsigned long nr_anon, unsigned long nr_file, 5496 unsigned long nr_huge, unsigned long nr_kmem, 5497 struct page *dummy_page) 5498 { 5499 unsigned long nr_pages = nr_anon + nr_file + nr_kmem; 5500 unsigned long flags; 5501 5502 if (!mem_cgroup_is_root(memcg)) { 5503 page_counter_uncharge(&memcg->memory, nr_pages); 5504 if (do_memsw_account()) 5505 page_counter_uncharge(&memcg->memsw, nr_pages); 5506 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem) 5507 page_counter_uncharge(&memcg->kmem, nr_kmem); 5508 memcg_oom_recover(memcg); 5509 } 5510 5511 local_irq_save(flags); 5512 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5513 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5514 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5515 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5516 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5517 memcg_check_events(memcg, dummy_page); 5518 local_irq_restore(flags); 5519 5520 if (!mem_cgroup_is_root(memcg)) 5521 css_put_many(&memcg->css, nr_pages); 5522 } 5523 5524 static void uncharge_list(struct list_head *page_list) 5525 { 5526 struct mem_cgroup *memcg = NULL; 5527 unsigned long nr_anon = 0; 5528 unsigned long nr_file = 0; 5529 unsigned long nr_huge = 0; 5530 unsigned long nr_kmem = 0; 5531 unsigned long pgpgout = 0; 5532 struct list_head *next; 5533 struct page *page; 5534 5535 /* 5536 * Note that the list can be a single page->lru; hence the 5537 * do-while loop instead of a simple list_for_each_entry(). 5538 */ 5539 next = page_list->next; 5540 do { 5541 page = list_entry(next, struct page, lru); 5542 next = page->lru.next; 5543 5544 VM_BUG_ON_PAGE(PageLRU(page), page); 5545 VM_BUG_ON_PAGE(page_count(page), page); 5546 5547 if (!page->mem_cgroup) 5548 continue; 5549 5550 /* 5551 * Nobody should be changing or seriously looking at 5552 * page->mem_cgroup at this point, we have fully 5553 * exclusive access to the page. 5554 */ 5555 5556 if (memcg != page->mem_cgroup) { 5557 if (memcg) { 5558 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5559 nr_huge, nr_kmem, page); 5560 pgpgout = nr_anon = nr_file = 5561 nr_huge = nr_kmem = 0; 5562 } 5563 memcg = page->mem_cgroup; 5564 } 5565 5566 if (!PageKmemcg(page)) { 5567 unsigned int nr_pages = 1; 5568 5569 if (PageTransHuge(page)) { 5570 nr_pages <<= compound_order(page); 5571 nr_huge += nr_pages; 5572 } 5573 if (PageAnon(page)) 5574 nr_anon += nr_pages; 5575 else 5576 nr_file += nr_pages; 5577 pgpgout++; 5578 } else { 5579 nr_kmem += 1 << compound_order(page); 5580 __ClearPageKmemcg(page); 5581 } 5582 5583 page->mem_cgroup = NULL; 5584 } while (next != page_list); 5585 5586 if (memcg) 5587 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5588 nr_huge, nr_kmem, page); 5589 } 5590 5591 /** 5592 * mem_cgroup_uncharge - uncharge a page 5593 * @page: page to uncharge 5594 * 5595 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5596 * mem_cgroup_commit_charge(). 5597 */ 5598 void mem_cgroup_uncharge(struct page *page) 5599 { 5600 if (mem_cgroup_disabled()) 5601 return; 5602 5603 /* Don't touch page->lru of any random page, pre-check: */ 5604 if (!page->mem_cgroup) 5605 return; 5606 5607 INIT_LIST_HEAD(&page->lru); 5608 uncharge_list(&page->lru); 5609 } 5610 5611 /** 5612 * mem_cgroup_uncharge_list - uncharge a list of page 5613 * @page_list: list of pages to uncharge 5614 * 5615 * Uncharge a list of pages previously charged with 5616 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5617 */ 5618 void mem_cgroup_uncharge_list(struct list_head *page_list) 5619 { 5620 if (mem_cgroup_disabled()) 5621 return; 5622 5623 if (!list_empty(page_list)) 5624 uncharge_list(page_list); 5625 } 5626 5627 /** 5628 * mem_cgroup_migrate - charge a page's replacement 5629 * @oldpage: currently circulating page 5630 * @newpage: replacement page 5631 * 5632 * Charge @newpage as a replacement page for @oldpage. @oldpage will 5633 * be uncharged upon free. 5634 * 5635 * Both pages must be locked, @newpage->mapping must be set up. 5636 */ 5637 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 5638 { 5639 struct mem_cgroup *memcg; 5640 unsigned int nr_pages; 5641 bool compound; 5642 unsigned long flags; 5643 5644 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5645 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5646 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5647 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5648 newpage); 5649 5650 if (mem_cgroup_disabled()) 5651 return; 5652 5653 /* Page cache replacement: new page already charged? */ 5654 if (newpage->mem_cgroup) 5655 return; 5656 5657 /* Swapcache readahead pages can get replaced before being charged */ 5658 memcg = oldpage->mem_cgroup; 5659 if (!memcg) 5660 return; 5661 5662 /* Force-charge the new page. The old one will be freed soon */ 5663 compound = PageTransHuge(newpage); 5664 nr_pages = compound ? hpage_nr_pages(newpage) : 1; 5665 5666 page_counter_charge(&memcg->memory, nr_pages); 5667 if (do_memsw_account()) 5668 page_counter_charge(&memcg->memsw, nr_pages); 5669 css_get_many(&memcg->css, nr_pages); 5670 5671 commit_charge(newpage, memcg, false); 5672 5673 local_irq_save(flags); 5674 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5675 memcg_check_events(memcg, newpage); 5676 local_irq_restore(flags); 5677 } 5678 5679 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5680 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5681 5682 void sock_update_memcg(struct sock *sk) 5683 { 5684 struct mem_cgroup *memcg; 5685 5686 /* Socket cloning can throw us here with sk_cgrp already 5687 * filled. It won't however, necessarily happen from 5688 * process context. So the test for root memcg given 5689 * the current task's memcg won't help us in this case. 5690 * 5691 * Respecting the original socket's memcg is a better 5692 * decision in this case. 5693 */ 5694 if (sk->sk_memcg) { 5695 BUG_ON(mem_cgroup_is_root(sk->sk_memcg)); 5696 css_get(&sk->sk_memcg->css); 5697 return; 5698 } 5699 5700 rcu_read_lock(); 5701 memcg = mem_cgroup_from_task(current); 5702 if (memcg == root_mem_cgroup) 5703 goto out; 5704 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 5705 goto out; 5706 if (css_tryget_online(&memcg->css)) 5707 sk->sk_memcg = memcg; 5708 out: 5709 rcu_read_unlock(); 5710 } 5711 EXPORT_SYMBOL(sock_update_memcg); 5712 5713 void sock_release_memcg(struct sock *sk) 5714 { 5715 WARN_ON(!sk->sk_memcg); 5716 css_put(&sk->sk_memcg->css); 5717 } 5718 5719 /** 5720 * mem_cgroup_charge_skmem - charge socket memory 5721 * @memcg: memcg to charge 5722 * @nr_pages: number of pages to charge 5723 * 5724 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 5725 * @memcg's configured limit, %false if the charge had to be forced. 5726 */ 5727 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5728 { 5729 gfp_t gfp_mask = GFP_KERNEL; 5730 5731 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5732 struct page_counter *fail; 5733 5734 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 5735 memcg->tcpmem_pressure = 0; 5736 return true; 5737 } 5738 page_counter_charge(&memcg->tcpmem, nr_pages); 5739 memcg->tcpmem_pressure = 1; 5740 return false; 5741 } 5742 5743 /* Don't block in the packet receive path */ 5744 if (in_softirq()) 5745 gfp_mask = GFP_NOWAIT; 5746 5747 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages); 5748 5749 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 5750 return true; 5751 5752 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 5753 return false; 5754 } 5755 5756 /** 5757 * mem_cgroup_uncharge_skmem - uncharge socket memory 5758 * @memcg - memcg to uncharge 5759 * @nr_pages - number of pages to uncharge 5760 */ 5761 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5762 { 5763 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5764 page_counter_uncharge(&memcg->tcpmem, nr_pages); 5765 return; 5766 } 5767 5768 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages); 5769 5770 page_counter_uncharge(&memcg->memory, nr_pages); 5771 css_put_many(&memcg->css, nr_pages); 5772 } 5773 5774 static int __init cgroup_memory(char *s) 5775 { 5776 char *token; 5777 5778 while ((token = strsep(&s, ",")) != NULL) { 5779 if (!*token) 5780 continue; 5781 if (!strcmp(token, "nosocket")) 5782 cgroup_memory_nosocket = true; 5783 if (!strcmp(token, "nokmem")) 5784 cgroup_memory_nokmem = true; 5785 } 5786 return 0; 5787 } 5788 __setup("cgroup.memory=", cgroup_memory); 5789 5790 /* 5791 * subsys_initcall() for memory controller. 5792 * 5793 * Some parts like hotcpu_notifier() have to be initialized from this context 5794 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 5795 * everything that doesn't depend on a specific mem_cgroup structure should 5796 * be initialized from here. 5797 */ 5798 static int __init mem_cgroup_init(void) 5799 { 5800 int cpu, node; 5801 5802 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5803 5804 for_each_possible_cpu(cpu) 5805 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5806 drain_local_stock); 5807 5808 for_each_node(node) { 5809 struct mem_cgroup_tree_per_node *rtpn; 5810 5811 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5812 node_online(node) ? node : NUMA_NO_NODE); 5813 5814 rtpn->rb_root = RB_ROOT; 5815 spin_lock_init(&rtpn->lock); 5816 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5817 } 5818 5819 return 0; 5820 } 5821 subsys_initcall(mem_cgroup_init); 5822 5823 #ifdef CONFIG_MEMCG_SWAP 5824 /** 5825 * mem_cgroup_swapout - transfer a memsw charge to swap 5826 * @page: page whose memsw charge to transfer 5827 * @entry: swap entry to move the charge to 5828 * 5829 * Transfer the memsw charge of @page to @entry. 5830 */ 5831 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5832 { 5833 struct mem_cgroup *memcg, *swap_memcg; 5834 unsigned short oldid; 5835 5836 VM_BUG_ON_PAGE(PageLRU(page), page); 5837 VM_BUG_ON_PAGE(page_count(page), page); 5838 5839 if (!do_memsw_account()) 5840 return; 5841 5842 memcg = page->mem_cgroup; 5843 5844 /* Readahead page, never charged */ 5845 if (!memcg) 5846 return; 5847 5848 /* 5849 * In case the memcg owning these pages has been offlined and doesn't 5850 * have an ID allocated to it anymore, charge the closest online 5851 * ancestor for the swap instead and transfer the memory+swap charge. 5852 */ 5853 swap_memcg = mem_cgroup_id_get_online(memcg); 5854 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); 5855 VM_BUG_ON_PAGE(oldid, page); 5856 mem_cgroup_swap_statistics(swap_memcg, true); 5857 5858 page->mem_cgroup = NULL; 5859 5860 if (!mem_cgroup_is_root(memcg)) 5861 page_counter_uncharge(&memcg->memory, 1); 5862 5863 if (memcg != swap_memcg) { 5864 if (!mem_cgroup_is_root(swap_memcg)) 5865 page_counter_charge(&swap_memcg->memsw, 1); 5866 page_counter_uncharge(&memcg->memsw, 1); 5867 } 5868 5869 /* 5870 * Interrupts should be disabled here because the caller holds the 5871 * mapping->tree_lock lock which is taken with interrupts-off. It is 5872 * important here to have the interrupts disabled because it is the 5873 * only synchronisation we have for udpating the per-CPU variables. 5874 */ 5875 VM_BUG_ON(!irqs_disabled()); 5876 mem_cgroup_charge_statistics(memcg, page, false, -1); 5877 memcg_check_events(memcg, page); 5878 5879 if (!mem_cgroup_is_root(memcg)) 5880 css_put(&memcg->css); 5881 } 5882 5883 /* 5884 * mem_cgroup_try_charge_swap - try charging a swap entry 5885 * @page: page being added to swap 5886 * @entry: swap entry to charge 5887 * 5888 * Try to charge @entry to the memcg that @page belongs to. 5889 * 5890 * Returns 0 on success, -ENOMEM on failure. 5891 */ 5892 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 5893 { 5894 struct mem_cgroup *memcg; 5895 struct page_counter *counter; 5896 unsigned short oldid; 5897 5898 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) 5899 return 0; 5900 5901 memcg = page->mem_cgroup; 5902 5903 /* Readahead page, never charged */ 5904 if (!memcg) 5905 return 0; 5906 5907 memcg = mem_cgroup_id_get_online(memcg); 5908 5909 if (!mem_cgroup_is_root(memcg) && 5910 !page_counter_try_charge(&memcg->swap, 1, &counter)) { 5911 mem_cgroup_id_put(memcg); 5912 return -ENOMEM; 5913 } 5914 5915 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5916 VM_BUG_ON_PAGE(oldid, page); 5917 mem_cgroup_swap_statistics(memcg, true); 5918 5919 return 0; 5920 } 5921 5922 /** 5923 * mem_cgroup_uncharge_swap - uncharge a swap entry 5924 * @entry: swap entry to uncharge 5925 * 5926 * Drop the swap charge associated with @entry. 5927 */ 5928 void mem_cgroup_uncharge_swap(swp_entry_t entry) 5929 { 5930 struct mem_cgroup *memcg; 5931 unsigned short id; 5932 5933 if (!do_swap_account) 5934 return; 5935 5936 id = swap_cgroup_record(entry, 0); 5937 rcu_read_lock(); 5938 memcg = mem_cgroup_from_id(id); 5939 if (memcg) { 5940 if (!mem_cgroup_is_root(memcg)) { 5941 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5942 page_counter_uncharge(&memcg->swap, 1); 5943 else 5944 page_counter_uncharge(&memcg->memsw, 1); 5945 } 5946 mem_cgroup_swap_statistics(memcg, false); 5947 mem_cgroup_id_put(memcg); 5948 } 5949 rcu_read_unlock(); 5950 } 5951 5952 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5953 { 5954 long nr_swap_pages = get_nr_swap_pages(); 5955 5956 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5957 return nr_swap_pages; 5958 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 5959 nr_swap_pages = min_t(long, nr_swap_pages, 5960 READ_ONCE(memcg->swap.limit) - 5961 page_counter_read(&memcg->swap)); 5962 return nr_swap_pages; 5963 } 5964 5965 bool mem_cgroup_swap_full(struct page *page) 5966 { 5967 struct mem_cgroup *memcg; 5968 5969 VM_BUG_ON_PAGE(!PageLocked(page), page); 5970 5971 if (vm_swap_full()) 5972 return true; 5973 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5974 return false; 5975 5976 memcg = page->mem_cgroup; 5977 if (!memcg) 5978 return false; 5979 5980 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 5981 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit) 5982 return true; 5983 5984 return false; 5985 } 5986 5987 /* for remember boot option*/ 5988 #ifdef CONFIG_MEMCG_SWAP_ENABLED 5989 static int really_do_swap_account __initdata = 1; 5990 #else 5991 static int really_do_swap_account __initdata; 5992 #endif 5993 5994 static int __init enable_swap_account(char *s) 5995 { 5996 if (!strcmp(s, "1")) 5997 really_do_swap_account = 1; 5998 else if (!strcmp(s, "0")) 5999 really_do_swap_account = 0; 6000 return 1; 6001 } 6002 __setup("swapaccount=", enable_swap_account); 6003 6004 static u64 swap_current_read(struct cgroup_subsys_state *css, 6005 struct cftype *cft) 6006 { 6007 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6008 6009 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 6010 } 6011 6012 static int swap_max_show(struct seq_file *m, void *v) 6013 { 6014 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 6015 unsigned long max = READ_ONCE(memcg->swap.limit); 6016 6017 if (max == PAGE_COUNTER_MAX) 6018 seq_puts(m, "max\n"); 6019 else 6020 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 6021 6022 return 0; 6023 } 6024 6025 static ssize_t swap_max_write(struct kernfs_open_file *of, 6026 char *buf, size_t nbytes, loff_t off) 6027 { 6028 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6029 unsigned long max; 6030 int err; 6031 6032 buf = strstrip(buf); 6033 err = page_counter_memparse(buf, "max", &max); 6034 if (err) 6035 return err; 6036 6037 mutex_lock(&memcg_limit_mutex); 6038 err = page_counter_limit(&memcg->swap, max); 6039 mutex_unlock(&memcg_limit_mutex); 6040 if (err) 6041 return err; 6042 6043 return nbytes; 6044 } 6045 6046 static struct cftype swap_files[] = { 6047 { 6048 .name = "swap.current", 6049 .flags = CFTYPE_NOT_ON_ROOT, 6050 .read_u64 = swap_current_read, 6051 }, 6052 { 6053 .name = "swap.max", 6054 .flags = CFTYPE_NOT_ON_ROOT, 6055 .seq_show = swap_max_show, 6056 .write = swap_max_write, 6057 }, 6058 { } /* terminate */ 6059 }; 6060 6061 static struct cftype memsw_cgroup_files[] = { 6062 { 6063 .name = "memsw.usage_in_bytes", 6064 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6065 .read_u64 = mem_cgroup_read_u64, 6066 }, 6067 { 6068 .name = "memsw.max_usage_in_bytes", 6069 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6070 .write = mem_cgroup_reset, 6071 .read_u64 = mem_cgroup_read_u64, 6072 }, 6073 { 6074 .name = "memsw.limit_in_bytes", 6075 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6076 .write = mem_cgroup_write, 6077 .read_u64 = mem_cgroup_read_u64, 6078 }, 6079 { 6080 .name = "memsw.failcnt", 6081 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6082 .write = mem_cgroup_reset, 6083 .read_u64 = mem_cgroup_read_u64, 6084 }, 6085 { }, /* terminate */ 6086 }; 6087 6088 static int __init mem_cgroup_swap_init(void) 6089 { 6090 if (!mem_cgroup_disabled() && really_do_swap_account) { 6091 do_swap_account = 1; 6092 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, 6093 swap_files)); 6094 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 6095 memsw_cgroup_files)); 6096 } 6097 return 0; 6098 } 6099 subsys_initcall(mem_cgroup_swap_init); 6100 6101 #endif /* CONFIG_MEMCG_SWAP */ 6102