/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;
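/*
 * Illustrative note: both flags above are boot-time switches. Booting with,
 * e.g., "cgroup.memory=nosocket,nokmem" on the kernel command line sets
 * them (the option parsing itself is outside this excerpt).
 */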
/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * The register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * The unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removal. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
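/*
 * Illustrative only: userspace arms the mem_cgroup_event machinery above
 * through the cgroup v1 "cgroup.event_control" file. A hypothetical
 * sequence, with file paths assumed for the example:
 *
 *	efd = eventfd(0, 0);
 *	cfd = open(".../memory.oom_control", O_RDONLY);
 *	write(".../cgroup.event_control", "<efd> <cfd>", len);
 *	read(efd, &cnt, 8);	(blocks until the event fires)
 */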
/* Support for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 * it works better in sparse environments, where we have a lot of memcgs
 * but only a few kmem-limited ones. If we had, for instance, 200 memcgs
 * and none but the 200th was kmem-limited, we would need a 200-entry
 * array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}
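/*
 * Illustrative only: code that walks arrays sized by memcg_nr_cache_ids
 * (the list_lru and slab internals, for example) brackets the walk with
 * the helpers above so the arrays cannot be resized midway:
 *
 *	memcg_get_cache_ids();
 *	for (i = 0; i < memcg_nr_cache_ids; i++)
 *		...inspect entry i...
 *	memcg_put_cache_ids();
 */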
/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, the css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}
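/*
 * Illustrative only: a hypothetical caller can attribute a page to a
 * cgroup inode for tracing or writeback bookkeeping with, e.g.:
 *
 *	ino = cgroup_ino(mem_cgroup_css_from_page(page)->cgroup);
 */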
/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * We have to update all ancestors when the hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * if the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return the page count for a single (non-recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value. We could therefore implement a
 * similar periodic synchronization of the counters in the memcg.
 *
 * But this _read() function is currently used for the user interface. The
 * user accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provided a quick-and-fuzzy
 * read, we would always have to visit all online cpus and sum them up. So,
 * for now, unnecessary synchronization is not implemented. (It is only
 * implemented for cpu hotplug.)
 *
 * If there are kernel-internal uses that can tolerate a not-exact value,
 * and reading all cpu values turns out to be a performance bottleneck in
 * some common workload, thresholds and synchronization as in vmstat[]
 * should be implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative. Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	unsigned long nr = 0;
	struct mem_cgroup_per_node *mz;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		mz = mem_cgroup_nodeinfo(memcg, nid);
		nr += mz->lru_size[lru];
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
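/*
 * Illustrative arithmetic: with THRESHOLDS_EVENTS_TARGET == 128, if this
 * CPU's nr_page_events has advanced to 300 while the stored target is 256,
 * the check above fires ((long)256 - 300 < 0), returns true and rearms the
 * target at 300 + 128 = 428. Thresholds are thus polled roughly once per
 * 128 page events, soft limit and numainfo updates once per 1024.
 */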
684 * 685 */ 686 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 687 { 688 /* threshold event is triggered in finer grain than soft limit */ 689 if (unlikely(mem_cgroup_event_ratelimit(memcg, 690 MEM_CGROUP_TARGET_THRESH))) { 691 bool do_softlimit; 692 bool do_numainfo __maybe_unused; 693 694 do_softlimit = mem_cgroup_event_ratelimit(memcg, 695 MEM_CGROUP_TARGET_SOFTLIMIT); 696 #if MAX_NUMNODES > 1 697 do_numainfo = mem_cgroup_event_ratelimit(memcg, 698 MEM_CGROUP_TARGET_NUMAINFO); 699 #endif 700 mem_cgroup_threshold(memcg); 701 if (unlikely(do_softlimit)) 702 mem_cgroup_update_tree(memcg, page); 703 #if MAX_NUMNODES > 1 704 if (unlikely(do_numainfo)) 705 atomic_inc(&memcg->numainfo_events); 706 #endif 707 } 708 } 709 710 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 711 { 712 /* 713 * mm_update_next_owner() may clear mm->owner to NULL 714 * if it races with swapoff, page migration, etc. 715 * So this can be called with p == NULL. 716 */ 717 if (unlikely(!p)) 718 return NULL; 719 720 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 721 } 722 EXPORT_SYMBOL(mem_cgroup_from_task); 723 724 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 725 { 726 struct mem_cgroup *memcg = NULL; 727 728 rcu_read_lock(); 729 do { 730 /* 731 * Page cache insertions can happen withou an 732 * actual mm context, e.g. during disk probing 733 * on boot, loopback IO, acct() writes etc. 734 */ 735 if (unlikely(!mm)) 736 memcg = root_mem_cgroup; 737 else { 738 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 739 if (unlikely(!memcg)) 740 memcg = root_mem_cgroup; 741 } 742 } while (!css_tryget_online(&memcg->css)); 743 rcu_read_unlock(); 744 return memcg; 745 } 746 747 /** 748 * mem_cgroup_iter - iterate over memory cgroup hierarchy 749 * @root: hierarchy root 750 * @prev: previously returned memcg, NULL on first invocation 751 * @reclaim: cookie for shared reclaim walks, NULL for full walks 752 * 753 * Returns references to children of the hierarchy below @root, or 754 * @root itself, or %NULL after a full round-trip. 755 * 756 * Caller must pass the return value in @prev on subsequent 757 * invocations for reference counting, or use mem_cgroup_iter_break() 758 * to cancel a hierarchy walk before the round-trip is complete. 759 * 760 * Reclaimers can specify a zone and a priority level in @reclaim to 761 * divide up the memcgs in the hierarchy among all concurrent 762 * reclaimers operating on the same zone and priority. 
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * The css reference reached zero, so iter->position
			 * will be cleared by ->css_released. However, we
			 * should not rely on this happening soon, because
			 * ->css_released is called from a work queue, and
			 * by busy-waiting we might block it. So we clear
			 * iter->position right away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}
/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;
	bool empty;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = mz->lru_size + lru;
	empty = list_empty(lruvec->lists + lru);

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0 || empty != !size,
		"%s(%p, %d, %d): lru_size %ld but %sempty\n",
		__func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the
		 * oom killer still needs to detect if they have already been
		 * oom killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}
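/*
 * Illustrative only: the OOM killer uses the predicate above to skip tasks
 * outside an OOMing hierarchy, conceptually:
 *
 *	if (memcg && !task_in_mem_cgroup(task, memcg))
 *		...task is not eligible for this memcg OOM kill...
 */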
/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of a
 * moving cgroup. This is used for waiting out the high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
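/*
 * Illustrative arithmetic: K() converts a page count to kilobytes. With
 * 4K pages (PAGE_SHIFT == 12), K(x) == x << 2, so K(256) == 1024.
 */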
1145 * @memcg: The memory cgroup that went over limit 1146 * @p: Task that is going to be killed 1147 * 1148 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1149 * enabled 1150 */ 1151 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1152 { 1153 struct mem_cgroup *iter; 1154 unsigned int i; 1155 1156 rcu_read_lock(); 1157 1158 if (p) { 1159 pr_info("Task in "); 1160 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1161 pr_cont(" killed as a result of limit of "); 1162 } else { 1163 pr_info("Memory limit reached of cgroup "); 1164 } 1165 1166 pr_cont_cgroup_path(memcg->css.cgroup); 1167 pr_cont("\n"); 1168 1169 rcu_read_unlock(); 1170 1171 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1172 K((u64)page_counter_read(&memcg->memory)), 1173 K((u64)memcg->memory.limit), memcg->memory.failcnt); 1174 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1175 K((u64)page_counter_read(&memcg->memsw)), 1176 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); 1177 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1178 K((u64)page_counter_read(&memcg->kmem)), 1179 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); 1180 1181 for_each_mem_cgroup_tree(iter, memcg) { 1182 pr_info("Memory cgroup stats for "); 1183 pr_cont_cgroup_path(iter->css.cgroup); 1184 pr_cont(":"); 1185 1186 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1187 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1188 continue; 1189 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], 1190 K(mem_cgroup_read_stat(iter, i))); 1191 } 1192 1193 for (i = 0; i < NR_LRU_LISTS; i++) 1194 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1195 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1196 1197 pr_cont("\n"); 1198 } 1199 } 1200 1201 /* 1202 * This function returns the number of memcg under hierarchy tree. Returns 1203 * 1(self count) if no children. 1204 */ 1205 static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1206 { 1207 int num = 0; 1208 struct mem_cgroup *iter; 1209 1210 for_each_mem_cgroup_tree(iter, memcg) 1211 num++; 1212 return num; 1213 } 1214 1215 /* 1216 * Return the memory (and swap, if configured) limit for a memcg. 1217 */ 1218 unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) 1219 { 1220 unsigned long limit; 1221 1222 limit = memcg->memory.limit; 1223 if (mem_cgroup_swappiness(memcg)) { 1224 unsigned long memsw_limit; 1225 unsigned long swap_limit; 1226 1227 memsw_limit = memcg->memsw.limit; 1228 swap_limit = memcg->swap.limit; 1229 swap_limit = min(swap_limit, (unsigned long)total_swap_pages); 1230 limit = min(limit + swap_limit, memsw_limit); 1231 } 1232 return limit; 1233 } 1234 1235 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1236 int order) 1237 { 1238 struct oom_control oc = { 1239 .zonelist = NULL, 1240 .nodemask = NULL, 1241 .memcg = memcg, 1242 .gfp_mask = gfp_mask, 1243 .order = order, 1244 }; 1245 bool ret; 1246 1247 mutex_lock(&oom_lock); 1248 ret = out_of_memory(&oc); 1249 mutex_unlock(&oom_lock); 1250 return ret; 1251 } 1252 1253 #if MAX_NUMNODES > 1 1254 1255 /** 1256 * test_mem_cgroup_node_reclaimable 1257 * @memcg: the target memcg 1258 * @nid: the node ID to be checked. 1259 * @noswap : specify true here if the user wants flle only information. 1260 * 1261 * This function returns whether the specified memcg contains any 1262 * reclaimable pages on a node. Returns true if there are any reclaimable 1263 * pages in the node. 
#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, they will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * For now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree, so we have
		 * to clean up what we set up in the failing subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
1534 * 1535 * That's why we don't do anything here except remember the 1536 * OOM context and then deal with it at the end of the page 1537 * fault when the stack is unwound, the locks are released, 1538 * and when we know whether the fault was overall successful. 1539 */ 1540 css_get(&memcg->css); 1541 current->memcg_in_oom = memcg; 1542 current->memcg_oom_gfp_mask = mask; 1543 current->memcg_oom_order = order; 1544 } 1545 1546 /** 1547 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1548 * @handle: actually kill/wait or just clean up the OOM state 1549 * 1550 * This has to be called at the end of a page fault if the memcg OOM 1551 * handler was enabled. 1552 * 1553 * Memcg supports userspace OOM handling where failed allocations must 1554 * sleep on a waitqueue until the userspace task resolves the 1555 * situation. Sleeping directly in the charge context with all kinds 1556 * of locks held is not a good idea, instead we remember an OOM state 1557 * in the task and mem_cgroup_oom_synchronize() has to be called at 1558 * the end of the page fault to complete the OOM handling. 1559 * 1560 * Returns %true if an ongoing memcg OOM situation was detected and 1561 * completed, %false otherwise. 1562 */ 1563 bool mem_cgroup_oom_synchronize(bool handle) 1564 { 1565 struct mem_cgroup *memcg = current->memcg_in_oom; 1566 struct oom_wait_info owait; 1567 bool locked; 1568 1569 /* OOM is global, do not handle */ 1570 if (!memcg) 1571 return false; 1572 1573 if (!handle) 1574 goto cleanup; 1575 1576 owait.memcg = memcg; 1577 owait.wait.flags = 0; 1578 owait.wait.func = memcg_oom_wake_function; 1579 owait.wait.private = current; 1580 INIT_LIST_HEAD(&owait.wait.task_list); 1581 1582 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1583 mem_cgroup_mark_under_oom(memcg); 1584 1585 locked = mem_cgroup_oom_trylock(memcg); 1586 1587 if (locked) 1588 mem_cgroup_oom_notify(memcg); 1589 1590 if (locked && !memcg->oom_kill_disable) { 1591 mem_cgroup_unmark_under_oom(memcg); 1592 finish_wait(&memcg_oom_waitq, &owait.wait); 1593 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1594 current->memcg_oom_order); 1595 } else { 1596 schedule(); 1597 mem_cgroup_unmark_under_oom(memcg); 1598 finish_wait(&memcg_oom_waitq, &owait.wait); 1599 } 1600 1601 if (locked) { 1602 mem_cgroup_oom_unlock(memcg); 1603 /* 1604 * There is no guarantee that an OOM-lock contender 1605 * sees the wakeups triggered by the OOM kill 1606 * uncharges. Wake any sleepers explicitely. 1607 */ 1608 memcg_oom_recover(memcg); 1609 } 1610 cleanup: 1611 current->memcg_in_oom = NULL; 1612 css_put(&memcg->css); 1613 return true; 1614 } 1615 1616 /** 1617 * lock_page_memcg - lock a page->mem_cgroup binding 1618 * @page: the page 1619 * 1620 * This function protects unlocked LRU pages from being moved to 1621 * another cgroup and stabilizes their page->mem_cgroup binding. 1622 */ 1623 void lock_page_memcg(struct page *page) 1624 { 1625 struct mem_cgroup *memcg; 1626 unsigned long flags; 1627 1628 /* 1629 * The RCU lock is held throughout the transaction. The fast 1630 * path can get away without acquiring the memcg->move_lock 1631 * because page moving starts with an RCU grace period. 
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently. Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}
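/*
 * Illustrative only: a single-page charge normally hits the percpu fast
 * path above. On a miss, try_charge() below charges a full CHARGE_BATCH
 * (32 pages) against the page counters and parks the 31-page surplus in
 * this CPU's stock via refill_stock(), so the next 31 single-page charges
 * avoid the shared page_counter atomics entirely.
 */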
/*
 * Drain stocked charges from this cpu's cache and reset the cached state.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges (val) in the local per-cpu area.
 * They will be consumed by the consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	local_irq_restore(flags);
}

/*
 * Drains all per-CPU charge caches for the given root_memcg, i.e. the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}
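/*
 * Illustrative only: the high limit is enforced lazily. A charge that
 * takes usage past memory.high still succeeds; reclaim_high() then runs
 * either from the worker above (when the breach happens in interrupt
 * context) or from mem_cgroup_handle_over_high() below, on the task's
 * way back to userland.
 */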
/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage. Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
1965 */ 1966 if (mem_cgroup_wait_acct_move(mem_over_limit)) 1967 goto retry; 1968 1969 if (nr_retries--) 1970 goto retry; 1971 1972 if (gfp_mask & __GFP_NOFAIL) 1973 goto force; 1974 1975 if (fatal_signal_pending(current)) 1976 goto force; 1977 1978 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1); 1979 1980 mem_cgroup_oom(mem_over_limit, gfp_mask, 1981 get_order(nr_pages * PAGE_SIZE)); 1982 nomem: 1983 if (!(gfp_mask & __GFP_NOFAIL)) 1984 return -ENOMEM; 1985 force: 1986 /* 1987 * The allocation either can't fail or will lead to more memory 1988 * being freed very soon. Allow memory usage to go over the limit 1989 * temporarily by force-charging it. 1990 */ 1991 page_counter_charge(&memcg->memory, nr_pages); 1992 if (do_memsw_account()) 1993 page_counter_charge(&memcg->memsw, nr_pages); 1994 css_get_many(&memcg->css, nr_pages); 1995 1996 return 0; 1997 1998 done_restock: 1999 css_get_many(&memcg->css, batch); 2000 if (batch > nr_pages) 2001 refill_stock(memcg, batch - nr_pages); 2002 2003 /* 2004 * If the hierarchy is above the normal consumption range, schedule 2005 * reclaim on returning to userland. We can perform reclaim here 2006 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2007 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2008 * not recorded as it most likely matches current's and won't 2009 * change in the meantime. As high limit is checked again before 2010 * reclaim, the cost of mismatch is negligible. 2011 */ 2012 do { 2013 if (page_counter_read(&memcg->memory) > memcg->high) { 2014 /* Don't bother a random interrupted task */ 2015 if (in_interrupt()) { 2016 schedule_work(&memcg->high_work); 2017 break; 2018 } 2019 current->memcg_nr_pages_over_high += batch; 2020 set_notify_resume(current); 2021 break; 2022 } 2023 } while ((memcg = parent_mem_cgroup(memcg))); 2024 2025 return 0; 2026 } 2027 2028 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2029 { 2030 if (mem_cgroup_is_root(memcg)) 2031 return; 2032 2033 page_counter_uncharge(&memcg->memory, nr_pages); 2034 if (do_memsw_account()) 2035 page_counter_uncharge(&memcg->memsw, nr_pages); 2036 2037 css_put_many(&memcg->css, nr_pages); 2038 } 2039 2040 static void lock_page_lru(struct page *page, int *isolated) 2041 { 2042 struct zone *zone = page_zone(page); 2043 2044 spin_lock_irq(zone_lru_lock(zone)); 2045 if (PageLRU(page)) { 2046 struct lruvec *lruvec; 2047 2048 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2049 ClearPageLRU(page); 2050 del_page_from_lru_list(page, lruvec, page_lru(page)); 2051 *isolated = 1; 2052 } else 2053 *isolated = 0; 2054 } 2055 2056 static void unlock_page_lru(struct page *page, int isolated) 2057 { 2058 struct zone *zone = page_zone(page); 2059 2060 if (isolated) { 2061 struct lruvec *lruvec; 2062 2063 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2064 VM_BUG_ON_PAGE(PageLRU(page), page); 2065 SetPageLRU(page); 2066 add_page_to_lru_list(page, lruvec, page_lru(page)); 2067 } 2068 spin_unlock_irq(zone_lru_lock(zone)); 2069 } 2070 2071 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2072 bool lrucare) 2073 { 2074 int isolated; 2075 2076 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2077 2078 /* 2079 * In some cases (e.g. SwapCache and FUSE's splice_buf->radixtree), the 2080 * page may already be on some other mem_cgroup's LRU. Take care of it.
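 *
 * For reference, the charging protocol built from these primitives is,
 * in sketch form (hypothetical caller; install_page() stands in for the
 * page-cache or page-table insertion step):
 *
 *	err = try_charge(memcg, gfp_mask, nr_pages);
 *	if (err)
 *		return err;
 *	err = install_page(page);
 *	if (err)
 *		cancel_charge(memcg, nr_pages);
 *	else
 *		commit_charge(page, memcg, lrucare);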
2081 */ 2082 if (lrucare) 2083 lock_page_lru(page, &isolated); 2084 2085 /* 2086 * Nobody should be changing or seriously looking at 2087 * page->mem_cgroup at this point: 2088 * 2089 * - the page is uncharged 2090 * 2091 * - the page is off-LRU 2092 * 2093 * - an anonymous fault has exclusive page access, except for 2094 * a locked page table 2095 * 2096 * - a page cache insertion, a swapin fault, or a migration 2097 * have the page locked 2098 */ 2099 page->mem_cgroup = memcg; 2100 2101 if (lrucare) 2102 unlock_page_lru(page, isolated); 2103 } 2104 2105 #ifndef CONFIG_SLOB 2106 static int memcg_alloc_cache_id(void) 2107 { 2108 int id, size; 2109 int err; 2110 2111 id = ida_simple_get(&memcg_cache_ida, 2112 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2113 if (id < 0) 2114 return id; 2115 2116 if (id < memcg_nr_cache_ids) 2117 return id; 2118 2119 /* 2120 * There's no space for the new id in memcg_caches arrays, 2121 * so we have to grow them. 2122 */ 2123 down_write(&memcg_cache_ids_sem); 2124 2125 size = 2 * (id + 1); 2126 if (size < MEMCG_CACHES_MIN_SIZE) 2127 size = MEMCG_CACHES_MIN_SIZE; 2128 else if (size > MEMCG_CACHES_MAX_SIZE) 2129 size = MEMCG_CACHES_MAX_SIZE; 2130 2131 err = memcg_update_all_caches(size); 2132 if (!err) 2133 err = memcg_update_all_list_lrus(size); 2134 if (!err) 2135 memcg_nr_cache_ids = size; 2136 2137 up_write(&memcg_cache_ids_sem); 2138 2139 if (err) { 2140 ida_simple_remove(&memcg_cache_ida, id); 2141 return err; 2142 } 2143 return id; 2144 } 2145 2146 static void memcg_free_cache_id(int id) 2147 { 2148 ida_simple_remove(&memcg_cache_ida, id); 2149 } 2150 2151 struct memcg_kmem_cache_create_work { 2152 struct mem_cgroup *memcg; 2153 struct kmem_cache *cachep; 2154 struct work_struct work; 2155 }; 2156 2157 static void memcg_kmem_cache_create_func(struct work_struct *w) 2158 { 2159 struct memcg_kmem_cache_create_work *cw = 2160 container_of(w, struct memcg_kmem_cache_create_work, work); 2161 struct mem_cgroup *memcg = cw->memcg; 2162 struct kmem_cache *cachep = cw->cachep; 2163 2164 memcg_create_kmem_cache(memcg, cachep); 2165 2166 css_put(&memcg->css); 2167 kfree(cw); 2168 } 2169 2170 /* 2171 * Enqueue the creation of a per-memcg kmem_cache. 2172 */ 2173 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2174 struct kmem_cache *cachep) 2175 { 2176 struct memcg_kmem_cache_create_work *cw; 2177 2178 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2179 if (!cw) 2180 return; 2181 2182 css_get(&memcg->css); 2183 2184 cw->memcg = memcg; 2185 cw->cachep = cachep; 2186 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2187 2188 schedule_work(&cw->work); 2189 } 2190 2191 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2192 struct kmem_cache *cachep) 2193 { 2194 /* 2195 * We need to stop accounting when we kmalloc, because if the 2196 * corresponding kmalloc cache is not yet created, the first allocation 2197 * in __memcg_schedule_kmem_cache_create will recurse. 2198 * 2199 * However, it is better to enclose the whole function. Depending on 2200 * the debugging options enabled, INIT_WORK(), for instance, can 2201 * trigger an allocation. This too, will make us recurse. Because at 2202 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2203 * the safest choice is to do it like this, wrapping the whole function. 
2204 */ 2205 current->memcg_kmem_skip_account = 1; 2206 __memcg_schedule_kmem_cache_create(memcg, cachep); 2207 current->memcg_kmem_skip_account = 0; 2208 } 2209 2210 static inline bool memcg_kmem_bypass(void) 2211 { 2212 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2213 return true; 2214 return false; 2215 } 2216 2217 /** 2218 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2219 * @cachep: the original global kmem cache 2220 * 2221 * Return the kmem_cache we're supposed to use for a slab allocation. 2222 * We try to use the current memcg's version of the cache. 2223 * 2224 * If the cache does not exist yet and we are the first user of it, we 2225 * create it asynchronously in a workqueue and let the current allocation 2226 * go through with the original cache. 2227 * 2228 * This function takes a reference to the cache it returns to ensure it 2229 * won't get destroyed while we are working with it. Once the caller is 2230 * done with it, memcg_kmem_put_cache() must be called to release the 2231 * reference. 2232 */ 2233 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2234 { 2235 struct mem_cgroup *memcg; 2236 struct kmem_cache *memcg_cachep; 2237 int kmemcg_id; 2238 2239 VM_BUG_ON(!is_root_cache(cachep)); 2240 2241 if (memcg_kmem_bypass()) 2242 return cachep; 2243 2244 if (current->memcg_kmem_skip_account) 2245 return cachep; 2246 2247 memcg = get_mem_cgroup_from_mm(current->mm); 2248 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2249 if (kmemcg_id < 0) 2250 goto out; 2251 2252 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2253 if (likely(memcg_cachep)) 2254 return memcg_cachep; 2255 2256 /* 2257 * If we are in a safe context (can wait, and not in interrupt 2258 * context), we could be predictable and return right away. 2259 * This would guarantee that the allocation being performed 2260 * already belongs in the new cache. 2261 * 2262 * However, there are some clashes that can arise from locking. 2263 * For instance, because we acquire the slab_mutex while doing 2264 * memcg_create_kmem_cache, this means no further allocation 2265 * could happen with the slab_mutex held. So it's better to 2266 * defer everything. 2267 */ 2268 memcg_schedule_kmem_cache_create(memcg, cachep); 2269 out: 2270 css_put(&memcg->css); 2271 return cachep; 2272 } 2273 2274 /** 2275 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2276 * @cachep: the cache returned by memcg_kmem_get_cache 2277 */ 2278 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2279 { 2280 if (!is_root_cache(cachep)) 2281 css_put(&cachep->memcg_params.memcg->css); 2282 } 2283 2284 /** 2285 * memcg_kmem_charge_memcg: charge a kmem page to a given memory cgroup 2286 * @page: page to charge 2287 * @gfp: reclaim mode 2288 * @order: allocation order 2289 * @memcg: memory cgroup to charge 2290 * 2291 * Returns 0 on success, an error code on failure.
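 *
 * Illustrative use for an order-0 page (cf. memcg_kmem_charge() below,
 * which wraps essentially this sequence):
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	ret = memcg_kmem_charge_memcg(page, GFP_KERNEL, 0, memcg);
 *	css_put(&memcg->css);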
2292 */ 2293 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2294 struct mem_cgroup *memcg) 2295 { 2296 unsigned int nr_pages = 1 << order; 2297 struct page_counter *counter; 2298 int ret; 2299 2300 ret = try_charge(memcg, gfp, nr_pages); 2301 if (ret) 2302 return ret; 2303 2304 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2305 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2306 cancel_charge(memcg, nr_pages); 2307 return -ENOMEM; 2308 } 2309 2310 page->mem_cgroup = memcg; 2311 2312 return 0; 2313 } 2314 2315 /** 2316 * memcg_kmem_charge: charge a kmem page to the current memory cgroup 2317 * @page: page to charge 2318 * @gfp: reclaim mode 2319 * @order: allocation order 2320 * 2321 * Returns 0 on success, an error code on failure. 2322 */ 2323 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2324 { 2325 struct mem_cgroup *memcg; 2326 int ret = 0; 2327 2328 if (memcg_kmem_bypass()) 2329 return 0; 2330 2331 memcg = get_mem_cgroup_from_mm(current->mm); 2332 if (!mem_cgroup_is_root(memcg)) { 2333 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2334 if (!ret) 2335 __SetPageKmemcg(page); 2336 } 2337 css_put(&memcg->css); 2338 return ret; 2339 } 2340 /** 2341 * memcg_kmem_uncharge: uncharge a kmem page 2342 * @page: page to uncharge 2343 * @order: allocation order 2344 */ 2345 void memcg_kmem_uncharge(struct page *page, int order) 2346 { 2347 struct mem_cgroup *memcg = page->mem_cgroup; 2348 unsigned int nr_pages = 1 << order; 2349 2350 if (!memcg) 2351 return; 2352 2353 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2354 2355 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2356 page_counter_uncharge(&memcg->kmem, nr_pages); 2357 2358 page_counter_uncharge(&memcg->memory, nr_pages); 2359 if (do_memsw_account()) 2360 page_counter_uncharge(&memcg->memsw, nr_pages); 2361 2362 page->mem_cgroup = NULL; 2363 2364 /* slab pages do not have PageKmemcg flag set */ 2365 if (PageKmemcg(page)) 2366 __ClearPageKmemcg(page); 2367 2368 css_put_many(&memcg->css, nr_pages); 2369 } 2370 #endif /* !CONFIG_SLOB */ 2371 2372 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2373 2374 /* 2375 * Because tail pages are not marked as "used", set it. We're under 2376 * zone_lru_lock and migration entries setup in all page mappings. 2377 */ 2378 void mem_cgroup_split_huge_fixup(struct page *head) 2379 { 2380 int i; 2381 2382 if (mem_cgroup_disabled()) 2383 return; 2384 2385 for (i = 1; i < HPAGE_PMD_NR; i++) 2386 head[i].mem_cgroup = head->mem_cgroup; 2387 2388 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 2389 HPAGE_PMD_NR); 2390 } 2391 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2392 2393 #ifdef CONFIG_MEMCG_SWAP 2394 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2395 bool charge) 2396 { 2397 int val = (charge) ? 1 : -1; 2398 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); 2399 } 2400 2401 /** 2402 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2403 * @entry: swap entry to be moved 2404 * @from: mem_cgroup which the entry is moved from 2405 * @to: mem_cgroup which the entry is moved to 2406 * 2407 * It succeeds only when the swap_cgroup's record for this entry is the same 2408 * as the mem_cgroup's id of @from. 2409 * 2410 * Returns 0 on success, -EINVAL on failure. 2411 * 2412 * The caller must have charged to @to, IOW, called page_counter_charge() about 2413 * both res and memsw, and called css_get(). 
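 *
 * In effect (sketch): a single swap_cgroup_cmpxchg() flips the record
 * from mem_cgroup_id(@from) to mem_cgroup_id(@to); on success the
 * per-memcg "swap" statistic moves along with it, otherwise nothing is
 * touched.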
2414 */ 2415 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2416 struct mem_cgroup *from, struct mem_cgroup *to) 2417 { 2418 unsigned short old_id, new_id; 2419 2420 old_id = mem_cgroup_id(from); 2421 new_id = mem_cgroup_id(to); 2422 2423 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2424 mem_cgroup_swap_statistics(from, false); 2425 mem_cgroup_swap_statistics(to, true); 2426 return 0; 2427 } 2428 return -EINVAL; 2429 } 2430 #else 2431 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2432 struct mem_cgroup *from, struct mem_cgroup *to) 2433 { 2434 return -EINVAL; 2435 } 2436 #endif 2437 2438 static DEFINE_MUTEX(memcg_limit_mutex); 2439 2440 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 2441 unsigned long limit) 2442 { 2443 unsigned long curusage; 2444 unsigned long oldusage; 2445 bool enlarge = false; 2446 int retry_count; 2447 int ret; 2448 2449 /* 2450 * To keep hierarchical reclaim simple, how long we should retry 2451 * depends on the caller. We set the retry count to be a function 2452 * of the number of children we should visit in this loop. 2453 */ 2454 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2455 mem_cgroup_count_children(memcg); 2456 2457 oldusage = page_counter_read(&memcg->memory); 2458 2459 do { 2460 if (signal_pending(current)) { 2461 ret = -EINTR; 2462 break; 2463 } 2464 2465 mutex_lock(&memcg_limit_mutex); 2466 if (limit > memcg->memsw.limit) { 2467 mutex_unlock(&memcg_limit_mutex); 2468 ret = -EINVAL; 2469 break; 2470 } 2471 if (limit > memcg->memory.limit) 2472 enlarge = true; 2473 ret = page_counter_limit(&memcg->memory, limit); 2474 mutex_unlock(&memcg_limit_mutex); 2475 2476 if (!ret) 2477 break; 2478 2479 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); 2480 2481 curusage = page_counter_read(&memcg->memory); 2482 /* Usage is reduced? */ 2483 if (curusage >= oldusage) 2484 retry_count--; 2485 else 2486 oldusage = curusage; 2487 } while (retry_count); 2488 2489 if (!ret && enlarge) 2490 memcg_oom_recover(memcg); 2491 2492 return ret; 2493 } 2494 2495 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 2496 unsigned long limit) 2497 { 2498 unsigned long curusage; 2499 unsigned long oldusage; 2500 bool enlarge = false; 2501 int retry_count; 2502 int ret; 2503 2504 /* see mem_cgroup_resize_limit */ 2505 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2506 mem_cgroup_count_children(memcg); 2507 2508 oldusage = page_counter_read(&memcg->memsw); 2509 2510 do { 2511 if (signal_pending(current)) { 2512 ret = -EINTR; 2513 break; 2514 } 2515 2516 mutex_lock(&memcg_limit_mutex); 2517 if (limit < memcg->memory.limit) { 2518 mutex_unlock(&memcg_limit_mutex); 2519 ret = -EINVAL; 2520 break; 2521 } 2522 if (limit > memcg->memsw.limit) 2523 enlarge = true; 2524 ret = page_counter_limit(&memcg->memsw, limit); 2525 mutex_unlock(&memcg_limit_mutex); 2526 2527 if (!ret) 2528 break; 2529 2530 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); 2531 2532 curusage = page_counter_read(&memcg->memsw); 2533 /* Usage is reduced? 
*/ 2534 if (curusage >= oldusage) 2535 retry_count--; 2536 else 2537 oldusage = curusage; 2538 } while (retry_count); 2539 2540 if (!ret && enlarge) 2541 memcg_oom_recover(memcg); 2542 2543 return ret; 2544 } 2545 2546 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2547 gfp_t gfp_mask, 2548 unsigned long *total_scanned) 2549 { 2550 unsigned long nr_reclaimed = 0; 2551 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2552 unsigned long reclaimed; 2553 int loop = 0; 2554 struct mem_cgroup_tree_per_node *mctz; 2555 unsigned long excess; 2556 unsigned long nr_scanned; 2557 2558 if (order > 0) 2559 return 0; 2560 2561 mctz = soft_limit_tree_node(pgdat->node_id); 2562 2563 /* 2564 * Do not even bother to check the largest node if the root 2565 * is empty. Do it lockless to prevent lock bouncing. Races 2566 * are acceptable as soft limit is best effort anyway. 2567 */ 2568 if (RB_EMPTY_ROOT(&mctz->rb_root)) 2569 return 0; 2570 2571 /* 2572 * This loop can run for a while, especially if mem_cgroups 2573 * continuously keep exceeding their soft limit and putting the 2574 * system under pressure. 2575 */ 2576 do { 2577 if (next_mz) 2578 mz = next_mz; 2579 else 2580 mz = mem_cgroup_largest_soft_limit_node(mctz); 2581 if (!mz) 2582 break; 2583 2584 nr_scanned = 0; 2585 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 2586 gfp_mask, &nr_scanned); 2587 nr_reclaimed += reclaimed; 2588 *total_scanned += nr_scanned; 2589 spin_lock_irq(&mctz->lock); 2590 __mem_cgroup_remove_exceeded(mz, mctz); 2591 2592 /* 2593 * If we failed to reclaim anything from this memory cgroup 2594 * it is time to move on to the next cgroup 2595 */ 2596 next_mz = NULL; 2597 if (!reclaimed) 2598 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2599 2600 excess = soft_limit_excess(mz->memcg); 2601 /* 2602 * One school of thought says that we should not add 2603 * back the node to the tree if reclaim returns 0. 2604 * But our reclaim could return 0, simply because, due 2605 * to scan priority, we are exposing a smaller subset of 2606 * memory to reclaim from. Consider this as a longer 2607 * term TODO. 2608 */ 2609 /* If excess == 0, no tree ops */ 2610 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2611 spin_unlock_irq(&mctz->lock); 2612 css_put(&mz->memcg->css); 2613 loop++; 2614 /* 2615 * Could not reclaim anything and there are no more 2616 * mem cgroups to try or we seem to be looping without 2617 * reclaiming anything. 2618 */ 2619 if (!nr_reclaimed && 2620 (next_mz == NULL || 2621 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2622 break; 2623 } while (!nr_reclaimed); 2624 if (next_mz) 2625 css_put(&next_mz->memcg->css); 2626 return nr_reclaimed; 2627 } 2628 2629 /* 2630 * Test whether @memcg has children, dead or alive. Note that this 2631 * function doesn't care whether @memcg has use_hierarchy enabled and 2632 * returns %true if there are child csses according to the cgroup 2633 * hierarchy. Testing use_hierarchy is the caller's responsibility. 2634 */ 2635 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2636 { 2637 bool ret; 2638 2639 rcu_read_lock(); 2640 ret = css_next_child(NULL, &memcg->css); 2641 rcu_read_unlock(); 2642 return ret; 2643 } 2644 2645 /* 2646 * Reclaims as many pages from the given memcg as possible. 2647 * 2648 * Caller is responsible for holding css reference for memcg.
2649 */ 2650 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2651 { 2652 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2653 2654 /* we call try-to-free pages to make this cgroup empty */ 2655 lru_add_drain_all(); 2656 /* try to free all pages in this cgroup */ 2657 while (nr_retries && page_counter_read(&memcg->memory)) { 2658 int progress; 2659 2660 if (signal_pending(current)) 2661 return -EINTR; 2662 2663 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2664 GFP_KERNEL, true); 2665 if (!progress) { 2666 nr_retries--; 2667 /* maybe some writeback is necessary */ 2668 congestion_wait(BLK_RW_ASYNC, HZ/10); 2669 } 2670 2671 } 2672 2673 return 0; 2674 } 2675 2676 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 2677 char *buf, size_t nbytes, 2678 loff_t off) 2679 { 2680 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2681 2682 if (mem_cgroup_is_root(memcg)) 2683 return -EINVAL; 2684 return mem_cgroup_force_empty(memcg) ?: nbytes; 2685 } 2686 2687 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 2688 struct cftype *cft) 2689 { 2690 return mem_cgroup_from_css(css)->use_hierarchy; 2691 } 2692 2693 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 2694 struct cftype *cft, u64 val) 2695 { 2696 int retval = 0; 2697 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2698 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 2699 2700 if (memcg->use_hierarchy == val) 2701 return 0; 2702 2703 /* 2704 * If parent's use_hierarchy is set, we can't make any modifications 2705 * in the child subtrees. If it is unset, then the change can 2706 * occur, provided the current cgroup has no children. 2707 * 2708 * For the root cgroup, parent_memcg is NULL; we allow the value to 2709 * be set if there are no children.
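 *
 * Summarized (illustrative; writes other than 0 or 1 always fail):
 *
 *	parent use_hierarchy	children?	write 0/1
 *	unset (or root)		none		allowed
 *	unset (or root)		some		-EBUSY
 *	set			any		-EINVAL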
2710 */ 2711 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2712 (val == 1 || val == 0)) { 2713 if (!memcg_has_children(memcg)) 2714 memcg->use_hierarchy = val; 2715 else 2716 retval = -EBUSY; 2717 } else 2718 retval = -EINVAL; 2719 2720 return retval; 2721 } 2722 2723 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat) 2724 { 2725 struct mem_cgroup *iter; 2726 int i; 2727 2728 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT); 2729 2730 for_each_mem_cgroup_tree(iter, memcg) { 2731 for (i = 0; i < MEMCG_NR_STAT; i++) 2732 stat[i] += mem_cgroup_read_stat(iter, i); 2733 } 2734 } 2735 2736 static void tree_events(struct mem_cgroup *memcg, unsigned long *events) 2737 { 2738 struct mem_cgroup *iter; 2739 int i; 2740 2741 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS); 2742 2743 for_each_mem_cgroup_tree(iter, memcg) { 2744 for (i = 0; i < MEMCG_NR_EVENTS; i++) 2745 events[i] += mem_cgroup_read_events(iter, i); 2746 } 2747 } 2748 2749 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2750 { 2751 unsigned long val = 0; 2752 2753 if (mem_cgroup_is_root(memcg)) { 2754 struct mem_cgroup *iter; 2755 2756 for_each_mem_cgroup_tree(iter, memcg) { 2757 val += mem_cgroup_read_stat(iter, 2758 MEM_CGROUP_STAT_CACHE); 2759 val += mem_cgroup_read_stat(iter, 2760 MEM_CGROUP_STAT_RSS); 2761 if (swap) 2762 val += mem_cgroup_read_stat(iter, 2763 MEM_CGROUP_STAT_SWAP); 2764 } 2765 } else { 2766 if (!swap) 2767 val = page_counter_read(&memcg->memory); 2768 else 2769 val = page_counter_read(&memcg->memsw); 2770 } 2771 return val; 2772 } 2773 2774 enum { 2775 RES_USAGE, 2776 RES_LIMIT, 2777 RES_MAX_USAGE, 2778 RES_FAILCNT, 2779 RES_SOFT_LIMIT, 2780 }; 2781 2782 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2783 struct cftype *cft) 2784 { 2785 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2786 struct page_counter *counter; 2787 2788 switch (MEMFILE_TYPE(cft->private)) { 2789 case _MEM: 2790 counter = &memcg->memory; 2791 break; 2792 case _MEMSWAP: 2793 counter = &memcg->memsw; 2794 break; 2795 case _KMEM: 2796 counter = &memcg->kmem; 2797 break; 2798 case _TCP: 2799 counter = &memcg->tcpmem; 2800 break; 2801 default: 2802 BUG(); 2803 } 2804 2805 switch (MEMFILE_ATTR(cft->private)) { 2806 case RES_USAGE: 2807 if (counter == &memcg->memory) 2808 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2809 if (counter == &memcg->memsw) 2810 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2811 return (u64)page_counter_read(counter) * PAGE_SIZE; 2812 case RES_LIMIT: 2813 return (u64)counter->limit * PAGE_SIZE; 2814 case RES_MAX_USAGE: 2815 return (u64)counter->watermark * PAGE_SIZE; 2816 case RES_FAILCNT: 2817 return counter->failcnt; 2818 case RES_SOFT_LIMIT: 2819 return (u64)memcg->soft_limit * PAGE_SIZE; 2820 default: 2821 BUG(); 2822 } 2823 } 2824 2825 #ifndef CONFIG_SLOB 2826 static int memcg_online_kmem(struct mem_cgroup *memcg) 2827 { 2828 int memcg_id; 2829 2830 if (cgroup_memory_nokmem) 2831 return 0; 2832 2833 BUG_ON(memcg->kmemcg_id >= 0); 2834 BUG_ON(memcg->kmem_state); 2835 2836 memcg_id = memcg_alloc_cache_id(); 2837 if (memcg_id < 0) 2838 return memcg_id; 2839 2840 static_branch_inc(&memcg_kmem_enabled_key); 2841 /* 2842 * A memory cgroup is considered kmem-online as soon as it gets 2843 * kmemcg_id. Setting the id after enabling static branching will 2844 * guarantee no one starts accounting before all call sites are 2845 * patched. 
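 *
 * Fast paths test the static key before doing any accounting work;
 * e.g. a caller might do (illustrative, using the real
 * memcg_kmem_enabled() helper):
 *
 *	if (memcg_kmem_enabled() && memcg_kmem_charge(page, gfp, order))
 *		goto failed;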
2846 */ 2847 memcg->kmemcg_id = memcg_id; 2848 memcg->kmem_state = KMEM_ONLINE; 2849 2850 return 0; 2851 } 2852 2853 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2854 { 2855 struct cgroup_subsys_state *css; 2856 struct mem_cgroup *parent, *child; 2857 int kmemcg_id; 2858 2859 if (memcg->kmem_state != KMEM_ONLINE) 2860 return; 2861 /* 2862 * Clear the online state before clearing memcg_caches array 2863 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 2864 * guarantees that no cache will be created for this cgroup 2865 * after we are done (see memcg_create_kmem_cache()). 2866 */ 2867 memcg->kmem_state = KMEM_ALLOCATED; 2868 2869 memcg_deactivate_kmem_caches(memcg); 2870 2871 kmemcg_id = memcg->kmemcg_id; 2872 BUG_ON(kmemcg_id < 0); 2873 2874 parent = parent_mem_cgroup(memcg); 2875 if (!parent) 2876 parent = root_mem_cgroup; 2877 2878 /* 2879 * Change kmemcg_id of this cgroup and all its descendants to the 2880 * parent's id, and then move all entries from this cgroup's list_lrus 2881 * to ones of the parent. After we have finished, all list_lrus 2882 * corresponding to this cgroup are guaranteed to remain empty. The 2883 * ordering is imposed by list_lru_node->lock taken by 2884 * memcg_drain_all_list_lrus(). 2885 */ 2886 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 2887 css_for_each_descendant_pre(css, &memcg->css) { 2888 child = mem_cgroup_from_css(css); 2889 BUG_ON(child->kmemcg_id != kmemcg_id); 2890 child->kmemcg_id = parent->kmemcg_id; 2891 if (!memcg->use_hierarchy) 2892 break; 2893 } 2894 rcu_read_unlock(); 2895 2896 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 2897 2898 memcg_free_cache_id(kmemcg_id); 2899 } 2900 2901 static void memcg_free_kmem(struct mem_cgroup *memcg) 2902 { 2903 /* css_alloc() failed, offlining didn't happen */ 2904 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 2905 memcg_offline_kmem(memcg); 2906 2907 if (memcg->kmem_state == KMEM_ALLOCATED) { 2908 memcg_destroy_kmem_caches(memcg); 2909 static_branch_dec(&memcg_kmem_enabled_key); 2910 WARN_ON(page_counter_read(&memcg->kmem)); 2911 } 2912 } 2913 #else 2914 static int memcg_online_kmem(struct mem_cgroup *memcg) 2915 { 2916 return 0; 2917 } 2918 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2919 { 2920 } 2921 static void memcg_free_kmem(struct mem_cgroup *memcg) 2922 { 2923 } 2924 #endif /* !CONFIG_SLOB */ 2925 2926 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2927 unsigned long limit) 2928 { 2929 int ret; 2930 2931 mutex_lock(&memcg_limit_mutex); 2932 ret = page_counter_limit(&memcg->kmem, limit); 2933 mutex_unlock(&memcg_limit_mutex); 2934 return ret; 2935 } 2936 2937 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit) 2938 { 2939 int ret; 2940 2941 mutex_lock(&memcg_limit_mutex); 2942 2943 ret = page_counter_limit(&memcg->tcpmem, limit); 2944 if (ret) 2945 goto out; 2946 2947 if (!memcg->tcpmem_active) { 2948 /* 2949 * The active flag needs to be written after the static_key 2950 * update. This is what guarantees that the socket activation 2951 * function is the last one to run. See mem_cgroup_sk_alloc() 2952 * for details, and note that we don't mark any socket as 2953 * belonging to this memcg until that flag is up. 2954 * 2955 * We need to do this, because static_keys will span multiple 2956 * sites, but we can't control their order. If we mark a socket 2957 * as accounted, but the accounting functions are not patched in 2958 * yet, we'll lose accounting. 
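 *
 * Reader side, condensed from mem_cgroup_sk_alloc() (illustrative):
 *
 *	if (mem_cgroup_sockets_enabled && memcg->tcpmem_active)
 *		sk->sk_memcg = memcg;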
2959 * 2960 * We never race with the readers in mem_cgroup_sk_alloc(), 2961 * because when this value change, the code to process it is not 2962 * patched in yet. 2963 */ 2964 static_branch_inc(&memcg_sockets_enabled_key); 2965 memcg->tcpmem_active = true; 2966 } 2967 out: 2968 mutex_unlock(&memcg_limit_mutex); 2969 return ret; 2970 } 2971 2972 /* 2973 * The user of this function is... 2974 * RES_LIMIT. 2975 */ 2976 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2977 char *buf, size_t nbytes, loff_t off) 2978 { 2979 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2980 unsigned long nr_pages; 2981 int ret; 2982 2983 buf = strstrip(buf); 2984 ret = page_counter_memparse(buf, "-1", &nr_pages); 2985 if (ret) 2986 return ret; 2987 2988 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2989 case RES_LIMIT: 2990 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 2991 ret = -EINVAL; 2992 break; 2993 } 2994 switch (MEMFILE_TYPE(of_cft(of)->private)) { 2995 case _MEM: 2996 ret = mem_cgroup_resize_limit(memcg, nr_pages); 2997 break; 2998 case _MEMSWAP: 2999 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 3000 break; 3001 case _KMEM: 3002 ret = memcg_update_kmem_limit(memcg, nr_pages); 3003 break; 3004 case _TCP: 3005 ret = memcg_update_tcp_limit(memcg, nr_pages); 3006 break; 3007 } 3008 break; 3009 case RES_SOFT_LIMIT: 3010 memcg->soft_limit = nr_pages; 3011 ret = 0; 3012 break; 3013 } 3014 return ret ?: nbytes; 3015 } 3016 3017 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3018 size_t nbytes, loff_t off) 3019 { 3020 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3021 struct page_counter *counter; 3022 3023 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3024 case _MEM: 3025 counter = &memcg->memory; 3026 break; 3027 case _MEMSWAP: 3028 counter = &memcg->memsw; 3029 break; 3030 case _KMEM: 3031 counter = &memcg->kmem; 3032 break; 3033 case _TCP: 3034 counter = &memcg->tcpmem; 3035 break; 3036 default: 3037 BUG(); 3038 } 3039 3040 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3041 case RES_MAX_USAGE: 3042 page_counter_reset_watermark(counter); 3043 break; 3044 case RES_FAILCNT: 3045 counter->failcnt = 0; 3046 break; 3047 default: 3048 BUG(); 3049 } 3050 3051 return nbytes; 3052 } 3053 3054 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3055 struct cftype *cft) 3056 { 3057 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3058 } 3059 3060 #ifdef CONFIG_MMU 3061 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3062 struct cftype *cft, u64 val) 3063 { 3064 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3065 3066 if (val & ~MOVE_MASK) 3067 return -EINVAL; 3068 3069 /* 3070 * No kind of locking is needed in here, because ->can_attach() will 3071 * check this value once in the beginning of the process, and then carry 3072 * on with stale data. This means that changes to this value will only 3073 * affect task migrations starting after the change. 
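 *
 * The value is a bitmask of MOVE_ANON (0x1) and MOVE_FILE (0x2); for
 * example, writing 3 moves both anonymous and file pages when a task
 * migrates into the cgroup.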
3074 */ 3075 memcg->move_charge_at_immigrate = val; 3076 return 0; 3077 } 3078 #else 3079 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3080 struct cftype *cft, u64 val) 3081 { 3082 return -ENOSYS; 3083 } 3084 #endif 3085 3086 #ifdef CONFIG_NUMA 3087 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3088 { 3089 struct numa_stat { 3090 const char *name; 3091 unsigned int lru_mask; 3092 }; 3093 3094 static const struct numa_stat stats[] = { 3095 { "total", LRU_ALL }, 3096 { "file", LRU_ALL_FILE }, 3097 { "anon", LRU_ALL_ANON }, 3098 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3099 }; 3100 const struct numa_stat *stat; 3101 int nid; 3102 unsigned long nr; 3103 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3104 3105 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3106 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3107 seq_printf(m, "%s=%lu", stat->name, nr); 3108 for_each_node_state(nid, N_MEMORY) { 3109 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3110 stat->lru_mask); 3111 seq_printf(m, " N%d=%lu", nid, nr); 3112 } 3113 seq_putc(m, '\n'); 3114 } 3115 3116 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3117 struct mem_cgroup *iter; 3118 3119 nr = 0; 3120 for_each_mem_cgroup_tree(iter, memcg) 3121 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3122 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3123 for_each_node_state(nid, N_MEMORY) { 3124 nr = 0; 3125 for_each_mem_cgroup_tree(iter, memcg) 3126 nr += mem_cgroup_node_nr_lru_pages( 3127 iter, nid, stat->lru_mask); 3128 seq_printf(m, " N%d=%lu", nid, nr); 3129 } 3130 seq_putc(m, '\n'); 3131 } 3132 3133 return 0; 3134 } 3135 #endif /* CONFIG_NUMA */ 3136 3137 static int memcg_stat_show(struct seq_file *m, void *v) 3138 { 3139 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3140 unsigned long memory, memsw; 3141 struct mem_cgroup *mi; 3142 unsigned int i; 3143 3144 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) != 3145 MEM_CGROUP_STAT_NSTATS); 3146 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) != 3147 MEM_CGROUP_EVENTS_NSTATS); 3148 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3149 3150 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3151 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) 3152 continue; 3153 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], 3154 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3155 } 3156 3157 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 3158 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 3159 mem_cgroup_read_events(memcg, i)); 3160 3161 for (i = 0; i < NR_LRU_LISTS; i++) 3162 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3163 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3164 3165 /* Hierarchical information */ 3166 memory = memsw = PAGE_COUNTER_MAX; 3167 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3168 memory = min(memory, mi->memory.limit); 3169 memsw = min(memsw, mi->memsw.limit); 3170 } 3171 seq_printf(m, "hierarchical_memory_limit %llu\n", 3172 (u64)memory * PAGE_SIZE); 3173 if (do_memsw_account()) 3174 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3175 (u64)memsw * PAGE_SIZE); 3176 3177 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3178 unsigned long long val = 0; 3179 3180 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) 3181 continue; 3182 for_each_mem_cgroup_tree(mi, memcg) 3183 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3184 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); 3185 } 3186 3187 for (i = 0; i < 
MEM_CGROUP_EVENTS_NSTATS; i++) { 3188 unsigned long long val = 0; 3189 3190 for_each_mem_cgroup_tree(mi, memcg) 3191 val += mem_cgroup_read_events(mi, i); 3192 seq_printf(m, "total_%s %llu\n", 3193 mem_cgroup_events_names[i], val); 3194 } 3195 3196 for (i = 0; i < NR_LRU_LISTS; i++) { 3197 unsigned long long val = 0; 3198 3199 for_each_mem_cgroup_tree(mi, memcg) 3200 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3201 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3202 } 3203 3204 #ifdef CONFIG_DEBUG_VM 3205 { 3206 pg_data_t *pgdat; 3207 struct mem_cgroup_per_node *mz; 3208 struct zone_reclaim_stat *rstat; 3209 unsigned long recent_rotated[2] = {0, 0}; 3210 unsigned long recent_scanned[2] = {0, 0}; 3211 3212 for_each_online_pgdat(pgdat) { 3213 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3214 rstat = &mz->lruvec.reclaim_stat; 3215 3216 recent_rotated[0] += rstat->recent_rotated[0]; 3217 recent_rotated[1] += rstat->recent_rotated[1]; 3218 recent_scanned[0] += rstat->recent_scanned[0]; 3219 recent_scanned[1] += rstat->recent_scanned[1]; 3220 } 3221 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3222 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3223 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3224 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3225 } 3226 #endif 3227 3228 return 0; 3229 } 3230 3231 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3232 struct cftype *cft) 3233 { 3234 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3235 3236 return mem_cgroup_swappiness(memcg); 3237 } 3238 3239 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3240 struct cftype *cft, u64 val) 3241 { 3242 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3243 3244 if (val > 100) 3245 return -EINVAL; 3246 3247 if (css->parent) 3248 memcg->swappiness = val; 3249 else 3250 vm_swappiness = val; 3251 3252 return 0; 3253 } 3254 3255 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3256 { 3257 struct mem_cgroup_threshold_ary *t; 3258 unsigned long usage; 3259 int i; 3260 3261 rcu_read_lock(); 3262 if (!swap) 3263 t = rcu_dereference(memcg->thresholds.primary); 3264 else 3265 t = rcu_dereference(memcg->memsw_thresholds.primary); 3266 3267 if (!t) 3268 goto unlock; 3269 3270 usage = mem_cgroup_usage(memcg, swap); 3271 3272 /* 3273 * current_threshold points to threshold just below or equal to usage. 3274 * If it's not true, a threshold was crossed after last 3275 * call of __mem_cgroup_threshold(). 3276 */ 3277 i = t->current_threshold; 3278 3279 /* 3280 * Iterate backward over array of thresholds starting from 3281 * current_threshold and check if a threshold is crossed. 3282 * If none of thresholds below usage is crossed, we read 3283 * only one element of the array here. 3284 */ 3285 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3286 eventfd_signal(t->entries[i].eventfd, 1); 3287 3288 /* i = current_threshold + 1 */ 3289 i++; 3290 3291 /* 3292 * Iterate forward over array of thresholds starting from 3293 * current_threshold+1 and check if a threshold is crossed. 3294 * If none of thresholds above usage is crossed, we read 3295 * only one element of the array here. 
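 *
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and a
 * last-seen usage of 9M, current_threshold points at the 8M entry. If
 * usage drops to 3M, the backward walk signals the 8M and 4M eventfds;
 * if it instead grows to 20M, the forward walk signals the 16M one.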
3296 */ 3297 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3298 eventfd_signal(t->entries[i].eventfd, 1); 3299 3300 /* Update current_threshold */ 3301 t->current_threshold = i - 1; 3302 unlock: 3303 rcu_read_unlock(); 3304 } 3305 3306 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3307 { 3308 while (memcg) { 3309 __mem_cgroup_threshold(memcg, false); 3310 if (do_memsw_account()) 3311 __mem_cgroup_threshold(memcg, true); 3312 3313 memcg = parent_mem_cgroup(memcg); 3314 } 3315 } 3316 3317 static int compare_thresholds(const void *a, const void *b) 3318 { 3319 const struct mem_cgroup_threshold *_a = a; 3320 const struct mem_cgroup_threshold *_b = b; 3321 3322 if (_a->threshold > _b->threshold) 3323 return 1; 3324 3325 if (_a->threshold < _b->threshold) 3326 return -1; 3327 3328 return 0; 3329 } 3330 3331 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3332 { 3333 struct mem_cgroup_eventfd_list *ev; 3334 3335 spin_lock(&memcg_oom_lock); 3336 3337 list_for_each_entry(ev, &memcg->oom_notify, list) 3338 eventfd_signal(ev->eventfd, 1); 3339 3340 spin_unlock(&memcg_oom_lock); 3341 return 0; 3342 } 3343 3344 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3345 { 3346 struct mem_cgroup *iter; 3347 3348 for_each_mem_cgroup_tree(iter, memcg) 3349 mem_cgroup_oom_notify_cb(iter); 3350 } 3351 3352 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3353 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3354 { 3355 struct mem_cgroup_thresholds *thresholds; 3356 struct mem_cgroup_threshold_ary *new; 3357 unsigned long threshold; 3358 unsigned long usage; 3359 int i, size, ret; 3360 3361 ret = page_counter_memparse(args, "-1", &threshold); 3362 if (ret) 3363 return ret; 3364 3365 mutex_lock(&memcg->thresholds_lock); 3366 3367 if (type == _MEM) { 3368 thresholds = &memcg->thresholds; 3369 usage = mem_cgroup_usage(memcg, false); 3370 } else if (type == _MEMSWAP) { 3371 thresholds = &memcg->memsw_thresholds; 3372 usage = mem_cgroup_usage(memcg, true); 3373 } else 3374 BUG(); 3375 3376 /* Check if a threshold crossed before adding a new one */ 3377 if (thresholds->primary) 3378 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3379 3380 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3381 3382 /* Allocate memory for new array of thresholds */ 3383 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3384 GFP_KERNEL); 3385 if (!new) { 3386 ret = -ENOMEM; 3387 goto unlock; 3388 } 3389 new->size = size; 3390 3391 /* Copy thresholds (if any) to new array */ 3392 if (thresholds->primary) { 3393 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3394 sizeof(struct mem_cgroup_threshold)); 3395 } 3396 3397 /* Add new threshold */ 3398 new->entries[size - 1].eventfd = eventfd; 3399 new->entries[size - 1].threshold = threshold; 3400 3401 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3402 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3403 compare_thresholds, NULL); 3404 3405 /* Find current threshold */ 3406 new->current_threshold = -1; 3407 for (i = 0; i < size; i++) { 3408 if (new->entries[i].threshold <= usage) { 3409 /* 3410 * new->current_threshold will not be used until 3411 * rcu_assign_pointer(), so it's safe to increment 3412 * it here. 
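 *
 * The new array is then published with the usual RCU replace-and-wait
 * sequence below: the old primary becomes the spare,
 * rcu_assign_pointer() installs the new array, and synchronize_rcu()
 * ensures no walker still sees the old one before it may be reused.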
3413 */ 3414 ++new->current_threshold; 3415 } else 3416 break; 3417 } 3418 3419 /* Free old spare buffer and save old primary buffer as spare */ 3420 kfree(thresholds->spare); 3421 thresholds->spare = thresholds->primary; 3422 3423 rcu_assign_pointer(thresholds->primary, new); 3424 3425 /* To be sure that nobody uses thresholds */ 3426 synchronize_rcu(); 3427 3428 unlock: 3429 mutex_unlock(&memcg->thresholds_lock); 3430 3431 return ret; 3432 } 3433 3434 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3435 struct eventfd_ctx *eventfd, const char *args) 3436 { 3437 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3438 } 3439 3440 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3441 struct eventfd_ctx *eventfd, const char *args) 3442 { 3443 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3444 } 3445 3446 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3447 struct eventfd_ctx *eventfd, enum res_type type) 3448 { 3449 struct mem_cgroup_thresholds *thresholds; 3450 struct mem_cgroup_threshold_ary *new; 3451 unsigned long usage; 3452 int i, j, size; 3453 3454 mutex_lock(&memcg->thresholds_lock); 3455 3456 if (type == _MEM) { 3457 thresholds = &memcg->thresholds; 3458 usage = mem_cgroup_usage(memcg, false); 3459 } else if (type == _MEMSWAP) { 3460 thresholds = &memcg->memsw_thresholds; 3461 usage = mem_cgroup_usage(memcg, true); 3462 } else 3463 BUG(); 3464 3465 if (!thresholds->primary) 3466 goto unlock; 3467 3468 /* Check if a threshold crossed before removing */ 3469 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3470 3471 /* Calculate new number of threshold */ 3472 size = 0; 3473 for (i = 0; i < thresholds->primary->size; i++) { 3474 if (thresholds->primary->entries[i].eventfd != eventfd) 3475 size++; 3476 } 3477 3478 new = thresholds->spare; 3479 3480 /* Set thresholds array to NULL if we don't have thresholds */ 3481 if (!size) { 3482 kfree(new); 3483 new = NULL; 3484 goto swap_buffers; 3485 } 3486 3487 new->size = size; 3488 3489 /* Copy thresholds and find current threshold */ 3490 new->current_threshold = -1; 3491 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3492 if (thresholds->primary->entries[i].eventfd == eventfd) 3493 continue; 3494 3495 new->entries[j] = thresholds->primary->entries[i]; 3496 if (new->entries[j].threshold <= usage) { 3497 /* 3498 * new->current_threshold will not be used 3499 * until rcu_assign_pointer(), so it's safe to increment 3500 * it here. 
3501 */ 3502 ++new->current_threshold; 3503 } 3504 j++; 3505 } 3506 3507 swap_buffers: 3508 /* Swap primary and spare array */ 3509 thresholds->spare = thresholds->primary; 3510 3511 rcu_assign_pointer(thresholds->primary, new); 3512 3513 /* To be sure that nobody uses thresholds */ 3514 synchronize_rcu(); 3515 3516 /* If all events are unregistered, free the spare array */ 3517 if (!new) { 3518 kfree(thresholds->spare); 3519 thresholds->spare = NULL; 3520 } 3521 unlock: 3522 mutex_unlock(&memcg->thresholds_lock); 3523 } 3524 3525 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3526 struct eventfd_ctx *eventfd) 3527 { 3528 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3529 } 3530 3531 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3532 struct eventfd_ctx *eventfd) 3533 { 3534 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3535 } 3536 3537 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3538 struct eventfd_ctx *eventfd, const char *args) 3539 { 3540 struct mem_cgroup_eventfd_list *event; 3541 3542 event = kmalloc(sizeof(*event), GFP_KERNEL); 3543 if (!event) 3544 return -ENOMEM; 3545 3546 spin_lock(&memcg_oom_lock); 3547 3548 event->eventfd = eventfd; 3549 list_add(&event->list, &memcg->oom_notify); 3550 3551 /* already in OOM ? */ 3552 if (memcg->under_oom) 3553 eventfd_signal(eventfd, 1); 3554 spin_unlock(&memcg_oom_lock); 3555 3556 return 0; 3557 } 3558 3559 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3560 struct eventfd_ctx *eventfd) 3561 { 3562 struct mem_cgroup_eventfd_list *ev, *tmp; 3563 3564 spin_lock(&memcg_oom_lock); 3565 3566 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3567 if (ev->eventfd == eventfd) { 3568 list_del(&ev->list); 3569 kfree(ev); 3570 } 3571 } 3572 3573 spin_unlock(&memcg_oom_lock); 3574 } 3575 3576 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3577 { 3578 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3579 3580 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3581 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3582 return 0; 3583 } 3584 3585 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3586 struct cftype *cft, u64 val) 3587 { 3588 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3589 3590 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3591 if (!css->parent || !((val == 0) || (val == 1))) 3592 return -EINVAL; 3593 3594 memcg->oom_kill_disable = val; 3595 if (!val) 3596 memcg_oom_recover(memcg); 3597 3598 return 0; 3599 } 3600 3601 #ifdef CONFIG_CGROUP_WRITEBACK 3602 3603 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) 3604 { 3605 return &memcg->cgwb_list; 3606 } 3607 3608 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3609 { 3610 return wb_domain_init(&memcg->cgwb_domain, gfp); 3611 } 3612 3613 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3614 { 3615 wb_domain_exit(&memcg->cgwb_domain); 3616 } 3617 3618 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3619 { 3620 wb_domain_size_changed(&memcg->cgwb_domain); 3621 } 3622 3623 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3624 { 3625 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3626 3627 if (!memcg->css.parent) 3628 return NULL; 3629 3630 return &memcg->cgwb_domain; 3631 } 3632 3633 /** 3634 * mem_cgroup_wb_stats - retrieve writeback related stats 
from its memcg 3635 * @wb: bdi_writeback in question 3636 * @pfilepages: out parameter for number of file pages 3637 * @pheadroom: out parameter for number of allocatable pages according to memcg 3638 * @pdirty: out parameter for number of dirty pages 3639 * @pwriteback: out parameter for number of pages under writeback 3640 * 3641 * Determine the numbers of file, headroom, dirty, and writeback pages in 3642 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3643 * is a bit more involved. 3644 * 3645 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3646 * headroom is calculated as the lowest headroom of itself and the 3647 * ancestors. Note that this doesn't consider the actual amount of 3648 * available memory in the system. The caller should further cap 3649 * *@pheadroom accordingly. 3650 */ 3651 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3652 unsigned long *pheadroom, unsigned long *pdirty, 3653 unsigned long *pwriteback) 3654 { 3655 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3656 struct mem_cgroup *parent; 3657 3658 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3659 3660 /* this should eventually include NR_UNSTABLE_NFS */ 3661 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3662 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3663 (1 << LRU_ACTIVE_FILE)); 3664 *pheadroom = PAGE_COUNTER_MAX; 3665 3666 while ((parent = parent_mem_cgroup(memcg))) { 3667 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3668 unsigned long used = page_counter_read(&memcg->memory); 3669 3670 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3671 memcg = parent; 3672 } 3673 } 3674 3675 #else /* CONFIG_CGROUP_WRITEBACK */ 3676 3677 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3678 { 3679 return 0; 3680 } 3681 3682 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3683 { 3684 } 3685 3686 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3687 { 3688 } 3689 3690 #endif /* CONFIG_CGROUP_WRITEBACK */ 3691 3692 /* 3693 * DO NOT USE IN NEW FILES. 3694 * 3695 * "cgroup.event_control" implementation. 3696 * 3697 * This is way over-engineered. It tries to support fully configurable 3698 * events for each user. Such level of flexibility is completely 3699 * unnecessary especially in the light of the planned unified hierarchy. 3700 * 3701 * Please deprecate this and replace with something simpler if at all 3702 * possible. 3703 */ 3704 3705 /* 3706 * Unregister event and free resources. 3707 * 3708 * Gets called from workqueue. 3709 */ 3710 static void memcg_event_remove(struct work_struct *work) 3711 { 3712 struct mem_cgroup_event *event = 3713 container_of(work, struct mem_cgroup_event, remove); 3714 struct mem_cgroup *memcg = event->memcg; 3715 3716 remove_wait_queue(event->wqh, &event->wait); 3717 3718 event->unregister_event(memcg, event->eventfd); 3719 3720 /* Notify userspace the event is going away. */ 3721 eventfd_signal(event->eventfd, 1); 3722 3723 eventfd_ctx_put(event->eventfd); 3724 kfree(event); 3725 css_put(&memcg->css); 3726 } 3727 3728 /* 3729 * Gets called on POLLHUP on eventfd when user closes it. 3730 * 3731 * Called with wqh->lock held and interrupts disabled. 
3732 */ 3733 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 3734 int sync, void *key) 3735 { 3736 struct mem_cgroup_event *event = 3737 container_of(wait, struct mem_cgroup_event, wait); 3738 struct mem_cgroup *memcg = event->memcg; 3739 unsigned long flags = (unsigned long)key; 3740 3741 if (flags & POLLHUP) { 3742 /* 3743 * If the event has been detached at cgroup removal, we 3744 * can simply return knowing the other side will cleanup 3745 * for us. 3746 * 3747 * We can't race against event freeing since the other 3748 * side will require wqh->lock via remove_wait_queue(), 3749 * which we hold. 3750 */ 3751 spin_lock(&memcg->event_list_lock); 3752 if (!list_empty(&event->list)) { 3753 list_del_init(&event->list); 3754 /* 3755 * We are in atomic context, but cgroup_event_remove() 3756 * may sleep, so we have to call it in workqueue. 3757 */ 3758 schedule_work(&event->remove); 3759 } 3760 spin_unlock(&memcg->event_list_lock); 3761 } 3762 3763 return 0; 3764 } 3765 3766 static void memcg_event_ptable_queue_proc(struct file *file, 3767 wait_queue_head_t *wqh, poll_table *pt) 3768 { 3769 struct mem_cgroup_event *event = 3770 container_of(pt, struct mem_cgroup_event, pt); 3771 3772 event->wqh = wqh; 3773 add_wait_queue(wqh, &event->wait); 3774 } 3775 3776 /* 3777 * DO NOT USE IN NEW FILES. 3778 * 3779 * Parse input and register new cgroup event handler. 3780 * 3781 * Input must be in format '<event_fd> <control_fd> <args>'. 3782 * Interpretation of args is defined by control file implementation. 3783 */ 3784 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3785 char *buf, size_t nbytes, loff_t off) 3786 { 3787 struct cgroup_subsys_state *css = of_css(of); 3788 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3789 struct mem_cgroup_event *event; 3790 struct cgroup_subsys_state *cfile_css; 3791 unsigned int efd, cfd; 3792 struct fd efile; 3793 struct fd cfile; 3794 const char *name; 3795 char *endp; 3796 int ret; 3797 3798 buf = strstrip(buf); 3799 3800 efd = simple_strtoul(buf, &endp, 10); 3801 if (*endp != ' ') 3802 return -EINVAL; 3803 buf = endp + 1; 3804 3805 cfd = simple_strtoul(buf, &endp, 10); 3806 if ((*endp != ' ') && (*endp != '\0')) 3807 return -EINVAL; 3808 buf = endp + 1; 3809 3810 event = kzalloc(sizeof(*event), GFP_KERNEL); 3811 if (!event) 3812 return -ENOMEM; 3813 3814 event->memcg = memcg; 3815 INIT_LIST_HEAD(&event->list); 3816 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3817 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3818 INIT_WORK(&event->remove, memcg_event_remove); 3819 3820 efile = fdget(efd); 3821 if (!efile.file) { 3822 ret = -EBADF; 3823 goto out_kfree; 3824 } 3825 3826 event->eventfd = eventfd_ctx_fileget(efile.file); 3827 if (IS_ERR(event->eventfd)) { 3828 ret = PTR_ERR(event->eventfd); 3829 goto out_put_efile; 3830 } 3831 3832 cfile = fdget(cfd); 3833 if (!cfile.file) { 3834 ret = -EBADF; 3835 goto out_put_eventfd; 3836 } 3837 3838 /* the process need read permission on control file */ 3839 /* AV: shouldn't we check that it's been opened for read instead? */ 3840 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3841 if (ret < 0) 3842 goto out_put_cfile; 3843 3844 /* 3845 * Determine the event callbacks and set them in @event. This used 3846 * to be done via struct cftype but cgroup core no longer knows 3847 * about these events. The following is crude but the whole thing 3848 * is for compatibility anyway. 3849 * 3850 * DO NOT ADD NEW FILES. 
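 *
 * Example (illustrative): to be notified when usage crosses 64M,
 * userspace opens an eventfd and memory.usage_in_bytes, then writes
 *
 *	"<eventfd> <usage fd> 67108864"
 *
 * to cgroup.event_control; the trailing args string is handed to
 * mem_cgroup_usage_register_event().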
3851 */ 3852 name = cfile.file->f_path.dentry->d_name.name; 3853 3854 if (!strcmp(name, "memory.usage_in_bytes")) { 3855 event->register_event = mem_cgroup_usage_register_event; 3856 event->unregister_event = mem_cgroup_usage_unregister_event; 3857 } else if (!strcmp(name, "memory.oom_control")) { 3858 event->register_event = mem_cgroup_oom_register_event; 3859 event->unregister_event = mem_cgroup_oom_unregister_event; 3860 } else if (!strcmp(name, "memory.pressure_level")) { 3861 event->register_event = vmpressure_register_event; 3862 event->unregister_event = vmpressure_unregister_event; 3863 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3864 event->register_event = memsw_cgroup_usage_register_event; 3865 event->unregister_event = memsw_cgroup_usage_unregister_event; 3866 } else { 3867 ret = -EINVAL; 3868 goto out_put_cfile; 3869 } 3870 3871 /* 3872 * Verify @cfile should belong to @css. Also, remaining events are 3873 * automatically removed on cgroup destruction but the removal is 3874 * asynchronous, so take an extra ref on @css. 3875 */ 3876 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3877 &memory_cgrp_subsys); 3878 ret = -EINVAL; 3879 if (IS_ERR(cfile_css)) 3880 goto out_put_cfile; 3881 if (cfile_css != css) { 3882 css_put(cfile_css); 3883 goto out_put_cfile; 3884 } 3885 3886 ret = event->register_event(memcg, event->eventfd, buf); 3887 if (ret) 3888 goto out_put_css; 3889 3890 efile.file->f_op->poll(efile.file, &event->pt); 3891 3892 spin_lock(&memcg->event_list_lock); 3893 list_add(&event->list, &memcg->event_list); 3894 spin_unlock(&memcg->event_list_lock); 3895 3896 fdput(cfile); 3897 fdput(efile); 3898 3899 return nbytes; 3900 3901 out_put_css: 3902 css_put(css); 3903 out_put_cfile: 3904 fdput(cfile); 3905 out_put_eventfd: 3906 eventfd_ctx_put(event->eventfd); 3907 out_put_efile: 3908 fdput(efile); 3909 out_kfree: 3910 kfree(event); 3911 3912 return ret; 3913 } 3914 3915 static struct cftype mem_cgroup_legacy_files[] = { 3916 { 3917 .name = "usage_in_bytes", 3918 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3919 .read_u64 = mem_cgroup_read_u64, 3920 }, 3921 { 3922 .name = "max_usage_in_bytes", 3923 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 3924 .write = mem_cgroup_reset, 3925 .read_u64 = mem_cgroup_read_u64, 3926 }, 3927 { 3928 .name = "limit_in_bytes", 3929 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 3930 .write = mem_cgroup_write, 3931 .read_u64 = mem_cgroup_read_u64, 3932 }, 3933 { 3934 .name = "soft_limit_in_bytes", 3935 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 3936 .write = mem_cgroup_write, 3937 .read_u64 = mem_cgroup_read_u64, 3938 }, 3939 { 3940 .name = "failcnt", 3941 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 3942 .write = mem_cgroup_reset, 3943 .read_u64 = mem_cgroup_read_u64, 3944 }, 3945 { 3946 .name = "stat", 3947 .seq_show = memcg_stat_show, 3948 }, 3949 { 3950 .name = "force_empty", 3951 .write = mem_cgroup_force_empty_write, 3952 }, 3953 { 3954 .name = "use_hierarchy", 3955 .write_u64 = mem_cgroup_hierarchy_write, 3956 .read_u64 = mem_cgroup_hierarchy_read, 3957 }, 3958 { 3959 .name = "cgroup.event_control", /* XXX: for compat */ 3960 .write = memcg_write_event_control, 3961 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 3962 }, 3963 { 3964 .name = "swappiness", 3965 .read_u64 = mem_cgroup_swappiness_read, 3966 .write_u64 = mem_cgroup_swappiness_write, 3967 }, 3968 { 3969 .name = "move_charge_at_immigrate", 3970 .read_u64 = mem_cgroup_move_charge_read, 3971 .write_u64 = 
mem_cgroup_move_charge_write, 3972 }, 3973 { 3974 .name = "oom_control", 3975 .seq_show = mem_cgroup_oom_control_read, 3976 .write_u64 = mem_cgroup_oom_control_write, 3977 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 3978 }, 3979 { 3980 .name = "pressure_level", 3981 }, 3982 #ifdef CONFIG_NUMA 3983 { 3984 .name = "numa_stat", 3985 .seq_show = memcg_numa_stat_show, 3986 }, 3987 #endif 3988 { 3989 .name = "kmem.limit_in_bytes", 3990 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 3991 .write = mem_cgroup_write, 3992 .read_u64 = mem_cgroup_read_u64, 3993 }, 3994 { 3995 .name = "kmem.usage_in_bytes", 3996 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 3997 .read_u64 = mem_cgroup_read_u64, 3998 }, 3999 { 4000 .name = "kmem.failcnt", 4001 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4002 .write = mem_cgroup_reset, 4003 .read_u64 = mem_cgroup_read_u64, 4004 }, 4005 { 4006 .name = "kmem.max_usage_in_bytes", 4007 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4008 .write = mem_cgroup_reset, 4009 .read_u64 = mem_cgroup_read_u64, 4010 }, 4011 #ifdef CONFIG_SLABINFO 4012 { 4013 .name = "kmem.slabinfo", 4014 .seq_start = slab_start, 4015 .seq_next = slab_next, 4016 .seq_stop = slab_stop, 4017 .seq_show = memcg_slab_show, 4018 }, 4019 #endif 4020 { 4021 .name = "kmem.tcp.limit_in_bytes", 4022 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4023 .write = mem_cgroup_write, 4024 .read_u64 = mem_cgroup_read_u64, 4025 }, 4026 { 4027 .name = "kmem.tcp.usage_in_bytes", 4028 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4029 .read_u64 = mem_cgroup_read_u64, 4030 }, 4031 { 4032 .name = "kmem.tcp.failcnt", 4033 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4034 .write = mem_cgroup_reset, 4035 .read_u64 = mem_cgroup_read_u64, 4036 }, 4037 { 4038 .name = "kmem.tcp.max_usage_in_bytes", 4039 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4040 .write = mem_cgroup_reset, 4041 .read_u64 = mem_cgroup_read_u64, 4042 }, 4043 { }, /* terminate */ 4044 }; 4045 4046 /* 4047 * Private memory cgroup IDR 4048 * 4049 * Swap-out records and page cache shadow entries need to store memcg 4050 * references in constrained space, so we maintain an ID space that is 4051 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of 4052 * memory-controlled cgroups to 64k. 4053 * 4054 * However, there usually are many references to the offline CSS after 4055 * the cgroup has been destroyed, such as page cache or reclaimable 4056 * slab objects, that don't need to hang on to the ID. We want to keep 4057 * those dead CSS from occupying IDs, or we might quickly exhaust the 4058 * relatively small ID space and prevent the creation of new cgroups 4059 * even when there are far fewer than 64k cgroups - possibly none. 4060 * 4061 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4062 * be freed and recycled when it's no longer needed, which is usually 4063 * when the CSS is offlined. 4064 * 4065 * The only exceptions to that are records of swapped out tmpfs/shmem 4066 * pages that need to be attributed to live ancestors on swapin. But 4067 * those references are manageable from userspace.
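 *
 * As a sketch of how a recorded ID is consumed (the same pattern
 * appears verbatim in mem_cgroup_try_charge() below), the ID is
 * resolved back to a memcg under RCU and only used if the css can
 * still be pinned:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();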
4068 */ 4069 4070 static DEFINE_IDR(mem_cgroup_idr); 4071 4072 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4073 { 4074 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); 4075 atomic_add(n, &memcg->id.ref); 4076 } 4077 4078 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4079 { 4080 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); 4081 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4082 idr_remove(&mem_cgroup_idr, memcg->id.id); 4083 memcg->id.id = 0; 4084 4085 /* Memcg ID pins CSS */ 4086 css_put(&memcg->css); 4087 } 4088 } 4089 4090 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4091 { 4092 mem_cgroup_id_get_many(memcg, 1); 4093 } 4094 4095 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4096 { 4097 mem_cgroup_id_put_many(memcg, 1); 4098 } 4099 4100 /** 4101 * mem_cgroup_from_id - look up a memcg from a memcg id 4102 * @id: the memcg id to look up 4103 * 4104 * Caller must hold rcu_read_lock(). 4105 */ 4106 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4107 { 4108 WARN_ON_ONCE(!rcu_read_lock_held()); 4109 return idr_find(&mem_cgroup_idr, id); 4110 } 4111 4112 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4113 { 4114 struct mem_cgroup_per_node *pn; 4115 int tmp = node; 4116 /* 4117 * This routine is called for each possible node, but it's a BUG 4118 * to call kmalloc() against an offline node. 4119 * 4120 * TODO: this routine can waste a lot of memory on nodes which will 4121 * never be onlined. It's better to use a memory hotplug callback 4122 * function. 4123 */ 4124 if (!node_state(node, N_NORMAL_MEMORY)) 4125 tmp = -1; 4126 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4127 if (!pn) 4128 return 1; 4129 4130 lruvec_init(&pn->lruvec); 4131 pn->usage_in_excess = 0; 4132 pn->on_tree = false; 4133 pn->memcg = memcg; 4134 4135 memcg->nodeinfo[node] = pn; 4136 return 0; 4137 } 4138 4139 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4140 { 4141 kfree(memcg->nodeinfo[node]); 4142 } 4143 4144 static void mem_cgroup_free(struct mem_cgroup *memcg) 4145 { 4146 int node; 4147 4148 memcg_wb_domain_exit(memcg); 4149 for_each_node(node) 4150 free_mem_cgroup_per_node_info(memcg, node); 4151 free_percpu(memcg->stat); 4152 kfree(memcg); 4153 } 4154 4155 static struct mem_cgroup *mem_cgroup_alloc(void) 4156 { 4157 struct mem_cgroup *memcg; 4158 size_t size; 4159 int node; 4160 4161 size = sizeof(struct mem_cgroup); 4162 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4163 4164 memcg = kzalloc(size, GFP_KERNEL); 4165 if (!memcg) 4166 return NULL; 4167 4168 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4169 1, MEM_CGROUP_ID_MAX, 4170 GFP_KERNEL); 4171 if (memcg->id.id < 0) 4172 goto fail; 4173 4174 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4175 if (!memcg->stat) 4176 goto fail; 4177 4178 for_each_node(node) 4179 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4180 goto fail; 4181 4182 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4183 goto fail; 4184 4185 INIT_WORK(&memcg->high_work, high_work_func); 4186 memcg->last_scanned_node = MAX_NUMNODES; 4187 INIT_LIST_HEAD(&memcg->oom_notify); 4188 mutex_init(&memcg->thresholds_lock); 4189 spin_lock_init(&memcg->move_lock); 4190 vmpressure_init(&memcg->vmpressure); 4191 INIT_LIST_HEAD(&memcg->event_list); 4192 spin_lock_init(&memcg->event_list_lock); 4193 memcg->socket_pressure = jiffies; 4194 #ifndef CONFIG_SLOB 4195 memcg->kmemcg_id = -1; 4196 #endif 4197 #ifdef CONFIG_CGROUP_WRITEBACK 4198
INIT_LIST_HEAD(&memcg->cgwb_list); 4199 #endif 4200 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4201 return memcg; 4202 fail: 4203 if (memcg->id.id > 0) 4204 idr_remove(&mem_cgroup_idr, memcg->id.id); 4205 mem_cgroup_free(memcg); 4206 return NULL; 4207 } 4208 4209 static struct cgroup_subsys_state * __ref 4210 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4211 { 4212 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4213 struct mem_cgroup *memcg; 4214 long error = -ENOMEM; 4215 4216 memcg = mem_cgroup_alloc(); 4217 if (!memcg) 4218 return ERR_PTR(error); 4219 4220 memcg->high = PAGE_COUNTER_MAX; 4221 memcg->soft_limit = PAGE_COUNTER_MAX; 4222 if (parent) { 4223 memcg->swappiness = mem_cgroup_swappiness(parent); 4224 memcg->oom_kill_disable = parent->oom_kill_disable; 4225 } 4226 if (parent && parent->use_hierarchy) { 4227 memcg->use_hierarchy = true; 4228 page_counter_init(&memcg->memory, &parent->memory); 4229 page_counter_init(&memcg->swap, &parent->swap); 4230 page_counter_init(&memcg->memsw, &parent->memsw); 4231 page_counter_init(&memcg->kmem, &parent->kmem); 4232 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4233 } else { 4234 page_counter_init(&memcg->memory, NULL); 4235 page_counter_init(&memcg->swap, NULL); 4236 page_counter_init(&memcg->memsw, NULL); 4237 page_counter_init(&memcg->kmem, NULL); 4238 page_counter_init(&memcg->tcpmem, NULL); 4239 /* 4240 * A deeper hierarchy with use_hierarchy == false doesn't make 4241 * much sense, so let the cgroup subsystem know about this 4242 * unfortunate state in our controller. 4243 */ 4244 if (parent != root_mem_cgroup) 4245 memory_cgrp_subsys.broken_hierarchy = true; 4246 } 4247 4248 /* The following stuff does not apply to the root */ 4249 if (!parent) { 4250 root_mem_cgroup = memcg; 4251 return &memcg->css; 4252 } 4253 4254 error = memcg_online_kmem(memcg); 4255 if (error) 4256 goto fail; 4257 4258 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4259 static_branch_inc(&memcg_sockets_enabled_key); 4260 4261 return &memcg->css; 4262 fail: 4263 mem_cgroup_free(memcg); 4264 return ERR_PTR(-ENOMEM); 4265 } 4266 4267 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4268 { 4269 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4270 4271 /* Online state pins memcg ID, memcg ID pins CSS */ 4272 atomic_set(&memcg->id.ref, 1); 4273 css_get(css); 4274 return 0; 4275 } 4276 4277 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4278 { 4279 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4280 struct mem_cgroup_event *event, *tmp; 4281 4282 /* 4283 * Unregister events and notify userspace. 4284 * Notify userspace about cgroup removal only after rmdir of the 4285 * cgroup directory to avoid races between userspace and kernelspace.
4286 */ 4287 spin_lock(&memcg->event_list_lock); 4288 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4289 list_del_init(&event->list); 4290 schedule_work(&event->remove); 4291 } 4292 spin_unlock(&memcg->event_list_lock); 4293 4294 memcg_offline_kmem(memcg); 4295 wb_memcg_offline(memcg); 4296 4297 mem_cgroup_id_put(memcg); 4298 } 4299 4300 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4301 { 4302 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4303 4304 invalidate_reclaim_iterators(memcg); 4305 } 4306 4307 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4308 { 4309 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4310 4311 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4312 static_branch_dec(&memcg_sockets_enabled_key); 4313 4314 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4315 static_branch_dec(&memcg_sockets_enabled_key); 4316 4317 vmpressure_cleanup(&memcg->vmpressure); 4318 cancel_work_sync(&memcg->high_work); 4319 mem_cgroup_remove_from_trees(memcg); 4320 memcg_free_kmem(memcg); 4321 mem_cgroup_free(memcg); 4322 } 4323 4324 /** 4325 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4326 * @css: the target css 4327 * 4328 * Reset the states of the mem_cgroup associated with @css. This is 4329 * invoked when the userland requests disabling on the default hierarchy 4330 * but the memcg is pinned through dependency. The memcg should stop 4331 * applying policies and should revert to the vanilla state as it may be 4332 * made visible again. 4333 * 4334 * The current implementation only resets the essential configurations. 4335 * This needs to be expanded to cover all the visible parts. 4336 */ 4337 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4338 { 4339 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4340 4341 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX); 4342 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX); 4343 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX); 4344 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX); 4345 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX); 4346 memcg->low = 0; 4347 memcg->high = PAGE_COUNTER_MAX; 4348 memcg->soft_limit = PAGE_COUNTER_MAX; 4349 memcg_wb_domain_size_changed(memcg); 4350 } 4351 4352 #ifdef CONFIG_MMU 4353 /* Handlers for move charge at task migration. 
*/ 4354 static int mem_cgroup_do_precharge(unsigned long count) 4355 { 4356 int ret; 4357 4358 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4359 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4360 if (!ret) { 4361 mc.precharge += count; 4362 return ret; 4363 } 4364 4365 /* Try charges one by one with reclaim */ 4366 while (count--) { 4367 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4368 if (ret) 4369 return ret; 4370 mc.precharge++; 4371 cond_resched(); 4372 } 4373 return 0; 4374 } 4375 4376 union mc_target { 4377 struct page *page; 4378 swp_entry_t ent; 4379 }; 4380 4381 enum mc_target_type { 4382 MC_TARGET_NONE = 0, 4383 MC_TARGET_PAGE, 4384 MC_TARGET_SWAP, 4385 }; 4386 4387 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4388 unsigned long addr, pte_t ptent) 4389 { 4390 struct page *page = vm_normal_page(vma, addr, ptent); 4391 4392 if (!page || !page_mapped(page)) 4393 return NULL; 4394 if (PageAnon(page)) { 4395 if (!(mc.flags & MOVE_ANON)) 4396 return NULL; 4397 } else { 4398 if (!(mc.flags & MOVE_FILE)) 4399 return NULL; 4400 } 4401 if (!get_page_unless_zero(page)) 4402 return NULL; 4403 4404 return page; 4405 } 4406 4407 #ifdef CONFIG_SWAP 4408 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4409 pte_t ptent, swp_entry_t *entry) 4410 { 4411 struct page *page = NULL; 4412 swp_entry_t ent = pte_to_swp_entry(ptent); 4413 4414 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4415 return NULL; 4416 /* 4417 * Because lookup_swap_cache() updates some statistics counters, 4418 * we call find_get_page() with swapper_space directly. 4419 */ 4420 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 4421 if (do_memsw_account()) 4422 entry->val = ent.val; 4423 4424 return page; 4425 } 4426 #else 4427 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4428 pte_t ptent, swp_entry_t *entry) 4429 { 4430 return NULL; 4431 } 4432 #endif 4433 4434 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4435 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4436 { 4437 struct page *page = NULL; 4438 struct address_space *mapping; 4439 pgoff_t pgoff; 4440 4441 if (!vma->vm_file) /* anonymous vma */ 4442 return NULL; 4443 if (!(mc.flags & MOVE_FILE)) 4444 return NULL; 4445 4446 mapping = vma->vm_file->f_mapping; 4447 pgoff = linear_page_index(vma, addr); 4448 4449 /* the page is moved even if it's not RSS of this task (page-faulted). */ 4450 #ifdef CONFIG_SWAP 4451 /* shmem/tmpfs may report page out on swap: account for that too. */ 4452 if (shmem_mapping(mapping)) { 4453 page = find_get_entry(mapping, pgoff); 4454 if (radix_tree_exceptional_entry(page)) { 4455 swp_entry_t swp = radix_to_swp_entry(page); 4456 if (do_memsw_account()) 4457 *entry = swp; 4458 page = find_get_page(swap_address_space(swp), 4459 swp_offset(swp)); 4460 } 4461 } else 4462 page = find_get_page(mapping, pgoff); 4463 #else 4464 page = find_get_page(mapping, pgoff); 4465 #endif 4466 return page; 4467 } 4468 4469 /** 4470 * mem_cgroup_move_account - move account of the page 4471 * @page: the page 4472 * @compound: charge the page as compound or small page 4473 * @from: mem_cgroup which the page is moved from. 4474 * @to: mem_cgroup which the page is moved to. @from != @to. 4475 * 4476 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.) 4477 * 4478 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge" 4479 * from the old cgroup - the existing charge is transferred along with the page.
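 *
 * A condensed sketch of the expected caller pattern, taken from
 * mem_cgroup_move_charge_pte_range() later in this file (the page
 * reference being dropped comes from get_mctgt_type()):
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, mc.from, mc.to))
 *			mc.moved_charge++;
 *		putback_lru_page(page);
 *	}
 *	put_page(page);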
4480 */ 4481 static int mem_cgroup_move_account(struct page *page, 4482 bool compound, 4483 struct mem_cgroup *from, 4484 struct mem_cgroup *to) 4485 { 4486 unsigned long flags; 4487 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4488 int ret; 4489 bool anon; 4490 4491 VM_BUG_ON(from == to); 4492 VM_BUG_ON_PAGE(PageLRU(page), page); 4493 VM_BUG_ON(compound && !PageTransHuge(page)); 4494 4495 /* 4496 * Prevent mem_cgroup_migrate() from looking at 4497 * page->mem_cgroup of its source page while we change it. 4498 */ 4499 ret = -EBUSY; 4500 if (!trylock_page(page)) 4501 goto out; 4502 4503 ret = -EINVAL; 4504 if (page->mem_cgroup != from) 4505 goto out_unlock; 4506 4507 anon = PageAnon(page); 4508 4509 spin_lock_irqsave(&from->move_lock, flags); 4510 4511 if (!anon && page_mapped(page)) { 4512 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4513 nr_pages); 4514 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4515 nr_pages); 4516 } 4517 4518 /* 4519 * move_lock is grabbed above and the caller set from->moving_account, 4520 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty. 4521 * Hence the mapping should be stable for dirty pages. 4522 */ 4523 if (!anon && PageDirty(page)) { 4524 struct address_space *mapping = page_mapping(page); 4525 4526 if (mapping_cap_account_dirty(mapping)) { 4527 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY], 4528 nr_pages); 4529 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY], 4530 nr_pages); 4531 } 4532 } 4533 4534 if (PageWriteback(page)) { 4535 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4536 nr_pages); 4537 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4538 nr_pages); 4539 } 4540 4541 /* 4542 * It is safe to change page->mem_cgroup here because the page 4543 * is referenced, charged, and isolated - we can't race with 4544 * uncharging, charging, migration, or LRU putback. 4545 */ 4546 4547 /* caller should have done css_get */ 4548 page->mem_cgroup = to; 4549 spin_unlock_irqrestore(&from->move_lock, flags); 4550 4551 ret = 0; 4552 4553 local_irq_disable(); 4554 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4555 memcg_check_events(to, page); 4556 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4557 memcg_check_events(from, page); 4558 local_irq_enable(); 4559 out_unlock: 4560 unlock_page(page); 4561 out: 4562 return ret; 4563 } 4564 4565 /** 4566 * get_mctgt_type - get target type of moving charge 4567 * @vma: the vma the pte to be checked belongs to 4568 * @addr: the address corresponding to the pte to be checked 4569 * @ptent: the pte to be checked 4570 * @target: pointer where the target page or swap entry will be stored (can be NULL) 4571 * 4572 * Returns 4573 * 0 (MC_TARGET_NONE): if the pte is not a target for move charge. 4574 * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4575 * move charge. If @target is not NULL, the page is stored in target->page 4576 * with an extra refcount taken (callers should handle it). 4577 * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4578 * target for charge migration. If @target is not NULL, the entry is stored 4579 * in target->ent. 4580 * 4581 * Called with pte lock held.
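 *
 * For example, the precharge walk below only needs the classification
 * and therefore passes a NULL @target:
 *
 *	if (get_mctgt_type(vma, addr, *pte, NULL))
 *		mc.precharge++;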
4582 */ 4583 4584 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4585 unsigned long addr, pte_t ptent, union mc_target *target) 4586 { 4587 struct page *page = NULL; 4588 enum mc_target_type ret = MC_TARGET_NONE; 4589 swp_entry_t ent = { .val = 0 }; 4590 4591 if (pte_present(ptent)) 4592 page = mc_handle_present_pte(vma, addr, ptent); 4593 else if (is_swap_pte(ptent)) 4594 page = mc_handle_swap_pte(vma, ptent, &ent); 4595 else if (pte_none(ptent)) 4596 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4597 4598 if (!page && !ent.val) 4599 return ret; 4600 if (page) { 4601 /* 4602 * Do only loose check w/o serialization. 4603 * mem_cgroup_move_account() checks the page is valid or 4604 * not under LRU exclusion. 4605 */ 4606 if (page->mem_cgroup == mc.from) { 4607 ret = MC_TARGET_PAGE; 4608 if (target) 4609 target->page = page; 4610 } 4611 if (!ret || !target) 4612 put_page(page); 4613 } 4614 /* There is a swap entry and a page doesn't exist or isn't charged */ 4615 if (ent.val && !ret && 4616 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4617 ret = MC_TARGET_SWAP; 4618 if (target) 4619 target->ent = ent; 4620 } 4621 return ret; 4622 } 4623 4624 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4625 /* 4626 * We don't consider swapping or file mapped pages because THP does not 4627 * support them for now. 4628 * Caller should make sure that pmd_trans_huge(pmd) is true. 4629 */ 4630 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4631 unsigned long addr, pmd_t pmd, union mc_target *target) 4632 { 4633 struct page *page = NULL; 4634 enum mc_target_type ret = MC_TARGET_NONE; 4635 4636 page = pmd_page(pmd); 4637 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4638 if (!(mc.flags & MOVE_ANON)) 4639 return ret; 4640 if (page->mem_cgroup == mc.from) { 4641 ret = MC_TARGET_PAGE; 4642 if (target) { 4643 get_page(page); 4644 target->page = page; 4645 } 4646 } 4647 return ret; 4648 } 4649 #else 4650 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4651 unsigned long addr, pmd_t pmd, union mc_target *target) 4652 { 4653 return MC_TARGET_NONE; 4654 } 4655 #endif 4656 4657 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4658 unsigned long addr, unsigned long end, 4659 struct mm_walk *walk) 4660 { 4661 struct vm_area_struct *vma = walk->vma; 4662 pte_t *pte; 4663 spinlock_t *ptl; 4664 4665 ptl = pmd_trans_huge_lock(pmd, vma); 4666 if (ptl) { 4667 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4668 mc.precharge += HPAGE_PMD_NR; 4669 spin_unlock(ptl); 4670 return 0; 4671 } 4672 4673 if (pmd_trans_unstable(pmd)) 4674 return 0; 4675 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4676 for (; addr != end; pte++, addr += PAGE_SIZE) 4677 if (get_mctgt_type(vma, addr, *pte, NULL)) 4678 mc.precharge++; /* increment precharge temporarily */ 4679 pte_unmap_unlock(pte - 1, ptl); 4680 cond_resched(); 4681 4682 return 0; 4683 } 4684 4685 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4686 { 4687 unsigned long precharge; 4688 4689 struct mm_walk mem_cgroup_count_precharge_walk = { 4690 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4691 .mm = mm, 4692 }; 4693 down_read(&mm->mmap_sem); 4694 walk_page_range(0, mm->highest_vm_end, 4695 &mem_cgroup_count_precharge_walk); 4696 up_read(&mm->mmap_sem); 4697 4698 precharge = mc.precharge; 4699 mc.precharge = 0; 4700 4701 return precharge; 4702 } 4703 4704 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4705 { 4706 unsigned long 
precharge = mem_cgroup_count_precharge(mm); 4707 4708 VM_BUG_ON(mc.moving_task); 4709 mc.moving_task = current; 4710 return mem_cgroup_do_precharge(precharge); 4711 } 4712 4713 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4714 static void __mem_cgroup_clear_mc(void) 4715 { 4716 struct mem_cgroup *from = mc.from; 4717 struct mem_cgroup *to = mc.to; 4718 4719 /* we must uncharge all the leftover precharges from mc.to */ 4720 if (mc.precharge) { 4721 cancel_charge(mc.to, mc.precharge); 4722 mc.precharge = 0; 4723 } 4724 /* 4725 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4726 * we must uncharge here. 4727 */ 4728 if (mc.moved_charge) { 4729 cancel_charge(mc.from, mc.moved_charge); 4730 mc.moved_charge = 0; 4731 } 4732 /* we must fixup refcnts and charges */ 4733 if (mc.moved_swap) { 4734 /* uncharge swap account from the old cgroup */ 4735 if (!mem_cgroup_is_root(mc.from)) 4736 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4737 4738 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 4739 4740 /* 4741 * we charged both to->memory and to->memsw, so we 4742 * should uncharge to->memory. 4743 */ 4744 if (!mem_cgroup_is_root(mc.to)) 4745 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4746 4747 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 4748 css_put_many(&mc.to->css, mc.moved_swap); 4749 4750 mc.moved_swap = 0; 4751 } 4752 memcg_oom_recover(from); 4753 memcg_oom_recover(to); 4754 wake_up_all(&mc.waitq); 4755 } 4756 4757 static void mem_cgroup_clear_mc(void) 4758 { 4759 struct mm_struct *mm = mc.mm; 4760 4761 /* 4762 * we must clear moving_task before waking up waiters at the end of 4763 * task migration. 4764 */ 4765 mc.moving_task = NULL; 4766 __mem_cgroup_clear_mc(); 4767 spin_lock(&mc.lock); 4768 mc.from = NULL; 4769 mc.to = NULL; 4770 mc.mm = NULL; 4771 spin_unlock(&mc.lock); 4772 4773 mmput(mm); 4774 } 4775 4776 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4777 { 4778 struct cgroup_subsys_state *css; 4779 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 4780 struct mem_cgroup *from; 4781 struct task_struct *leader, *p; 4782 struct mm_struct *mm; 4783 unsigned long move_flags; 4784 int ret = 0; 4785 4786 /* charge immigration isn't supported on the default hierarchy */ 4787 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4788 return 0; 4789 4790 /* 4791 * Multi-process migrations only happen on the default hierarchy 4792 * where charge immigration is not used. Perform charge 4793 * immigration if @tset contains a leader and whine if there are 4794 * multiple. 4795 */ 4796 p = NULL; 4797 cgroup_taskset_for_each_leader(leader, css, tset) { 4798 WARN_ON_ONCE(p); 4799 p = leader; 4800 memcg = mem_cgroup_from_css(css); 4801 } 4802 if (!p) 4803 return 0; 4804 4805 /* 4806 * We are now committed to this value, whatever it is. Changes in this 4807 * tunable will only affect upcoming migrations, not the current one. 4808 * So we need to save it and keep using it.
4809 */ 4810 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4811 if (!move_flags) 4812 return 0; 4813 4814 from = mem_cgroup_from_task(p); 4815 4816 VM_BUG_ON(from == memcg); 4817 4818 mm = get_task_mm(p); 4819 if (!mm) 4820 return 0; 4821 /* We move charges only when we move the owner of the mm */ 4822 if (mm->owner == p) { 4823 VM_BUG_ON(mc.from); 4824 VM_BUG_ON(mc.to); 4825 VM_BUG_ON(mc.precharge); 4826 VM_BUG_ON(mc.moved_charge); 4827 VM_BUG_ON(mc.moved_swap); 4828 4829 spin_lock(&mc.lock); 4830 mc.mm = mm; 4831 mc.from = from; 4832 mc.to = memcg; 4833 mc.flags = move_flags; 4834 spin_unlock(&mc.lock); 4835 /* We set mc.moving_task later */ 4836 4837 ret = mem_cgroup_precharge_mc(mm); 4838 if (ret) 4839 mem_cgroup_clear_mc(); 4840 } else { 4841 mmput(mm); 4842 } 4843 return ret; 4844 } 4845 4846 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4847 { 4848 if (mc.to) 4849 mem_cgroup_clear_mc(); 4850 } 4851 4852 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4853 unsigned long addr, unsigned long end, 4854 struct mm_walk *walk) 4855 { 4856 int ret = 0; 4857 struct vm_area_struct *vma = walk->vma; 4858 pte_t *pte; 4859 spinlock_t *ptl; 4860 enum mc_target_type target_type; 4861 union mc_target target; 4862 struct page *page; 4863 4864 ptl = pmd_trans_huge_lock(pmd, vma); 4865 if (ptl) { 4866 if (mc.precharge < HPAGE_PMD_NR) { 4867 spin_unlock(ptl); 4868 return 0; 4869 } 4870 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4871 if (target_type == MC_TARGET_PAGE) { 4872 page = target.page; 4873 if (!isolate_lru_page(page)) { 4874 if (!mem_cgroup_move_account(page, true, 4875 mc.from, mc.to)) { 4876 mc.precharge -= HPAGE_PMD_NR; 4877 mc.moved_charge += HPAGE_PMD_NR; 4878 } 4879 putback_lru_page(page); 4880 } 4881 put_page(page); 4882 } 4883 spin_unlock(ptl); 4884 return 0; 4885 } 4886 4887 if (pmd_trans_unstable(pmd)) 4888 return 0; 4889 retry: 4890 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4891 for (; addr != end; addr += PAGE_SIZE) { 4892 pte_t ptent = *(pte++); 4893 swp_entry_t ent; 4894 4895 if (!mc.precharge) 4896 break; 4897 4898 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4899 case MC_TARGET_PAGE: 4900 page = target.page; 4901 /* 4902 * We can have a part of the split pmd here. Moving it 4903 * can be done but it would be too convoluted so simply 4904 * ignore such a partial THP and keep it in original 4905 * memcg. There should be somebody mapping the head. 4906 */ 4907 if (PageTransCompound(page)) 4908 goto put; 4909 if (isolate_lru_page(page)) 4910 goto put; 4911 if (!mem_cgroup_move_account(page, false, 4912 mc.from, mc.to)) { 4913 mc.precharge--; 4914 /* we uncharge from mc.from later. */ 4915 mc.moved_charge++; 4916 } 4917 putback_lru_page(page); 4918 put: /* get_mctgt_type() gets the page */ 4919 put_page(page); 4920 break; 4921 case MC_TARGET_SWAP: 4922 ent = target.ent; 4923 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4924 mc.precharge--; 4925 /* we fixup refcnts and charges later. */ 4926 mc.moved_swap++; 4927 } 4928 break; 4929 default: 4930 break; 4931 } 4932 } 4933 pte_unmap_unlock(pte - 1, ptl); 4934 cond_resched(); 4935 4936 if (addr != end) { 4937 /* 4938 * We have consumed all precharges we got in can_attach(). 4939 * We try to charge one by one, but don't do any additional 4940 * charges to mc.to if we already failed to charge once in the 4941 * attach() phase.
4942 */ 4943 ret = mem_cgroup_do_precharge(1); 4944 if (!ret) 4945 goto retry; 4946 } 4947 4948 return ret; 4949 } 4950 4951 static void mem_cgroup_move_charge(void) 4952 { 4953 struct mm_walk mem_cgroup_move_charge_walk = { 4954 .pmd_entry = mem_cgroup_move_charge_pte_range, 4955 .mm = mc.mm, 4956 }; 4957 4958 lru_add_drain_all(); 4959 /* 4960 * Signal lock_page_memcg() to take the memcg's move_lock 4961 * while we're moving its pages to another memcg. Then wait 4962 * for already started RCU-only updates to finish. 4963 */ 4964 atomic_inc(&mc.from->moving_account); 4965 synchronize_rcu(); 4966 retry: 4967 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 4968 /* 4969 * Someone holding the mmap_sem might be waiting on the 4970 * waitq. So we cancel all extra charges, wake up all waiters, 4971 * and retry. Because we cancel precharges, we might not be able 4972 * to move enough charges, but moving charge is a best-effort 4973 * feature anyway, so it wouldn't be a big problem. 4974 */ 4975 __mem_cgroup_clear_mc(); 4976 cond_resched(); 4977 goto retry; 4978 } 4979 /* 4980 * When we have consumed all precharges and fail to make an 4981 * additional charge, the page walk just aborts. 4982 */ 4983 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 4984 4985 up_read(&mc.mm->mmap_sem); 4986 atomic_dec(&mc.from->moving_account); 4987 } 4988 4989 static void mem_cgroup_move_task(void) 4990 { 4991 if (mc.to) { 4992 mem_cgroup_move_charge(); 4993 mem_cgroup_clear_mc(); 4994 } 4995 } 4996 #else /* !CONFIG_MMU */ 4997 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4998 { 4999 return 0; 5000 } 5001 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5002 { 5003 } 5004 static void mem_cgroup_move_task(void) 5005 { 5006 } 5007 #endif 5008 5009 /* 5010 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 5011 * to verify whether we're attached to the default hierarchy on each mount 5012 * attempt. 5013 */ 5014 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5015 { 5016 /* 5017 * use_hierarchy is forced on the default hierarchy. cgroup core 5018 * guarantees that @root doesn't have any children, so turning it 5019 * on for the root memcg is enough.
5020 */ 5021 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5022 root_mem_cgroup->use_hierarchy = true; 5023 else 5024 root_mem_cgroup->use_hierarchy = false; 5025 } 5026 5027 static u64 memory_current_read(struct cgroup_subsys_state *css, 5028 struct cftype *cft) 5029 { 5030 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5031 5032 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5033 } 5034 5035 static int memory_low_show(struct seq_file *m, void *v) 5036 { 5037 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5038 unsigned long low = READ_ONCE(memcg->low); 5039 5040 if (low == PAGE_COUNTER_MAX) 5041 seq_puts(m, "max\n"); 5042 else 5043 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5044 5045 return 0; 5046 } 5047 5048 static ssize_t memory_low_write(struct kernfs_open_file *of, 5049 char *buf, size_t nbytes, loff_t off) 5050 { 5051 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5052 unsigned long low; 5053 int err; 5054 5055 buf = strstrip(buf); 5056 err = page_counter_memparse(buf, "max", &low); 5057 if (err) 5058 return err; 5059 5060 memcg->low = low; 5061 5062 return nbytes; 5063 } 5064 5065 static int memory_high_show(struct seq_file *m, void *v) 5066 { 5067 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5068 unsigned long high = READ_ONCE(memcg->high); 5069 5070 if (high == PAGE_COUNTER_MAX) 5071 seq_puts(m, "max\n"); 5072 else 5073 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5074 5075 return 0; 5076 } 5077 5078 static ssize_t memory_high_write(struct kernfs_open_file *of, 5079 char *buf, size_t nbytes, loff_t off) 5080 { 5081 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5082 unsigned long nr_pages; 5083 unsigned long high; 5084 int err; 5085 5086 buf = strstrip(buf); 5087 err = page_counter_memparse(buf, "max", &high); 5088 if (err) 5089 return err; 5090 5091 memcg->high = high; 5092 5093 nr_pages = page_counter_read(&memcg->memory); 5094 if (nr_pages > high) 5095 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5096 GFP_KERNEL, true); 5097 5098 memcg_wb_domain_size_changed(memcg); 5099 return nbytes; 5100 } 5101 5102 static int memory_max_show(struct seq_file *m, void *v) 5103 { 5104 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5105 unsigned long max = READ_ONCE(memcg->memory.limit); 5106 5107 if (max == PAGE_COUNTER_MAX) 5108 seq_puts(m, "max\n"); 5109 else 5110 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5111 5112 return 0; 5113 } 5114 5115 static ssize_t memory_max_write(struct kernfs_open_file *of, 5116 char *buf, size_t nbytes, loff_t off) 5117 { 5118 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5119 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5120 bool drained = false; 5121 unsigned long max; 5122 int err; 5123 5124 buf = strstrip(buf); 5125 err = page_counter_memparse(buf, "max", &max); 5126 if (err) 5127 return err; 5128 5129 xchg(&memcg->memory.limit, max); 5130 5131 for (;;) { 5132 unsigned long nr_pages = page_counter_read(&memcg->memory); 5133 5134 if (nr_pages <= max) 5135 break; 5136 5137 if (signal_pending(current)) { 5138 err = -EINTR; 5139 break; 5140 } 5141 5142 if (!drained) { 5143 drain_all_stock(memcg); 5144 drained = true; 5145 continue; 5146 } 5147 5148 if (nr_reclaims) { 5149 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5150 GFP_KERNEL, true)) 5151 nr_reclaims--; 5152 continue; 5153 } 5154 5155 mem_cgroup_events(memcg, MEMCG_OOM, 1); 5156 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5157 break; 5158 } 5159 5160 
memcg_wb_domain_size_changed(memcg); 5161 return nbytes; 5162 } 5163 5164 static int memory_events_show(struct seq_file *m, void *v) 5165 { 5166 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5167 5168 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5169 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5170 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5171 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5172 5173 return 0; 5174 } 5175 5176 static int memory_stat_show(struct seq_file *m, void *v) 5177 { 5178 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5179 unsigned long stat[MEMCG_NR_STAT]; 5180 unsigned long events[MEMCG_NR_EVENTS]; 5181 int i; 5182 5183 /* 5184 * Provide statistics on the state of the memory subsystem as 5185 * well as cumulative event counters that show past behavior. 5186 * 5187 * This list is ordered following a combination of these gradients: 5188 * 1) generic big picture -> specifics and details 5189 * 2) reflecting userspace activity -> reflecting kernel heuristics 5190 * 5191 * Current memory state: 5192 */ 5193 5194 tree_stat(memcg, stat); 5195 tree_events(memcg, events); 5196 5197 seq_printf(m, "anon %llu\n", 5198 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE); 5199 seq_printf(m, "file %llu\n", 5200 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE); 5201 seq_printf(m, "kernel_stack %llu\n", 5202 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024); 5203 seq_printf(m, "slab %llu\n", 5204 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] + 5205 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5206 seq_printf(m, "sock %llu\n", 5207 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5208 5209 seq_printf(m, "file_mapped %llu\n", 5210 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE); 5211 seq_printf(m, "file_dirty %llu\n", 5212 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE); 5213 seq_printf(m, "file_writeback %llu\n", 5214 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE); 5215 5216 for (i = 0; i < NR_LRU_LISTS; i++) { 5217 struct mem_cgroup *mi; 5218 unsigned long val = 0; 5219 5220 for_each_mem_cgroup_tree(mi, memcg) 5221 val += mem_cgroup_nr_lru_pages(mi, BIT(i)); 5222 seq_printf(m, "%s %llu\n", 5223 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5224 } 5225 5226 seq_printf(m, "slab_reclaimable %llu\n", 5227 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE); 5228 seq_printf(m, "slab_unreclaimable %llu\n", 5229 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5230 5231 /* Accumulated memory events */ 5232 5233 seq_printf(m, "pgfault %lu\n", 5234 events[MEM_CGROUP_EVENTS_PGFAULT]); 5235 seq_printf(m, "pgmajfault %lu\n", 5236 events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 5237 5238 return 0; 5239 } 5240 5241 static struct cftype memory_files[] = { 5242 { 5243 .name = "current", 5244 .flags = CFTYPE_NOT_ON_ROOT, 5245 .read_u64 = memory_current_read, 5246 }, 5247 { 5248 .name = "low", 5249 .flags = CFTYPE_NOT_ON_ROOT, 5250 .seq_show = memory_low_show, 5251 .write = memory_low_write, 5252 }, 5253 { 5254 .name = "high", 5255 .flags = CFTYPE_NOT_ON_ROOT, 5256 .seq_show = memory_high_show, 5257 .write = memory_high_write, 5258 }, 5259 { 5260 .name = "max", 5261 .flags = CFTYPE_NOT_ON_ROOT, 5262 .seq_show = memory_max_show, 5263 .write = memory_max_write, 5264 }, 5265 { 5266 .name = "events", 5267 .flags = CFTYPE_NOT_ON_ROOT, 5268 .file_offset = offsetof(struct mem_cgroup, events_file), 5269 .seq_show = memory_events_show, 5270 }, 5271 { 5272 .name = "stat", 5273 .flags = CFTYPE_NOT_ON_ROOT, 5274 .seq_show = 
memory_stat_show, 5275 }, 5276 { } /* terminate */ 5277 }; 5278 5279 struct cgroup_subsys memory_cgrp_subsys = { 5280 .css_alloc = mem_cgroup_css_alloc, 5281 .css_online = mem_cgroup_css_online, 5282 .css_offline = mem_cgroup_css_offline, 5283 .css_released = mem_cgroup_css_released, 5284 .css_free = mem_cgroup_css_free, 5285 .css_reset = mem_cgroup_css_reset, 5286 .can_attach = mem_cgroup_can_attach, 5287 .cancel_attach = mem_cgroup_cancel_attach, 5288 .post_attach = mem_cgroup_move_task, 5289 .bind = mem_cgroup_bind, 5290 .dfl_cftypes = memory_files, 5291 .legacy_cftypes = mem_cgroup_legacy_files, 5292 .early_init = 0, 5293 }; 5294 5295 /** 5296 * mem_cgroup_low - check if memory consumption is below the normal range 5297 * @root: the highest ancestor to consider 5298 * @memcg: the memory cgroup to check 5299 * 5300 * Returns %true if memory consumption of @memcg, and that of all 5301 * configurable ancestors up to @root, is below the normal range. 5302 */ 5303 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5304 { 5305 if (mem_cgroup_disabled()) 5306 return false; 5307 5308 /* 5309 * The toplevel group doesn't have a configurable range, so 5310 * it's never low when looked at directly, and it is not 5311 * considered an ancestor when assessing the hierarchy. 5312 */ 5313 5314 if (memcg == root_mem_cgroup) 5315 return false; 5316 5317 if (page_counter_read(&memcg->memory) >= memcg->low) 5318 return false; 5319 5320 while (memcg != root) { 5321 memcg = parent_mem_cgroup(memcg); 5322 5323 if (memcg == root_mem_cgroup) 5324 break; 5325 5326 if (page_counter_read(&memcg->memory) >= memcg->low) 5327 return false; 5328 } 5329 return true; 5330 } 5331 5332 /** 5333 * mem_cgroup_try_charge - try charging a page 5334 * @page: page to charge 5335 * @mm: mm context of the victim 5336 * @gfp_mask: reclaim mode 5337 * @memcgp: charged memcg return 5338 * @compound: charge the page as compound or small page 5339 * 5340 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5341 * pages according to @gfp_mask if necessary. 5342 * 5343 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5344 * Otherwise, an error code is returned. 5345 * 5346 * After page->mapping has been set up, the caller must finalize the 5347 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5348 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5349 */ 5350 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5351 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5352 bool compound) 5353 { 5354 struct mem_cgroup *memcg = NULL; 5355 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5356 int ret = 0; 5357 5358 if (mem_cgroup_disabled()) 5359 goto out; 5360 5361 if (PageSwapCache(page)) { 5362 /* 5363 * Every swap fault against a single page tries to charge the 5364 * page, bail as early as possible. shmem_unuse() encounters 5365 * already charged pages, too. The USED bit is protected by 5366 * the page lock, which serializes swap cache removal, which 5367 * in turn serializes uncharging. 
5368 */ 5369 VM_BUG_ON_PAGE(!PageLocked(page), page); 5370 if (page->mem_cgroup) 5371 goto out; 5372 5373 if (do_swap_account) { 5374 swp_entry_t ent = { .val = page_private(page), }; 5375 unsigned short id = lookup_swap_cgroup_id(ent); 5376 5377 rcu_read_lock(); 5378 memcg = mem_cgroup_from_id(id); 5379 if (memcg && !css_tryget_online(&memcg->css)) 5380 memcg = NULL; 5381 rcu_read_unlock(); 5382 } 5383 } 5384 5385 if (!memcg) 5386 memcg = get_mem_cgroup_from_mm(mm); 5387 5388 ret = try_charge(memcg, gfp_mask, nr_pages); 5389 5390 css_put(&memcg->css); 5391 out: 5392 *memcgp = memcg; 5393 return ret; 5394 } 5395 5396 /** 5397 * mem_cgroup_commit_charge - commit a page charge 5398 * @page: page to charge 5399 * @memcg: memcg to charge the page to 5400 * @lrucare: page might be on LRU already 5401 * @compound: charge the page as compound or small page 5402 * 5403 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5404 * after page->mapping has been set up. This must happen atomically 5405 * as part of the page instantiation, i.e. under the page table lock 5406 * for anonymous pages, under the page lock for page and swap cache. 5407 * 5408 * In addition, the page must not be on the LRU during the commit, to 5409 * prevent racing with task migration. If it might be, use @lrucare. 5410 * 5411 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5412 */ 5413 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5414 bool lrucare, bool compound) 5415 { 5416 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5417 5418 VM_BUG_ON_PAGE(!page->mapping, page); 5419 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5420 5421 if (mem_cgroup_disabled()) 5422 return; 5423 /* 5424 * Swap faults will attempt to charge the same page multiple 5425 * times. But reuse_swap_page() might have removed the page 5426 * from swapcache already, so we can't check PageSwapCache(). 5427 */ 5428 if (!memcg) 5429 return; 5430 5431 commit_charge(page, memcg, lrucare); 5432 5433 local_irq_disable(); 5434 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 5435 memcg_check_events(memcg, page); 5436 local_irq_enable(); 5437 5438 if (do_memsw_account() && PageSwapCache(page)) { 5439 swp_entry_t entry = { .val = page_private(page) }; 5440 /* 5441 * The swap entry might not get freed for a long time, 5442 * let's not wait for it. The page already received a 5443 * memory+swap charge, drop the swap entry duplicate. 5444 */ 5445 mem_cgroup_uncharge_swap(entry); 5446 } 5447 } 5448 5449 /** 5450 * mem_cgroup_cancel_charge - cancel a page charge 5451 * @page: page to charge 5452 * @memcg: memcg to charge the page to 5453 * @compound: charge the page as compound or small page 5454 * 5455 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5456 */ 5457 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 5458 bool compound) 5459 { 5460 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5461 5462 if (mem_cgroup_disabled()) 5463 return; 5464 /* 5465 * Swap faults will attempt to charge the same page multiple 5466 * times. But reuse_swap_page() might have removed the page 5467 * from swapcache already, so we can't check PageSwapCache(). 
5468 */ 5469 if (!memcg) 5470 return; 5471 5472 cancel_charge(memcg, nr_pages); 5473 } 5474 5475 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5476 unsigned long nr_anon, unsigned long nr_file, 5477 unsigned long nr_huge, unsigned long nr_kmem, 5478 struct page *dummy_page) 5479 { 5480 unsigned long nr_pages = nr_anon + nr_file + nr_kmem; 5481 unsigned long flags; 5482 5483 if (!mem_cgroup_is_root(memcg)) { 5484 page_counter_uncharge(&memcg->memory, nr_pages); 5485 if (do_memsw_account()) 5486 page_counter_uncharge(&memcg->memsw, nr_pages); 5487 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem) 5488 page_counter_uncharge(&memcg->kmem, nr_kmem); 5489 memcg_oom_recover(memcg); 5490 } 5491 5492 local_irq_save(flags); 5493 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5494 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5495 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5496 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5497 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5498 memcg_check_events(memcg, dummy_page); 5499 local_irq_restore(flags); 5500 5501 if (!mem_cgroup_is_root(memcg)) 5502 css_put_many(&memcg->css, nr_pages); 5503 } 5504 5505 static void uncharge_list(struct list_head *page_list) 5506 { 5507 struct mem_cgroup *memcg = NULL; 5508 unsigned long nr_anon = 0; 5509 unsigned long nr_file = 0; 5510 unsigned long nr_huge = 0; 5511 unsigned long nr_kmem = 0; 5512 unsigned long pgpgout = 0; 5513 struct list_head *next; 5514 struct page *page; 5515 5516 /* 5517 * Note that the list can be a single page->lru; hence the 5518 * do-while loop instead of a simple list_for_each_entry(). 5519 */ 5520 next = page_list->next; 5521 do { 5522 page = list_entry(next, struct page, lru); 5523 next = page->lru.next; 5524 5525 VM_BUG_ON_PAGE(PageLRU(page), page); 5526 VM_BUG_ON_PAGE(page_count(page), page); 5527 5528 if (!page->mem_cgroup) 5529 continue; 5530 5531 /* 5532 * Nobody should be changing or seriously looking at 5533 * page->mem_cgroup at this point, we have fully 5534 * exclusive access to the page. 5535 */ 5536 5537 if (memcg != page->mem_cgroup) { 5538 if (memcg) { 5539 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5540 nr_huge, nr_kmem, page); 5541 pgpgout = nr_anon = nr_file = 5542 nr_huge = nr_kmem = 0; 5543 } 5544 memcg = page->mem_cgroup; 5545 } 5546 5547 if (!PageKmemcg(page)) { 5548 unsigned int nr_pages = 1; 5549 5550 if (PageTransHuge(page)) { 5551 nr_pages <<= compound_order(page); 5552 nr_huge += nr_pages; 5553 } 5554 if (PageAnon(page)) 5555 nr_anon += nr_pages; 5556 else 5557 nr_file += nr_pages; 5558 pgpgout++; 5559 } else { 5560 nr_kmem += 1 << compound_order(page); 5561 __ClearPageKmemcg(page); 5562 } 5563 5564 page->mem_cgroup = NULL; 5565 } while (next != page_list); 5566 5567 if (memcg) 5568 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5569 nr_huge, nr_kmem, page); 5570 } 5571 5572 /** 5573 * mem_cgroup_uncharge - uncharge a page 5574 * @page: page to uncharge 5575 * 5576 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5577 * mem_cgroup_commit_charge(). 
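 *
 * A minimal sketch of the full charge lifecycle, assuming a
 * hypothetical page-instantiation path ("install_page_fails" is a
 * placeholder condition, and the final mem_cgroup_uncharge() happens
 * when the page is eventually freed):
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return -ENOMEM;
 *	if (install_page_fails) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return -ENOMEM;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	...
 *	mem_cgroup_uncharge(page);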
5578 */ 5579 void mem_cgroup_uncharge(struct page *page) 5580 { 5581 if (mem_cgroup_disabled()) 5582 return; 5583 5584 /* Don't touch page->lru of any random page, pre-check: */ 5585 if (!page->mem_cgroup) 5586 return; 5587 5588 INIT_LIST_HEAD(&page->lru); 5589 uncharge_list(&page->lru); 5590 } 5591 5592 /** 5593 * mem_cgroup_uncharge_list - uncharge a list of pages 5594 * @page_list: list of pages to uncharge 5595 * 5596 * Uncharge a list of pages previously charged with 5597 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5598 */ 5599 void mem_cgroup_uncharge_list(struct list_head *page_list) 5600 { 5601 if (mem_cgroup_disabled()) 5602 return; 5603 5604 if (!list_empty(page_list)) 5605 uncharge_list(page_list); 5606 } 5607 5608 /** 5609 * mem_cgroup_migrate - charge a page's replacement 5610 * @oldpage: currently circulating page 5611 * @newpage: replacement page 5612 * 5613 * Charge @newpage as a replacement page for @oldpage. @oldpage will 5614 * be uncharged upon free. 5615 * 5616 * Both pages must be locked, and @newpage->mapping must be set up. 5617 */ 5618 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 5619 { 5620 struct mem_cgroup *memcg; 5621 unsigned int nr_pages; 5622 bool compound; 5623 unsigned long flags; 5624 5625 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5626 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5627 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5628 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5629 newpage); 5630 5631 if (mem_cgroup_disabled()) 5632 return; 5633 5634 /* Page cache replacement: new page already charged? */ 5635 if (newpage->mem_cgroup) 5636 return; 5637 5638 /* Swapcache readahead pages can get replaced before being charged */ 5639 memcg = oldpage->mem_cgroup; 5640 if (!memcg) 5641 return; 5642 5643 /* Force-charge the new page. The old one will be freed soon */ 5644 compound = PageTransHuge(newpage); 5645 nr_pages = compound ? hpage_nr_pages(newpage) : 1; 5646 5647 page_counter_charge(&memcg->memory, nr_pages); 5648 if (do_memsw_account()) 5649 page_counter_charge(&memcg->memsw, nr_pages); 5650 css_get_many(&memcg->css, nr_pages); 5651 5652 commit_charge(newpage, memcg, false); 5653 5654 local_irq_save(flags); 5655 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5656 memcg_check_events(memcg, newpage); 5657 local_irq_restore(flags); 5658 } 5659 5660 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5661 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5662 5663 void mem_cgroup_sk_alloc(struct sock *sk) 5664 { 5665 struct mem_cgroup *memcg; 5666 5667 if (!mem_cgroup_sockets_enabled) 5668 return; 5669 5670 /* 5671 * Socket cloning can throw us here with sk_memcg already 5672 * filled. It won't, however, necessarily happen from 5673 * process context. So the test for root memcg given 5674 * the current task's memcg won't help us in this case. 5675 * 5676 * Respecting the original socket's memcg is a better 5677 * decision in this case.
5678 */ 5679 if (sk->sk_memcg) { 5680 BUG_ON(mem_cgroup_is_root(sk->sk_memcg)); 5681 css_get(&sk->sk_memcg->css); 5682 return; 5683 } 5684 5685 rcu_read_lock(); 5686 memcg = mem_cgroup_from_task(current); 5687 if (memcg == root_mem_cgroup) 5688 goto out; 5689 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 5690 goto out; 5691 if (css_tryget_online(&memcg->css)) 5692 sk->sk_memcg = memcg; 5693 out: 5694 rcu_read_unlock(); 5695 } 5696 5697 void mem_cgroup_sk_free(struct sock *sk) 5698 { 5699 if (sk->sk_memcg) 5700 css_put(&sk->sk_memcg->css); 5701 } 5702 5703 /** 5704 * mem_cgroup_charge_skmem - charge socket memory 5705 * @memcg: memcg to charge 5706 * @nr_pages: number of pages to charge 5707 * 5708 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 5709 * @memcg's configured limit, %false if the charge had to be forced. 5710 */ 5711 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5712 { 5713 gfp_t gfp_mask = GFP_KERNEL; 5714 5715 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5716 struct page_counter *fail; 5717 5718 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 5719 memcg->tcpmem_pressure = 0; 5720 return true; 5721 } 5722 page_counter_charge(&memcg->tcpmem, nr_pages); 5723 memcg->tcpmem_pressure = 1; 5724 return false; 5725 } 5726 5727 /* Don't block in the packet receive path */ 5728 if (in_softirq()) 5729 gfp_mask = GFP_NOWAIT; 5730 5731 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages); 5732 5733 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 5734 return true; 5735 5736 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 5737 return false; 5738 } 5739 5740 /** 5741 * mem_cgroup_uncharge_skmem - uncharge socket memory 5742 * @memcg: memcg to uncharge 5743 * @nr_pages: number of pages to uncharge 5744 */ 5745 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5746 { 5747 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5748 page_counter_uncharge(&memcg->tcpmem, nr_pages); 5749 return; 5750 } 5751 5752 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages); 5753 5754 page_counter_uncharge(&memcg->memory, nr_pages); 5755 css_put_many(&memcg->css, nr_pages); 5756 } 5757 5758 static int __init cgroup_memory(char *s) 5759 { 5760 char *token; 5761 5762 while ((token = strsep(&s, ",")) != NULL) { 5763 if (!*token) 5764 continue; 5765 if (!strcmp(token, "nosocket")) 5766 cgroup_memory_nosocket = true; 5767 if (!strcmp(token, "nokmem")) 5768 cgroup_memory_nokmem = true; 5769 } 5770 return 0; 5771 } 5772 __setup("cgroup.memory=", cgroup_memory); 5773 5774 /* 5775 * subsys_initcall() for memory controller. 5776 * 5777 * Some parts like hotcpu_notifier() have to be initialized from this context 5778 * because of lock dependencies (cgroup_lock -> cpu hotplug), but basically 5779 * everything that doesn't depend on a specific mem_cgroup structure should 5780 * be initialized from here. 5781 */ 5782 static int __init mem_cgroup_init(void) 5783 { 5784 int cpu, node; 5785 5786 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5787 5788 for_each_possible_cpu(cpu) 5789 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5790 drain_local_stock); 5791 5792 for_each_node(node) { 5793 struct mem_cgroup_tree_per_node *rtpn; 5794 5795 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5796 node_online(node) ?
node : NUMA_NO_NODE); 5797 5798 rtpn->rb_root = RB_ROOT; 5799 spin_lock_init(&rtpn->lock); 5800 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5801 } 5802 5803 return 0; 5804 } 5805 subsys_initcall(mem_cgroup_init); 5806 5807 #ifdef CONFIG_MEMCG_SWAP 5808 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 5809 { 5810 while (!atomic_inc_not_zero(&memcg->id.ref)) { 5811 /* 5812 * The root cgroup cannot be destroyed, so its refcount must 5813 * always be >= 1. 5814 */ 5815 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 5816 VM_BUG_ON(1); 5817 break; 5818 } 5819 memcg = parent_mem_cgroup(memcg); 5820 if (!memcg) 5821 memcg = root_mem_cgroup; 5822 } 5823 return memcg; 5824 } 5825 5826 /** 5827 * mem_cgroup_swapout - transfer a memsw charge to swap 5828 * @page: page whose memsw charge to transfer 5829 * @entry: swap entry to move the charge to 5830 * 5831 * Transfer the memsw charge of @page to @entry. 5832 */ 5833 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5834 { 5835 struct mem_cgroup *memcg, *swap_memcg; 5836 unsigned short oldid; 5837 5838 VM_BUG_ON_PAGE(PageLRU(page), page); 5839 VM_BUG_ON_PAGE(page_count(page), page); 5840 5841 if (!do_memsw_account()) 5842 return; 5843 5844 memcg = page->mem_cgroup; 5845 5846 /* Readahead page, never charged */ 5847 if (!memcg) 5848 return; 5849 5850 /* 5851 * In case the memcg owning these pages has been offlined and doesn't 5852 * have an ID allocated to it anymore, charge the closest online 5853 * ancestor for the swap instead and transfer the memory+swap charge. 5854 */ 5855 swap_memcg = mem_cgroup_id_get_online(memcg); 5856 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); 5857 VM_BUG_ON_PAGE(oldid, page); 5858 mem_cgroup_swap_statistics(swap_memcg, true); 5859 5860 page->mem_cgroup = NULL; 5861 5862 if (!mem_cgroup_is_root(memcg)) 5863 page_counter_uncharge(&memcg->memory, 1); 5864 5865 if (memcg != swap_memcg) { 5866 if (!mem_cgroup_is_root(swap_memcg)) 5867 page_counter_charge(&swap_memcg->memsw, 1); 5868 page_counter_uncharge(&memcg->memsw, 1); 5869 } 5870 5871 /* 5872 * Interrupts should be disabled here because the caller holds 5873 * mapping->tree_lock, which is taken with interrupts disabled. It is 5874 * important here to have the interrupts disabled because it is the 5875 * only synchronisation we have for updating the per-CPU variables. 5876 */ 5877 VM_BUG_ON(!irqs_disabled()); 5878 mem_cgroup_charge_statistics(memcg, page, false, -1); 5879 memcg_check_events(memcg, page); 5880 5881 if (!mem_cgroup_is_root(memcg)) 5882 css_put(&memcg->css); 5883 } 5884 5885 /** 5886 * mem_cgroup_try_charge_swap - try charging a swap entry 5887 * @page: page being added to swap 5888 * @entry: swap entry to charge 5889 * 5890 * Try to charge @entry to the memcg that @page belongs to. 5891 * 5892 * Returns 0 on success, -ENOMEM on failure.
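 *
 * A sketch of the intended use, modeled on the add_to_swap() caller
 * in the core swap code (the exact call site is an assumption, not
 * part of this file): the freshly allocated slot is released again
 * if the swap charge fails:
 *
 *	entry = get_swap_page();
 *	if (!entry.val)
 *		return 0;
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		swapcache_free(entry);
 *		return 0;
 *	}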
5893 */ 5894 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 5895 { 5896 struct mem_cgroup *memcg; 5897 struct page_counter *counter; 5898 unsigned short oldid; 5899 5900 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) 5901 return 0; 5902 5903 memcg = page->mem_cgroup; 5904 5905 /* Readahead page, never charged */ 5906 if (!memcg) 5907 return 0; 5908 5909 memcg = mem_cgroup_id_get_online(memcg); 5910 5911 if (!mem_cgroup_is_root(memcg) && 5912 !page_counter_try_charge(&memcg->swap, 1, &counter)) { 5913 mem_cgroup_id_put(memcg); 5914 return -ENOMEM; 5915 } 5916 5917 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5918 VM_BUG_ON_PAGE(oldid, page); 5919 mem_cgroup_swap_statistics(memcg, true); 5920 5921 return 0; 5922 } 5923 5924 /** 5925 * mem_cgroup_uncharge_swap - uncharge a swap entry 5926 * @entry: swap entry to uncharge 5927 * 5928 * Drop the swap charge associated with @entry. 5929 */ 5930 void mem_cgroup_uncharge_swap(swp_entry_t entry) 5931 { 5932 struct mem_cgroup *memcg; 5933 unsigned short id; 5934 5935 if (!do_swap_account) 5936 return; 5937 5938 id = swap_cgroup_record(entry, 0); 5939 rcu_read_lock(); 5940 memcg = mem_cgroup_from_id(id); 5941 if (memcg) { 5942 if (!mem_cgroup_is_root(memcg)) { 5943 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5944 page_counter_uncharge(&memcg->swap, 1); 5945 else 5946 page_counter_uncharge(&memcg->memsw, 1); 5947 } 5948 mem_cgroup_swap_statistics(memcg, false); 5949 mem_cgroup_id_put(memcg); 5950 } 5951 rcu_read_unlock(); 5952 } 5953 5954 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5955 { 5956 long nr_swap_pages = get_nr_swap_pages(); 5957 5958 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5959 return nr_swap_pages; 5960 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 5961 nr_swap_pages = min_t(long, nr_swap_pages, 5962 READ_ONCE(memcg->swap.limit) - 5963 page_counter_read(&memcg->swap)); 5964 return nr_swap_pages; 5965 } 5966 5967 bool mem_cgroup_swap_full(struct page *page) 5968 { 5969 struct mem_cgroup *memcg; 5970 5971 VM_BUG_ON_PAGE(!PageLocked(page), page); 5972 5973 if (vm_swap_full()) 5974 return true; 5975 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5976 return false; 5977 5978 memcg = page->mem_cgroup; 5979 if (!memcg) 5980 return false; 5981 5982 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 5983 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit) 5984 return true; 5985 5986 return false; 5987 } 5988 5989 /* remember the boot option */ 5990 #ifdef CONFIG_MEMCG_SWAP_ENABLED 5991 static int really_do_swap_account __initdata = 1; 5992 #else 5993 static int really_do_swap_account __initdata; 5994 #endif 5995 5996 static int __init enable_swap_account(char *s) 5997 { 5998 if (!strcmp(s, "1")) 5999 really_do_swap_account = 1; 6000 else if (!strcmp(s, "0")) 6001 really_do_swap_account = 0; 6002 return 1; 6003 } 6004 __setup("swapaccount=", enable_swap_account); 6005 6006 static u64 swap_current_read(struct cgroup_subsys_state *css, 6007 struct cftype *cft) 6008 { 6009 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6010 6011 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 6012 } 6013 6014 static int swap_max_show(struct seq_file *m, void *v) 6015 { 6016 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 6017 unsigned long max = READ_ONCE(memcg->swap.limit); 6018 6019 if (max == PAGE_COUNTER_MAX) 6020 seq_puts(m, "max\n"); 6021 else 6022 seq_printf(m, "%llu\n",
(u64)max * PAGE_SIZE); 6023 6024 return 0; 6025 } 6026 6027 static ssize_t swap_max_write(struct kernfs_open_file *of, 6028 char *buf, size_t nbytes, loff_t off) 6029 { 6030 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6031 unsigned long max; 6032 int err; 6033 6034 buf = strstrip(buf); 6035 err = page_counter_memparse(buf, "max", &max); 6036 if (err) 6037 return err; 6038 6039 mutex_lock(&memcg_limit_mutex); 6040 err = page_counter_limit(&memcg->swap, max); 6041 mutex_unlock(&memcg_limit_mutex); 6042 if (err) 6043 return err; 6044 6045 return nbytes; 6046 } 6047 6048 static struct cftype swap_files[] = { 6049 { 6050 .name = "swap.current", 6051 .flags = CFTYPE_NOT_ON_ROOT, 6052 .read_u64 = swap_current_read, 6053 }, 6054 { 6055 .name = "swap.max", 6056 .flags = CFTYPE_NOT_ON_ROOT, 6057 .seq_show = swap_max_show, 6058 .write = swap_max_write, 6059 }, 6060 { } /* terminate */ 6061 }; 6062 6063 static struct cftype memsw_cgroup_files[] = { 6064 { 6065 .name = "memsw.usage_in_bytes", 6066 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6067 .read_u64 = mem_cgroup_read_u64, 6068 }, 6069 { 6070 .name = "memsw.max_usage_in_bytes", 6071 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6072 .write = mem_cgroup_reset, 6073 .read_u64 = mem_cgroup_read_u64, 6074 }, 6075 { 6076 .name = "memsw.limit_in_bytes", 6077 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6078 .write = mem_cgroup_write, 6079 .read_u64 = mem_cgroup_read_u64, 6080 }, 6081 { 6082 .name = "memsw.failcnt", 6083 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6084 .write = mem_cgroup_reset, 6085 .read_u64 = mem_cgroup_read_u64, 6086 }, 6087 { }, /* terminate */ 6088 }; 6089 6090 static int __init mem_cgroup_swap_init(void) 6091 { 6092 if (!mem_cgroup_disabled() && really_do_swap_account) { 6093 do_swap_account = 1; 6094 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, 6095 swap_files)); 6096 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 6097 memsw_cgroup_files)); 6098 } 6099 return 0; 6100 } 6101 subsys_initcall(mem_cgroup_swap_init); 6102 6103 #endif /* CONFIG_MEMCG_SWAP */ 6104