/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;
/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};
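/*
 * Sketch of the cgroup v1 registration flow (see
 * Documentation/cgroup-v1/memory.txt; the file descriptors and the
 * threshold variable below are illustrative): userspace creates an
 * eventfd, opens a control file such as memory.usage_in_bytes, and
 * writes "<event_fd> <control_fd> <args>" to cgroup.event_control:
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("cgroup.event_control", O_WRONLY);
 *
 *	dprintf(cfd, "%d %d %llu", efd, ufd, threshold_in_bytes);
 *
 * A read(2) on the eventfd then blocks until the register_event()
 * machinery signals it via eventfd_signal().
 */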
static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL		(0)
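/*
 * Worked example of the encoding above: MEMFILE_PRIVATE(_MEMSWAP, 3)
 * evaluates to (1 << 16) | 3 == 0x10003, from which MEMFILE_TYPE()
 * recovers _MEMSWAP (1) and MEMFILE_ATTR() recovers 3.
 */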
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this is that it works
 * better in sparse environments, where we have a lot of memcgs but only
 * a few kmem-limited ones.  If we had, for instance, 200 memcgs and none
 * but the 200th were kmem-limited, we would have to keep a 200-entry
 * array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids.  It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time.  On a small machine, 4 kmem-limited
 * cgroups is a reasonable guess.  In the future, it could be a parameter or
 * tunable, but that is not strictly necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids.  Ideally, we could
 * get this constant directly from cgroup, but it is understandable that this
 * is better kept as an internal representation in cgroup.c.  In any case, the
 * cgrp_id space is not getting any smaller, and we don't necessarily have to
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler.  Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
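/*
 * Callers outside this file typically test the key through the
 * memcg_kmem_enabled() helper in <linux/memcontrol.h>, roughly:
 *
 *	static inline bool memcg_kmem_enabled(void)
 *	{
 *		return static_branch_unlikely(&memcg_kmem_enabled_key);
 *	}
 *
 * so the kmem accounting hooks cost only a patched-out branch until the
 * first kmem-accounted cgroup shows up.
 */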

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged
 * to and return its inode number or 0 if @page is not charged to any cgroup.
 * It is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a
 * moment after page_cgroup_ino() returns, so it should only be used by
 * callers that do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount.
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
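/*
 * Worked example: with a soft limit of 200 pages and a usage of 300
 * pages, soft_limit_excess() returns 100.  The per-node trees above are
 * keyed on this excess in ascending order, so the rightmost node (see
 * __mem_cgroup_largest_soft_limit_node() below) is the worst offender.
 */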
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again.  mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back; we will
	 * add it back at the end of reclaim to its correct position in
	 * the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return the page count for a single (non-recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read.  There is a trade-off
 * between the cost of reading and the precision of the value, so we could
 * implement a similar periodic synchronization for memcg's counters.
 *
 * But this _read() function is used for the user interface now.  Users
 * account memory usage by memory cgroup and _always_ require an exact
 * value, because they account memory.  Even if we provided a
 * quick-and-fuzzy read, we would always have to visit all online CPUs
 * and sum up.  So, for now, no extra synchronization is implemented (it
 * is only implemented for CPU hotplug).
 *
 * If there are kernel-internal users which can make do with an inexact
 * value, and reading all CPU values becomes a performance bottleneck in
 * some common workload, thresholds and synchronization as in vmstat[]
 * should be implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache.  Shmem/tmpfs is
	 * counted as CACHE even if it's on the ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
			       nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
			       nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
			       nr_pages);
	}

	/* A pagein of a big page is one event, so ignore the page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	unsigned long nr = 0;
	struct mem_cgroup_per_node *mz;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		mz = mem_cgroup_nodeinfo(memcg, nid);
		nr += mz->lru_size[lru];
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
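/*
 * Worked example: with THRESHOLDS_EVENTS_TARGET == 128, the threshold
 * notifiers are evaluated at most once per 128 page-in/page-out events
 * on each CPU; the soft limit tree and the NUMA info updates use the
 * coarser 1024-event targets.
 */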
/* Check events in order. */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
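/*
 * Example (illustrative): a full pre-order walk that tallies the memcgs
 * in a subtree, the same pattern used by mem_cgroup_count_children()
 * below:
 *
 *	struct mem_cgroup *iter;
 *	int num = 0;
 *
 *	for_each_mem_cgroup_tree(iter, root)
 *		num++;
 *
 * Leaving such a loop early requires mem_cgroup_iter_break(root, iter)
 * to drop the reference held on the last returned memcg.
 */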
/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task.  If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
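/*
 * Sketch of the isolation protocol this depends on, modeled on
 * lock_page_lru() further down in this file:
 *
 *	spin_lock_irq(zone_lru_lock(zone));
 *	if (PageLRU(page)) {
 *		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
 *		ClearPageLRU(page);
 *		del_page_from_lru_list(page, lruvec, page_lru(page));
 *	}
 *	spin_unlock_irq(zone_lru_lock(zone));
 */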
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;
	bool empty;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = mz->lru_size + lru;
	empty = list_empty(lruvec->lists + lru);

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0 || empty != !size,
		"%s(%p, %d, %d): lru_size %ld but %sempty\n",
		__func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
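/*
 * Worked example: with a memory limit of 1000 pages and a usage of 900,
 * the margin is 100 pages.  With legacy memory+swap accounting enabled
 * and the memsw counter already at its limit, the margin collapses to 0
 * even though the plain memory counter still has room.
 */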
/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup.  This is for waiting at high memory pressure caused
 * by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike the task_move routines, we access mc.to and mc.from without
	 * mutual exclusion via cgroup_mutex.  Here, we take the spinlock
	 * instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to the memory
 * controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * Return the number of memcgs under the hierarchy tree.  Returns
 * 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}
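/*
 * Worked example for the above: with memory.limit == 1000 pages,
 * total_swap_pages == 300 and memsw.limit == 1100 pages, the result is
 * min(1000 + 300, 1100) == 1100 pages.  With swappiness 0, the swap
 * term is skipped and the plain memory limit is returned.
 */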
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the caller wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node.  Returns true if there are any
 * reclaimable pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
					     int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist.  So update the list loosely once per 10
 * seconds.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Select a node to start reclaim from.  Because all we need is to reduce
 * the usage counter, starting from anywhere is OK.  Reclaiming from the
 * current node has both pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we'll use or have used, so it may hurt the LRUs there.  And if
 * several threads hit their limits, they will contend on one node.  But
 * freeing from a remote node costs more for memory reclaim because of
 * memory latency.
 *
 * For now, we use round-robin.  A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable
	 * pages last time it really checked all the LRUs due to rate
	 * limiting.  Fall back to the current node in that case for
	 * simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too high, so we don't
				 * reclaim too much, nor too low, so we don't
				 * keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}
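/*
 * Worked example: if the hierarchy starts out 1000 pages over its soft
 * limit, the loop above keeps picking victims until at least
 * excess >> 2 == 250 pages have been reclaimed (or the loop limits
 * trigger), bounding both under- and over-reclaim.
 */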
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running it, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot grant the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we already set up, up to the failing
		 * subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called.  Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
				   unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}
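/*
 * Sketch of the main call site (mm/oom_kill.c): when a page fault ends
 * in VM_FAULT_OOM, pagefault_out_of_memory() first tries
 *
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;
 *
 * and only falls through to the global OOM killer when no memcg OOM
 * context was recorded by mem_cgroup_oom() above.
 */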
/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
 */
void lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(unlock_page_memcg);
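/*
 * Typical usage (sketch; the stat helper shown is illustrative, see the
 * real callers in mm/page-writeback.c and mm/rmap.c):
 *
 *	lock_page_memcg(page);
 *	if (TestClearPageDirty(page))
 *		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 *	unlock_page_memcg(page);
 *
 * which stabilizes page->mem_cgroup for the duration of the state change.
 */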
/*
 * Size of the first charge trial.  "32" comes from vmscan.c's magic value.
 * TODO: big numbers may be necessary on big iron.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Return charges cached in the percpu stock to the page counters and
 * reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges to the local per-CPU area, to be consumed by
 * consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	local_irq_restore(flags);
}
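/*
 * Worked example: try_charge() below charges CHARGE_BATCH (32) pages to
 * the page counters even when a single page was requested, then calls
 * refill_stock(memcg, 31).  The next 31 single-page charges on this CPU
 * are served from consume_stock() without touching the shared counters.
 */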
/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During task move, charges can be counted twice, so it's better
	 * to wait until the end of task_move if one is in progress.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force-charging it.
	 */
Allow memory usage to go over the limit 1980 * temporarily by force charging it. 1981 */ 1982 page_counter_charge(&memcg->memory, nr_pages); 1983 if (do_memsw_account()) 1984 page_counter_charge(&memcg->memsw, nr_pages); 1985 css_get_many(&memcg->css, nr_pages); 1986 1987 return 0; 1988 1989 done_restock: 1990 css_get_many(&memcg->css, batch); 1991 if (batch > nr_pages) 1992 refill_stock(memcg, batch - nr_pages); 1993 1994 /* 1995 * If the hierarchy is above the normal consumption range, schedule 1996 * reclaim on returning to userland. We can perform reclaim here 1997 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that 1998 * GFP_KERNEL can consistently be used during reclaim. @memcg is 1999 * not recorded as it most likely matches current's and won't 2000 * change in the meantime. As the high limit is checked again before 2001 * reclaim, the cost of a mismatch is negligible. 2002 */ 2003 do { 2004 if (page_counter_read(&memcg->memory) > memcg->high) { 2005 /* Don't bother a random interrupted task */ 2006 if (in_interrupt()) { 2007 schedule_work(&memcg->high_work); 2008 break; 2009 } 2010 current->memcg_nr_pages_over_high += batch; 2011 set_notify_resume(current); 2012 break; 2013 } 2014 } while ((memcg = parent_mem_cgroup(memcg))); 2015 2016 return 0; 2017 } 2018 2019 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2020 { 2021 if (mem_cgroup_is_root(memcg)) 2022 return; 2023 2024 page_counter_uncharge(&memcg->memory, nr_pages); 2025 if (do_memsw_account()) 2026 page_counter_uncharge(&memcg->memsw, nr_pages); 2027 2028 css_put_many(&memcg->css, nr_pages); 2029 } 2030 2031 static void lock_page_lru(struct page *page, int *isolated) 2032 { 2033 struct zone *zone = page_zone(page); 2034 2035 spin_lock_irq(zone_lru_lock(zone)); 2036 if (PageLRU(page)) { 2037 struct lruvec *lruvec; 2038 2039 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2040 ClearPageLRU(page); 2041 del_page_from_lru_list(page, lruvec, page_lru(page)); 2042 *isolated = 1; 2043 } else 2044 *isolated = 0; 2045 } 2046 2047 static void unlock_page_lru(struct page *page, int isolated) 2048 { 2049 struct zone *zone = page_zone(page); 2050 2051 if (isolated) { 2052 struct lruvec *lruvec; 2053 2054 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2055 VM_BUG_ON_PAGE(PageLRU(page), page); 2056 SetPageLRU(page); 2057 add_page_to_lru_list(page, lruvec, page_lru(page)); 2058 } 2059 spin_unlock_irq(zone_lru_lock(zone)); 2060 } 2061 2062 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2063 bool lrucare) 2064 { 2065 int isolated; 2066 2067 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2068 2069 /* 2070 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page 2071 * may already be on some other mem_cgroup's LRU. Take care of it.
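 *
 * The lrucare handling below follows the usual isolate/update/put-back
 * shape. As a loose sketch of that shape (illustrative only; on_lru(),
 * page->owner and new_lru stand in for PageLRU(), page->mem_cgroup and
 * the target lruvec):
 *
 *	spin_lock_irq(&lru_lock);
 *	if (on_lru(page)) {
 *		list_del(&page->lru);			// isolate
 *		page->owner = new_owner;		// update off-list
 *		list_add(&page->lru, &new_lru);		// put back
 *	} else {
 *		page->owner = new_owner;
 *	}
 *	spin_unlock_irq(&lru_lock);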
2072 */ 2073 if (lrucare) 2074 lock_page_lru(page, &isolated); 2075 2076 /* 2077 * Nobody should be changing or seriously looking at 2078 * page->mem_cgroup at this point: 2079 * 2080 * - the page is uncharged 2081 * 2082 * - the page is off-LRU 2083 * 2084 * - an anonymous fault has exclusive page access, except for 2085 * a locked page table 2086 * 2087 * - a page cache insertion, a swapin fault, or a migration 2088 * have the page locked 2089 */ 2090 page->mem_cgroup = memcg; 2091 2092 if (lrucare) 2093 unlock_page_lru(page, isolated); 2094 } 2095 2096 #ifndef CONFIG_SLOB 2097 static int memcg_alloc_cache_id(void) 2098 { 2099 int id, size; 2100 int err; 2101 2102 id = ida_simple_get(&memcg_cache_ida, 2103 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2104 if (id < 0) 2105 return id; 2106 2107 if (id < memcg_nr_cache_ids) 2108 return id; 2109 2110 /* 2111 * There's no space for the new id in memcg_caches arrays, 2112 * so we have to grow them. 2113 */ 2114 down_write(&memcg_cache_ids_sem); 2115 2116 size = 2 * (id + 1); 2117 if (size < MEMCG_CACHES_MIN_SIZE) 2118 size = MEMCG_CACHES_MIN_SIZE; 2119 else if (size > MEMCG_CACHES_MAX_SIZE) 2120 size = MEMCG_CACHES_MAX_SIZE; 2121 2122 err = memcg_update_all_caches(size); 2123 if (!err) 2124 err = memcg_update_all_list_lrus(size); 2125 if (!err) 2126 memcg_nr_cache_ids = size; 2127 2128 up_write(&memcg_cache_ids_sem); 2129 2130 if (err) { 2131 ida_simple_remove(&memcg_cache_ida, id); 2132 return err; 2133 } 2134 return id; 2135 } 2136 2137 static void memcg_free_cache_id(int id) 2138 { 2139 ida_simple_remove(&memcg_cache_ida, id); 2140 } 2141 2142 struct memcg_kmem_cache_create_work { 2143 struct mem_cgroup *memcg; 2144 struct kmem_cache *cachep; 2145 struct work_struct work; 2146 }; 2147 2148 static void memcg_kmem_cache_create_func(struct work_struct *w) 2149 { 2150 struct memcg_kmem_cache_create_work *cw = 2151 container_of(w, struct memcg_kmem_cache_create_work, work); 2152 struct mem_cgroup *memcg = cw->memcg; 2153 struct kmem_cache *cachep = cw->cachep; 2154 2155 memcg_create_kmem_cache(memcg, cachep); 2156 2157 css_put(&memcg->css); 2158 kfree(cw); 2159 } 2160 2161 /* 2162 * Enqueue the creation of a per-memcg kmem_cache. 2163 */ 2164 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2165 struct kmem_cache *cachep) 2166 { 2167 struct memcg_kmem_cache_create_work *cw; 2168 2169 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2170 if (!cw) 2171 return; 2172 2173 css_get(&memcg->css); 2174 2175 cw->memcg = memcg; 2176 cw->cachep = cachep; 2177 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2178 2179 schedule_work(&cw->work); 2180 } 2181 2182 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2183 struct kmem_cache *cachep) 2184 { 2185 /* 2186 * We need to stop accounting when we kmalloc, because if the 2187 * corresponding kmalloc cache is not yet created, the first allocation 2188 * in __memcg_schedule_kmem_cache_create will recurse. 2189 * 2190 * However, it is better to enclose the whole function. Depending on 2191 * the debugging options enabled, INIT_WORK(), for instance, can 2192 * trigger an allocation. This too, will make us recurse. Because at 2193 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2194 * the safest choice is to do it like this, wrapping the whole function. 
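 *
 * Reduced to its essentials, the guard below has this shape (a sketch,
 * not kernel code; in_guarded plays the role that
 * current->memcg_kmem_skip_account plays in the real function):
 *
 *	static __thread int in_guarded;
 *
 *	void guarded(void)
 *	{
 *		if (in_guarded)
 *			return;			// break the recursion
 *		in_guarded = 1;
 *		work_that_may_allocate();	// and thus re-enter
 *		in_guarded = 0;
 *	}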
2195 */ 2196 current->memcg_kmem_skip_account = 1; 2197 __memcg_schedule_kmem_cache_create(memcg, cachep); 2198 current->memcg_kmem_skip_account = 0; 2199 } 2200 2201 static inline bool memcg_kmem_bypass(void) 2202 { 2203 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2204 return true; 2205 return false; 2206 } 2207 2208 /** 2209 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2210 * @cachep: the original global kmem cache 2211 * 2212 * Return the kmem_cache we're supposed to use for a slab allocation. 2213 * We try to use the current memcg's version of the cache. 2214 * 2215 * If the cache does not exist yet and we are its first user, we 2216 * create it asynchronously in a workqueue and let the current allocation 2217 * go through with the original cache. 2218 * 2219 * This function takes a reference to the cache it returns to ensure it 2220 * won't get destroyed while we are working with it. Once the caller is 2221 * done with it, memcg_kmem_put_cache() must be called to release the 2222 * reference. 2223 */ 2224 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2225 { 2226 struct mem_cgroup *memcg; 2227 struct kmem_cache *memcg_cachep; 2228 int kmemcg_id; 2229 2230 VM_BUG_ON(!is_root_cache(cachep)); 2231 2232 if (memcg_kmem_bypass()) 2233 return cachep; 2234 2235 if (current->memcg_kmem_skip_account) 2236 return cachep; 2237 2238 memcg = get_mem_cgroup_from_mm(current->mm); 2239 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2240 if (kmemcg_id < 0) 2241 goto out; 2242 2243 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2244 if (likely(memcg_cachep)) 2245 return memcg_cachep; 2246 2247 /* 2248 * If we are in a safe context (can wait, and not in interrupt 2249 * context), we could be predictable and return right away. 2250 * This would guarantee that the allocation being performed 2251 * already belongs to the new cache. 2252 * 2253 * However, there are some clashes that can arise from locking. 2254 * For instance, because we acquire the slab_mutex while doing 2255 * memcg_create_kmem_cache, this means no further allocation 2256 * could happen with the slab_mutex held. So it's better to 2257 * defer everything. 2258 */ 2259 memcg_schedule_kmem_cache_create(memcg, cachep); 2260 out: 2261 css_put(&memcg->css); 2262 return cachep; 2263 } 2264 2265 /** 2266 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2267 * @cachep: the cache returned by memcg_kmem_get_cache 2268 */ 2269 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2270 { 2271 if (!is_root_cache(cachep)) 2272 css_put(&cachep->memcg_params.memcg->css); 2273 } 2274 2275 /** 2276 * memcg_kmem_charge_memcg: charge a kmem page to the given memory cgroup 2277 * @page: page to charge 2278 * @gfp: reclaim mode 2279 * @order: allocation order 2280 * @memcg: memory cgroup to charge 2281 * 2282 * Returns 0 on success, an error code on failure.
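 *
 * A typical caller pairs the charge with memcg_kmem_uncharge() around
 * the lifetime of the allocation, along these lines (a sketch only;
 * error handling is trimmed):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_memcg(page, gfp, order, memcg)) {
 *		__free_pages(page, order);
 *		page = NULL;		// charge failed, allocation fails
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);	// on the free path
 *	__free_pages(page, order);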
2283 */ 2284 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2285 struct mem_cgroup *memcg) 2286 { 2287 unsigned int nr_pages = 1 << order; 2288 struct page_counter *counter; 2289 int ret; 2290 2291 ret = try_charge(memcg, gfp, nr_pages); 2292 if (ret) 2293 return ret; 2294 2295 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2296 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2297 cancel_charge(memcg, nr_pages); 2298 return -ENOMEM; 2299 } 2300 2301 page->mem_cgroup = memcg; 2302 2303 return 0; 2304 } 2305 2306 /** 2307 * memcg_kmem_charge: charge a kmem page to the current memory cgroup 2308 * @page: page to charge 2309 * @gfp: reclaim mode 2310 * @order: allocation order 2311 * 2312 * Returns 0 on success, an error code on failure. 2313 */ 2314 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2315 { 2316 struct mem_cgroup *memcg; 2317 int ret = 0; 2318 2319 if (memcg_kmem_bypass()) 2320 return 0; 2321 2322 memcg = get_mem_cgroup_from_mm(current->mm); 2323 if (!mem_cgroup_is_root(memcg)) { 2324 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2325 if (!ret) 2326 __SetPageKmemcg(page); 2327 } 2328 css_put(&memcg->css); 2329 return ret; 2330 } 2331 /** 2332 * memcg_kmem_uncharge: uncharge a kmem page 2333 * @page: page to uncharge 2334 * @order: allocation order 2335 */ 2336 void memcg_kmem_uncharge(struct page *page, int order) 2337 { 2338 struct mem_cgroup *memcg = page->mem_cgroup; 2339 unsigned int nr_pages = 1 << order; 2340 2341 if (!memcg) 2342 return; 2343 2344 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2345 2346 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2347 page_counter_uncharge(&memcg->kmem, nr_pages); 2348 2349 page_counter_uncharge(&memcg->memory, nr_pages); 2350 if (do_memsw_account()) 2351 page_counter_uncharge(&memcg->memsw, nr_pages); 2352 2353 page->mem_cgroup = NULL; 2354 2355 /* slab pages do not have PageKmemcg flag set */ 2356 if (PageKmemcg(page)) 2357 __ClearPageKmemcg(page); 2358 2359 css_put_many(&memcg->css, nr_pages); 2360 } 2361 #endif /* !CONFIG_SLOB */ 2362 2363 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2364 2365 /* 2366 * Because tail pages are not marked as "used", set them. We're under 2367 * zone_lru_lock and migration entries are set up in all page mappings. 2368 */ 2369 void mem_cgroup_split_huge_fixup(struct page *head) 2370 { 2371 int i; 2372 2373 if (mem_cgroup_disabled()) 2374 return; 2375 2376 for (i = 1; i < HPAGE_PMD_NR; i++) 2377 head[i].mem_cgroup = head->mem_cgroup; 2378 2379 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 2380 HPAGE_PMD_NR); 2381 } 2382 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2383 2384 #ifdef CONFIG_MEMCG_SWAP 2385 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2386 bool charge) 2387 { 2388 int val = (charge) ? 1 : -1; 2389 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); 2390 } 2391 2392 /** 2393 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2394 * @entry: swap entry to be moved 2395 * @from: mem_cgroup which the entry is moved from 2396 * @to: mem_cgroup which the entry is moved to 2397 * 2398 * It succeeds only when the swap_cgroup's record for this entry is the same 2399 * as the mem_cgroup's id of @from. 2400 * 2401 * Returns 0 on success, -EINVAL on failure. 2402 * 2403 * The caller must have charged to @to, IOW, called page_counter_charge() for 2404 * both res and memsw, and called css_get().
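 *
 * The move hinges on a single compare-and-swap of the recorded owner
 * id. A minimal standalone analogue of that idiom (illustrative only;
 * a plain atomic stands in for the swap_cgroup record):
 *
 *	#include <stdatomic.h>
 *
 *	static _Atomic unsigned short owner;
 *
 *	static int move_record(unsigned short from, unsigned short to)
 *	{
 *		unsigned short expected = from;
 *
 *		// succeeds only if the record still belongs to @from
 *		if (atomic_compare_exchange_strong(&owner, &expected, to))
 *			return 0;
 *		return -1;
 *	}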
2405 */ 2406 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2407 struct mem_cgroup *from, struct mem_cgroup *to) 2408 { 2409 unsigned short old_id, new_id; 2410 2411 old_id = mem_cgroup_id(from); 2412 new_id = mem_cgroup_id(to); 2413 2414 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2415 mem_cgroup_swap_statistics(from, false); 2416 mem_cgroup_swap_statistics(to, true); 2417 return 0; 2418 } 2419 return -EINVAL; 2420 } 2421 #else 2422 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2423 struct mem_cgroup *from, struct mem_cgroup *to) 2424 { 2425 return -EINVAL; 2426 } 2427 #endif 2428 2429 static DEFINE_MUTEX(memcg_limit_mutex); 2430 2431 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 2432 unsigned long limit) 2433 { 2434 unsigned long curusage; 2435 unsigned long oldusage; 2436 bool enlarge = false; 2437 int retry_count; 2438 int ret; 2439 2440 /* 2441 * To keep hierarchical_reclaim simple, how long we should retry 2442 * depends on the caller. We set our retry count to be a function 2443 * of the number of children we should visit in this loop. 2444 */ 2445 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2446 mem_cgroup_count_children(memcg); 2447 2448 oldusage = page_counter_read(&memcg->memory); 2449 2450 do { 2451 if (signal_pending(current)) { 2452 ret = -EINTR; 2453 break; 2454 } 2455 2456 mutex_lock(&memcg_limit_mutex); 2457 if (limit > memcg->memsw.limit) { 2458 mutex_unlock(&memcg_limit_mutex); 2459 ret = -EINVAL; 2460 break; 2461 } 2462 if (limit > memcg->memory.limit) 2463 enlarge = true; 2464 ret = page_counter_limit(&memcg->memory, limit); 2465 mutex_unlock(&memcg_limit_mutex); 2466 2467 if (!ret) 2468 break; 2469 2470 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); 2471 2472 curusage = page_counter_read(&memcg->memory); 2473 /* Was usage reduced? */ 2474 if (curusage >= oldusage) 2475 retry_count--; 2476 else 2477 oldusage = curusage; 2478 } while (retry_count); 2479 2480 if (!ret && enlarge) 2481 memcg_oom_recover(memcg); 2482 2483 return ret; 2484 } 2485 2486 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 2487 unsigned long limit) 2488 { 2489 unsigned long curusage; 2490 unsigned long oldusage; 2491 bool enlarge = false; 2492 int retry_count; 2493 int ret; 2494 2495 /* see mem_cgroup_resize_limit */ 2496 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2497 mem_cgroup_count_children(memcg); 2498 2499 oldusage = page_counter_read(&memcg->memsw); 2500 2501 do { 2502 if (signal_pending(current)) { 2503 ret = -EINTR; 2504 break; 2505 } 2506 2507 mutex_lock(&memcg_limit_mutex); 2508 if (limit < memcg->memory.limit) { 2509 mutex_unlock(&memcg_limit_mutex); 2510 ret = -EINVAL; 2511 break; 2512 } 2513 if (limit > memcg->memsw.limit) 2514 enlarge = true; 2515 ret = page_counter_limit(&memcg->memsw, limit); 2516 mutex_unlock(&memcg_limit_mutex); 2517 2518 if (!ret) 2519 break; 2520 2521 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); 2522 2523 curusage = page_counter_read(&memcg->memsw); 2524 /* Was usage reduced?
*/ 2525 if (curusage >= oldusage) 2526 retry_count--; 2527 else 2528 oldusage = curusage; 2529 } while (retry_count); 2530 2531 if (!ret && enlarge) 2532 memcg_oom_recover(memcg); 2533 2534 return ret; 2535 } 2536 2537 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2538 gfp_t gfp_mask, 2539 unsigned long *total_scanned) 2540 { 2541 unsigned long nr_reclaimed = 0; 2542 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2543 unsigned long reclaimed; 2544 int loop = 0; 2545 struct mem_cgroup_tree_per_node *mctz; 2546 unsigned long excess; 2547 unsigned long nr_scanned; 2548 2549 if (order > 0) 2550 return 0; 2551 2552 mctz = soft_limit_tree_node(pgdat->node_id); 2553 2554 /* 2555 * Do not even bother to check the largest node if the root 2556 * is empty. Do it lockless to prevent lock bouncing. Races 2557 * are acceptable as soft limit is best effort anyway. 2558 */ 2559 if (RB_EMPTY_ROOT(&mctz->rb_root)) 2560 return 0; 2561 2562 /* 2563 * This loop can run for a while, especially if memory cgroups 2564 * continuously exceed their soft limit and keep the system under 2565 * pressure. 2566 */ 2567 do { 2568 if (next_mz) 2569 mz = next_mz; 2570 else 2571 mz = mem_cgroup_largest_soft_limit_node(mctz); 2572 if (!mz) 2573 break; 2574 2575 nr_scanned = 0; 2576 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 2577 gfp_mask, &nr_scanned); 2578 nr_reclaimed += reclaimed; 2579 *total_scanned += nr_scanned; 2580 spin_lock_irq(&mctz->lock); 2581 __mem_cgroup_remove_exceeded(mz, mctz); 2582 2583 /* 2584 * If we failed to reclaim anything from this memory cgroup, 2585 * it is time to move on to the next cgroup. 2586 */ 2587 next_mz = NULL; 2588 if (!reclaimed) 2589 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2590 2591 excess = soft_limit_excess(mz->memcg); 2592 /* 2593 * One school of thought says that we should not add 2594 * back the node to the tree if reclaim returns 0. 2595 * But our reclaim could return 0 simply because, due 2596 * to priority, we are exposing a smaller subset of 2597 * memory to reclaim from. Consider this a longer 2598 * term TODO. 2599 */ 2600 /* If excess == 0, no tree ops */ 2601 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2602 spin_unlock_irq(&mctz->lock); 2603 css_put(&mz->memcg->css); 2604 loop++; 2605 /* 2606 * Could not reclaim anything and there are no more 2607 * mem cgroups to try or we seem to be looping without 2608 * reclaiming anything. 2609 */ 2610 if (!nr_reclaimed && 2611 (next_mz == NULL || 2612 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2613 break; 2614 } while (!nr_reclaimed); 2615 if (next_mz) 2616 css_put(&next_mz->memcg->css); 2617 return nr_reclaimed; 2618 } 2619 2620 /* 2621 * Test whether @memcg has children, dead or alive. Note that this 2622 * function doesn't care whether @memcg has use_hierarchy enabled and 2623 * returns %true if there are child csses according to the cgroup 2624 * hierarchy. Testing use_hierarchy is the caller's responsibility. 2625 */ 2626 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2627 { 2628 bool ret; 2629 2630 rcu_read_lock(); 2631 ret = css_next_child(NULL, &memcg->css); 2632 rcu_read_unlock(); 2633 return ret; 2634 } 2635 2636 /* 2637 * Reclaims as many pages from the given memcg as possible. 2638 * 2639 * Caller is responsible for holding a css reference for memcg.
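 *
 * From userspace this is reached through the legacy memory.force_empty
 * knob; a minimal sketch (the cgroup path is an example only, and any
 * written value triggers the reclaim):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/fs/cgroup/memory/mygroup/memory.force_empty",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		(void)write(fd, "0", 1);
 *		return close(fd) ? 1 : 0;
 *	}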
2640 */ 2641 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2642 { 2643 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2644 2645 /* we call try-to-free pages to make this cgroup empty */ 2646 lru_add_drain_all(); 2647 /* try to free all pages in this cgroup */ 2648 while (nr_retries && page_counter_read(&memcg->memory)) { 2649 int progress; 2650 2651 if (signal_pending(current)) 2652 return -EINTR; 2653 2654 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2655 GFP_KERNEL, true); 2656 if (!progress) { 2657 nr_retries--; 2658 /* maybe some writeback is necessary */ 2659 congestion_wait(BLK_RW_ASYNC, HZ/10); 2660 } 2661 2662 } 2663 2664 return 0; 2665 } 2666 2667 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 2668 char *buf, size_t nbytes, 2669 loff_t off) 2670 { 2671 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2672 2673 if (mem_cgroup_is_root(memcg)) 2674 return -EINVAL; 2675 return mem_cgroup_force_empty(memcg) ?: nbytes; 2676 } 2677 2678 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 2679 struct cftype *cft) 2680 { 2681 return mem_cgroup_from_css(css)->use_hierarchy; 2682 } 2683 2684 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 2685 struct cftype *cft, u64 val) 2686 { 2687 int retval = 0; 2688 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2689 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 2690 2691 if (memcg->use_hierarchy == val) 2692 return 0; 2693 2694 /* 2695 * If the parent's use_hierarchy is set, we can't make any modifications 2696 * in the child subtrees. If it is unset, then the change can 2697 * occur, provided the current cgroup has no children. 2698 * 2699 * For the root cgroup, parent_memcg is NULL; we allow the value to be 2700 * set if there are no children.
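 *
 * For illustration, toggling the flag from userspace on the legacy
 * hierarchy (the cgroup path is an example only):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/fs/cgroup/memory/mygroup/"
 *				"memory.use_hierarchy", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);	// -EBUSY if children exist; -EINVAL if
 *				// the parent already has it set
 *		return fclose(f) ? 1 : 0;
 *	}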
2701 */ 2702 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2703 (val == 1 || val == 0)) { 2704 if (!memcg_has_children(memcg)) 2705 memcg->use_hierarchy = val; 2706 else 2707 retval = -EBUSY; 2708 } else 2709 retval = -EINVAL; 2710 2711 return retval; 2712 } 2713 2714 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat) 2715 { 2716 struct mem_cgroup *iter; 2717 int i; 2718 2719 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT); 2720 2721 for_each_mem_cgroup_tree(iter, memcg) { 2722 for (i = 0; i < MEMCG_NR_STAT; i++) 2723 stat[i] += mem_cgroup_read_stat(iter, i); 2724 } 2725 } 2726 2727 static void tree_events(struct mem_cgroup *memcg, unsigned long *events) 2728 { 2729 struct mem_cgroup *iter; 2730 int i; 2731 2732 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS); 2733 2734 for_each_mem_cgroup_tree(iter, memcg) { 2735 for (i = 0; i < MEMCG_NR_EVENTS; i++) 2736 events[i] += mem_cgroup_read_events(iter, i); 2737 } 2738 } 2739 2740 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2741 { 2742 unsigned long val = 0; 2743 2744 if (mem_cgroup_is_root(memcg)) { 2745 struct mem_cgroup *iter; 2746 2747 for_each_mem_cgroup_tree(iter, memcg) { 2748 val += mem_cgroup_read_stat(iter, 2749 MEM_CGROUP_STAT_CACHE); 2750 val += mem_cgroup_read_stat(iter, 2751 MEM_CGROUP_STAT_RSS); 2752 if (swap) 2753 val += mem_cgroup_read_stat(iter, 2754 MEM_CGROUP_STAT_SWAP); 2755 } 2756 } else { 2757 if (!swap) 2758 val = page_counter_read(&memcg->memory); 2759 else 2760 val = page_counter_read(&memcg->memsw); 2761 } 2762 return val; 2763 } 2764 2765 enum { 2766 RES_USAGE, 2767 RES_LIMIT, 2768 RES_MAX_USAGE, 2769 RES_FAILCNT, 2770 RES_SOFT_LIMIT, 2771 }; 2772 2773 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2774 struct cftype *cft) 2775 { 2776 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2777 struct page_counter *counter; 2778 2779 switch (MEMFILE_TYPE(cft->private)) { 2780 case _MEM: 2781 counter = &memcg->memory; 2782 break; 2783 case _MEMSWAP: 2784 counter = &memcg->memsw; 2785 break; 2786 case _KMEM: 2787 counter = &memcg->kmem; 2788 break; 2789 case _TCP: 2790 counter = &memcg->tcpmem; 2791 break; 2792 default: 2793 BUG(); 2794 } 2795 2796 switch (MEMFILE_ATTR(cft->private)) { 2797 case RES_USAGE: 2798 if (counter == &memcg->memory) 2799 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2800 if (counter == &memcg->memsw) 2801 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2802 return (u64)page_counter_read(counter) * PAGE_SIZE; 2803 case RES_LIMIT: 2804 return (u64)counter->limit * PAGE_SIZE; 2805 case RES_MAX_USAGE: 2806 return (u64)counter->watermark * PAGE_SIZE; 2807 case RES_FAILCNT: 2808 return counter->failcnt; 2809 case RES_SOFT_LIMIT: 2810 return (u64)memcg->soft_limit * PAGE_SIZE; 2811 default: 2812 BUG(); 2813 } 2814 } 2815 2816 #ifndef CONFIG_SLOB 2817 static int memcg_online_kmem(struct mem_cgroup *memcg) 2818 { 2819 int memcg_id; 2820 2821 if (cgroup_memory_nokmem) 2822 return 0; 2823 2824 BUG_ON(memcg->kmemcg_id >= 0); 2825 BUG_ON(memcg->kmem_state); 2826 2827 memcg_id = memcg_alloc_cache_id(); 2828 if (memcg_id < 0) 2829 return memcg_id; 2830 2831 static_branch_inc(&memcg_kmem_enabled_key); 2832 /* 2833 * A memory cgroup is considered kmem-online as soon as it gets 2834 * kmemcg_id. Setting the id after enabling static branching will 2835 * guarantee no one starts accounting before all call sites are 2836 * patched. 
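 *
 * The ordering is the classic "enable, then publish" idiom. A reduced
 * sketch with C11 atomics standing in for the static branch and for
 * kmemcg_id (an analogy, not the kernel primitives):
 *
 *	atomic_store_explicit(&accounting_enabled, true,
 *			      memory_order_release);	// patch call sites
 *	atomic_store_explicit(&kmemcg_id, new_id,
 *			      memory_order_release);	// then publish id
 *
 * A reader that observes the new id with an acquire load is then
 * guaranteed to also observe the enabled key.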
2837 */ 2838 memcg->kmemcg_id = memcg_id; 2839 memcg->kmem_state = KMEM_ONLINE; 2840 2841 return 0; 2842 } 2843 2844 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2845 { 2846 struct cgroup_subsys_state *css; 2847 struct mem_cgroup *parent, *child; 2848 int kmemcg_id; 2849 2850 if (memcg->kmem_state != KMEM_ONLINE) 2851 return; 2852 /* 2853 * Clear the online state before clearing memcg_caches array 2854 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 2855 * guarantees that no cache will be created for this cgroup 2856 * after we are done (see memcg_create_kmem_cache()). 2857 */ 2858 memcg->kmem_state = KMEM_ALLOCATED; 2859 2860 memcg_deactivate_kmem_caches(memcg); 2861 2862 kmemcg_id = memcg->kmemcg_id; 2863 BUG_ON(kmemcg_id < 0); 2864 2865 parent = parent_mem_cgroup(memcg); 2866 if (!parent) 2867 parent = root_mem_cgroup; 2868 2869 /* 2870 * Change kmemcg_id of this cgroup and all its descendants to the 2871 * parent's id, and then move all entries from this cgroup's list_lrus 2872 * to ones of the parent. After we have finished, all list_lrus 2873 * corresponding to this cgroup are guaranteed to remain empty. The 2874 * ordering is imposed by list_lru_node->lock taken by 2875 * memcg_drain_all_list_lrus(). 2876 */ 2877 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 2878 css_for_each_descendant_pre(css, &memcg->css) { 2879 child = mem_cgroup_from_css(css); 2880 BUG_ON(child->kmemcg_id != kmemcg_id); 2881 child->kmemcg_id = parent->kmemcg_id; 2882 if (!memcg->use_hierarchy) 2883 break; 2884 } 2885 rcu_read_unlock(); 2886 2887 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 2888 2889 memcg_free_cache_id(kmemcg_id); 2890 } 2891 2892 static void memcg_free_kmem(struct mem_cgroup *memcg) 2893 { 2894 /* css_alloc() failed, offlining didn't happen */ 2895 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 2896 memcg_offline_kmem(memcg); 2897 2898 if (memcg->kmem_state == KMEM_ALLOCATED) { 2899 memcg_destroy_kmem_caches(memcg); 2900 static_branch_dec(&memcg_kmem_enabled_key); 2901 WARN_ON(page_counter_read(&memcg->kmem)); 2902 } 2903 } 2904 #else 2905 static int memcg_online_kmem(struct mem_cgroup *memcg) 2906 { 2907 return 0; 2908 } 2909 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2910 { 2911 } 2912 static void memcg_free_kmem(struct mem_cgroup *memcg) 2913 { 2914 } 2915 #endif /* !CONFIG_SLOB */ 2916 2917 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2918 unsigned long limit) 2919 { 2920 int ret; 2921 2922 mutex_lock(&memcg_limit_mutex); 2923 ret = page_counter_limit(&memcg->kmem, limit); 2924 mutex_unlock(&memcg_limit_mutex); 2925 return ret; 2926 } 2927 2928 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit) 2929 { 2930 int ret; 2931 2932 mutex_lock(&memcg_limit_mutex); 2933 2934 ret = page_counter_limit(&memcg->tcpmem, limit); 2935 if (ret) 2936 goto out; 2937 2938 if (!memcg->tcpmem_active) { 2939 /* 2940 * The active flag needs to be written after the static_key 2941 * update. This is what guarantees that the socket activation 2942 * function is the last one to run. See mem_cgroup_sk_alloc() 2943 * for details, and note that we don't mark any socket as 2944 * belonging to this memcg until that flag is up. 2945 * 2946 * We need to do this, because static_keys will span multiple 2947 * sites, but we can't control their order. If we mark a socket 2948 * as accounted, but the accounting functions are not patched in 2949 * yet, we'll lose accounting. 
2950 * 2951 * We never race with the readers in mem_cgroup_sk_alloc(), 2952 * because when this value changes, the code to process it is not 2953 * patched in yet. 2954 */ 2955 static_branch_inc(&memcg_sockets_enabled_key); 2956 memcg->tcpmem_active = true; 2957 } 2958 out: 2959 mutex_unlock(&memcg_limit_mutex); 2960 return ret; 2961 } 2962 2963 /* 2964 * The user of this function is... 2965 * RES_LIMIT. 2966 */ 2967 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2968 char *buf, size_t nbytes, loff_t off) 2969 { 2970 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2971 unsigned long nr_pages; 2972 int ret; 2973 2974 buf = strstrip(buf); 2975 ret = page_counter_memparse(buf, "-1", &nr_pages); 2976 if (ret) 2977 return ret; 2978 2979 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2980 case RES_LIMIT: 2981 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 2982 ret = -EINVAL; 2983 break; 2984 } 2985 switch (MEMFILE_TYPE(of_cft(of)->private)) { 2986 case _MEM: 2987 ret = mem_cgroup_resize_limit(memcg, nr_pages); 2988 break; 2989 case _MEMSWAP: 2990 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 2991 break; 2992 case _KMEM: 2993 ret = memcg_update_kmem_limit(memcg, nr_pages); 2994 break; 2995 case _TCP: 2996 ret = memcg_update_tcp_limit(memcg, nr_pages); 2997 break; 2998 } 2999 break; 3000 case RES_SOFT_LIMIT: 3001 memcg->soft_limit = nr_pages; 3002 ret = 0; 3003 break; 3004 } 3005 return ret ?: nbytes; 3006 } 3007 3008 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3009 size_t nbytes, loff_t off) 3010 { 3011 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3012 struct page_counter *counter; 3013 3014 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3015 case _MEM: 3016 counter = &memcg->memory; 3017 break; 3018 case _MEMSWAP: 3019 counter = &memcg->memsw; 3020 break; 3021 case _KMEM: 3022 counter = &memcg->kmem; 3023 break; 3024 case _TCP: 3025 counter = &memcg->tcpmem; 3026 break; 3027 default: 3028 BUG(); 3029 } 3030 3031 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3032 case RES_MAX_USAGE: 3033 page_counter_reset_watermark(counter); 3034 break; 3035 case RES_FAILCNT: 3036 counter->failcnt = 0; 3037 break; 3038 default: 3039 BUG(); 3040 } 3041 3042 return nbytes; 3043 } 3044 3045 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3046 struct cftype *cft) 3047 { 3048 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3049 } 3050 3051 #ifdef CONFIG_MMU 3052 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3053 struct cftype *cft, u64 val) 3054 { 3055 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3056 3057 if (val & ~MOVE_MASK) 3058 return -EINVAL; 3059 3060 /* 3061 * No locking is needed here, because ->can_attach() will 3062 * check this value once at the beginning of the process, and then carry 3063 * on with stale data. This means that changes to this value will only 3064 * affect task migrations starting after the change.
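 *
 * For illustration, enabling both anon and file charge moving from
 * userspace on the legacy hierarchy (the cgroup path is an example
 * only; bit 0 moves anon pages, bit 1 moves file pages):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/fs/cgroup/memory/mygroup/"
 *				"memory.move_charge_at_immigrate", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("3", f);	// MOVE_ANON | MOVE_FILE
 *		return fclose(f) ? 1 : 0;
 *	}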
3065 */ 3066 memcg->move_charge_at_immigrate = val; 3067 return 0; 3068 } 3069 #else 3070 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3071 struct cftype *cft, u64 val) 3072 { 3073 return -ENOSYS; 3074 } 3075 #endif 3076 3077 #ifdef CONFIG_NUMA 3078 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3079 { 3080 struct numa_stat { 3081 const char *name; 3082 unsigned int lru_mask; 3083 }; 3084 3085 static const struct numa_stat stats[] = { 3086 { "total", LRU_ALL }, 3087 { "file", LRU_ALL_FILE }, 3088 { "anon", LRU_ALL_ANON }, 3089 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3090 }; 3091 const struct numa_stat *stat; 3092 int nid; 3093 unsigned long nr; 3094 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3095 3096 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3097 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3098 seq_printf(m, "%s=%lu", stat->name, nr); 3099 for_each_node_state(nid, N_MEMORY) { 3100 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3101 stat->lru_mask); 3102 seq_printf(m, " N%d=%lu", nid, nr); 3103 } 3104 seq_putc(m, '\n'); 3105 } 3106 3107 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3108 struct mem_cgroup *iter; 3109 3110 nr = 0; 3111 for_each_mem_cgroup_tree(iter, memcg) 3112 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3113 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3114 for_each_node_state(nid, N_MEMORY) { 3115 nr = 0; 3116 for_each_mem_cgroup_tree(iter, memcg) 3117 nr += mem_cgroup_node_nr_lru_pages( 3118 iter, nid, stat->lru_mask); 3119 seq_printf(m, " N%d=%lu", nid, nr); 3120 } 3121 seq_putc(m, '\n'); 3122 } 3123 3124 return 0; 3125 } 3126 #endif /* CONFIG_NUMA */ 3127 3128 static int memcg_stat_show(struct seq_file *m, void *v) 3129 { 3130 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3131 unsigned long memory, memsw; 3132 struct mem_cgroup *mi; 3133 unsigned int i; 3134 3135 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) != 3136 MEM_CGROUP_STAT_NSTATS); 3137 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) != 3138 MEM_CGROUP_EVENTS_NSTATS); 3139 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3140 3141 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3142 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) 3143 continue; 3144 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], 3145 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3146 } 3147 3148 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 3149 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 3150 mem_cgroup_read_events(memcg, i)); 3151 3152 for (i = 0; i < NR_LRU_LISTS; i++) 3153 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3154 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3155 3156 /* Hierarchical information */ 3157 memory = memsw = PAGE_COUNTER_MAX; 3158 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3159 memory = min(memory, mi->memory.limit); 3160 memsw = min(memsw, mi->memsw.limit); 3161 } 3162 seq_printf(m, "hierarchical_memory_limit %llu\n", 3163 (u64)memory * PAGE_SIZE); 3164 if (do_memsw_account()) 3165 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3166 (u64)memsw * PAGE_SIZE); 3167 3168 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3169 unsigned long long val = 0; 3170 3171 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) 3172 continue; 3173 for_each_mem_cgroup_tree(mi, memcg) 3174 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3175 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); 3176 } 3177 3178 for (i = 0; i < 
MEM_CGROUP_EVENTS_NSTATS; i++) { 3179 unsigned long long val = 0; 3180 3181 for_each_mem_cgroup_tree(mi, memcg) 3182 val += mem_cgroup_read_events(mi, i); 3183 seq_printf(m, "total_%s %llu\n", 3184 mem_cgroup_events_names[i], val); 3185 } 3186 3187 for (i = 0; i < NR_LRU_LISTS; i++) { 3188 unsigned long long val = 0; 3189 3190 for_each_mem_cgroup_tree(mi, memcg) 3191 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3192 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3193 } 3194 3195 #ifdef CONFIG_DEBUG_VM 3196 { 3197 pg_data_t *pgdat; 3198 struct mem_cgroup_per_node *mz; 3199 struct zone_reclaim_stat *rstat; 3200 unsigned long recent_rotated[2] = {0, 0}; 3201 unsigned long recent_scanned[2] = {0, 0}; 3202 3203 for_each_online_pgdat(pgdat) { 3204 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3205 rstat = &mz->lruvec.reclaim_stat; 3206 3207 recent_rotated[0] += rstat->recent_rotated[0]; 3208 recent_rotated[1] += rstat->recent_rotated[1]; 3209 recent_scanned[0] += rstat->recent_scanned[0]; 3210 recent_scanned[1] += rstat->recent_scanned[1]; 3211 } 3212 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3213 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3214 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3215 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3216 } 3217 #endif 3218 3219 return 0; 3220 } 3221 3222 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3223 struct cftype *cft) 3224 { 3225 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3226 3227 return mem_cgroup_swappiness(memcg); 3228 } 3229 3230 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3231 struct cftype *cft, u64 val) 3232 { 3233 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3234 3235 if (val > 100) 3236 return -EINVAL; 3237 3238 if (css->parent) 3239 memcg->swappiness = val; 3240 else 3241 vm_swappiness = val; 3242 3243 return 0; 3244 } 3245 3246 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3247 { 3248 struct mem_cgroup_threshold_ary *t; 3249 unsigned long usage; 3250 int i; 3251 3252 rcu_read_lock(); 3253 if (!swap) 3254 t = rcu_dereference(memcg->thresholds.primary); 3255 else 3256 t = rcu_dereference(memcg->memsw_thresholds.primary); 3257 3258 if (!t) 3259 goto unlock; 3260 3261 usage = mem_cgroup_usage(memcg, swap); 3262 3263 /* 3264 * current_threshold points to the threshold just below or equal to usage. 3265 * If that is not true, a threshold was crossed after the last 3266 * call of __mem_cgroup_threshold(). 3267 */ 3268 i = t->current_threshold; 3269 3270 /* 3271 * Iterate backward over the array of thresholds starting from 3272 * current_threshold and check whether a threshold was crossed. 3273 * If none of the thresholds below usage was crossed, we read 3274 * only one element of the array here. 3275 */ 3276 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3277 eventfd_signal(t->entries[i].eventfd, 1); 3278 3279 /* i = current_threshold + 1 */ 3280 i++; 3281 3282 /* 3283 * Iterate forward over the array of thresholds starting from 3284 * current_threshold+1 and check whether a threshold was crossed. 3285 * If none of the thresholds above usage was crossed, we read 3286 * only one element of the array here.
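 *
 * A worked example: with thresholds {4M, 8M, 16M} and a previous usage
 * between 8M and 16M, current_threshold indexes the 8M entry. If usage
 * has dropped to 5M, the backward scan signals the 8M eventfd and stops
 * before 4M, leaving current_threshold at the 4M entry. If usage has
 * instead risen to 20M, the forward scan signals the 16M eventfd and
 * leaves current_threshold there. In both cases the invariant above is
 * restored while reading only the entries actually crossed.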
3287 */ 3288 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3289 eventfd_signal(t->entries[i].eventfd, 1); 3290 3291 /* Update current_threshold */ 3292 t->current_threshold = i - 1; 3293 unlock: 3294 rcu_read_unlock(); 3295 } 3296 3297 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3298 { 3299 while (memcg) { 3300 __mem_cgroup_threshold(memcg, false); 3301 if (do_memsw_account()) 3302 __mem_cgroup_threshold(memcg, true); 3303 3304 memcg = parent_mem_cgroup(memcg); 3305 } 3306 } 3307 3308 static int compare_thresholds(const void *a, const void *b) 3309 { 3310 const struct mem_cgroup_threshold *_a = a; 3311 const struct mem_cgroup_threshold *_b = b; 3312 3313 if (_a->threshold > _b->threshold) 3314 return 1; 3315 3316 if (_a->threshold < _b->threshold) 3317 return -1; 3318 3319 return 0; 3320 } 3321 3322 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3323 { 3324 struct mem_cgroup_eventfd_list *ev; 3325 3326 spin_lock(&memcg_oom_lock); 3327 3328 list_for_each_entry(ev, &memcg->oom_notify, list) 3329 eventfd_signal(ev->eventfd, 1); 3330 3331 spin_unlock(&memcg_oom_lock); 3332 return 0; 3333 } 3334 3335 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3336 { 3337 struct mem_cgroup *iter; 3338 3339 for_each_mem_cgroup_tree(iter, memcg) 3340 mem_cgroup_oom_notify_cb(iter); 3341 } 3342 3343 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3344 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3345 { 3346 struct mem_cgroup_thresholds *thresholds; 3347 struct mem_cgroup_threshold_ary *new; 3348 unsigned long threshold; 3349 unsigned long usage; 3350 int i, size, ret; 3351 3352 ret = page_counter_memparse(args, "-1", &threshold); 3353 if (ret) 3354 return ret; 3355 3356 mutex_lock(&memcg->thresholds_lock); 3357 3358 if (type == _MEM) { 3359 thresholds = &memcg->thresholds; 3360 usage = mem_cgroup_usage(memcg, false); 3361 } else if (type == _MEMSWAP) { 3362 thresholds = &memcg->memsw_thresholds; 3363 usage = mem_cgroup_usage(memcg, true); 3364 } else 3365 BUG(); 3366 3367 /* Check if a threshold crossed before adding a new one */ 3368 if (thresholds->primary) 3369 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3370 3371 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3372 3373 /* Allocate memory for new array of thresholds */ 3374 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3375 GFP_KERNEL); 3376 if (!new) { 3377 ret = -ENOMEM; 3378 goto unlock; 3379 } 3380 new->size = size; 3381 3382 /* Copy thresholds (if any) to new array */ 3383 if (thresholds->primary) { 3384 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3385 sizeof(struct mem_cgroup_threshold)); 3386 } 3387 3388 /* Add new threshold */ 3389 new->entries[size - 1].eventfd = eventfd; 3390 new->entries[size - 1].threshold = threshold; 3391 3392 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3393 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3394 compare_thresholds, NULL); 3395 3396 /* Find current threshold */ 3397 new->current_threshold = -1; 3398 for (i = 0; i < size; i++) { 3399 if (new->entries[i].threshold <= usage) { 3400 /* 3401 * new->current_threshold will not be used until 3402 * rcu_assign_pointer(), so it's safe to increment 3403 * it here. 
3404 */ 3405 ++new->current_threshold; 3406 } else 3407 break; 3408 } 3409 3410 /* Free old spare buffer and save old primary buffer as spare */ 3411 kfree(thresholds->spare); 3412 thresholds->spare = thresholds->primary; 3413 3414 rcu_assign_pointer(thresholds->primary, new); 3415 3416 /* To be sure that nobody uses thresholds */ 3417 synchronize_rcu(); 3418 3419 unlock: 3420 mutex_unlock(&memcg->thresholds_lock); 3421 3422 return ret; 3423 } 3424 3425 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3426 struct eventfd_ctx *eventfd, const char *args) 3427 { 3428 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3429 } 3430 3431 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3432 struct eventfd_ctx *eventfd, const char *args) 3433 { 3434 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3435 } 3436 3437 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3438 struct eventfd_ctx *eventfd, enum res_type type) 3439 { 3440 struct mem_cgroup_thresholds *thresholds; 3441 struct mem_cgroup_threshold_ary *new; 3442 unsigned long usage; 3443 int i, j, size; 3444 3445 mutex_lock(&memcg->thresholds_lock); 3446 3447 if (type == _MEM) { 3448 thresholds = &memcg->thresholds; 3449 usage = mem_cgroup_usage(memcg, false); 3450 } else if (type == _MEMSWAP) { 3451 thresholds = &memcg->memsw_thresholds; 3452 usage = mem_cgroup_usage(memcg, true); 3453 } else 3454 BUG(); 3455 3456 if (!thresholds->primary) 3457 goto unlock; 3458 3459 /* Check whether a threshold was crossed before removing */ 3460 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3461 3462 /* Calculate the new number of thresholds */ 3463 size = 0; 3464 for (i = 0; i < thresholds->primary->size; i++) { 3465 if (thresholds->primary->entries[i].eventfd != eventfd) 3466 size++; 3467 } 3468 3469 new = thresholds->spare; 3470 3471 /* Set thresholds array to NULL if we don't have thresholds */ 3472 if (!size) { 3473 kfree(new); 3474 new = NULL; 3475 goto swap_buffers; 3476 } 3477 3478 new->size = size; 3479 3480 /* Copy thresholds and find current threshold */ 3481 new->current_threshold = -1; 3482 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3483 if (thresholds->primary->entries[i].eventfd == eventfd) 3484 continue; 3485 3486 new->entries[j] = thresholds->primary->entries[i]; 3487 if (new->entries[j].threshold <= usage) { 3488 /* 3489 * new->current_threshold will not be used 3490 * until rcu_assign_pointer(), so it's safe to increment 3491 * it here.
3492 */ 3493 ++new->current_threshold; 3494 } 3495 j++; 3496 } 3497 3498 swap_buffers: 3499 /* Swap primary and spare array */ 3500 thresholds->spare = thresholds->primary; 3501 3502 rcu_assign_pointer(thresholds->primary, new); 3503 3504 /* To be sure that nobody uses thresholds */ 3505 synchronize_rcu(); 3506 3507 /* If all events are unregistered, free the spare array */ 3508 if (!new) { 3509 kfree(thresholds->spare); 3510 thresholds->spare = NULL; 3511 } 3512 unlock: 3513 mutex_unlock(&memcg->thresholds_lock); 3514 } 3515 3516 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3517 struct eventfd_ctx *eventfd) 3518 { 3519 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3520 } 3521 3522 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3523 struct eventfd_ctx *eventfd) 3524 { 3525 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3526 } 3527 3528 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3529 struct eventfd_ctx *eventfd, const char *args) 3530 { 3531 struct mem_cgroup_eventfd_list *event; 3532 3533 event = kmalloc(sizeof(*event), GFP_KERNEL); 3534 if (!event) 3535 return -ENOMEM; 3536 3537 spin_lock(&memcg_oom_lock); 3538 3539 event->eventfd = eventfd; 3540 list_add(&event->list, &memcg->oom_notify); 3541 3542 /* already in OOM ? */ 3543 if (memcg->under_oom) 3544 eventfd_signal(eventfd, 1); 3545 spin_unlock(&memcg_oom_lock); 3546 3547 return 0; 3548 } 3549 3550 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3551 struct eventfd_ctx *eventfd) 3552 { 3553 struct mem_cgroup_eventfd_list *ev, *tmp; 3554 3555 spin_lock(&memcg_oom_lock); 3556 3557 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3558 if (ev->eventfd == eventfd) { 3559 list_del(&ev->list); 3560 kfree(ev); 3561 } 3562 } 3563 3564 spin_unlock(&memcg_oom_lock); 3565 } 3566 3567 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3568 { 3569 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3570 3571 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3572 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3573 return 0; 3574 } 3575 3576 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3577 struct cftype *cft, u64 val) 3578 { 3579 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3580 3581 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3582 if (!css->parent || !((val == 0) || (val == 1))) 3583 return -EINVAL; 3584 3585 memcg->oom_kill_disable = val; 3586 if (!val) 3587 memcg_oom_recover(memcg); 3588 3589 return 0; 3590 } 3591 3592 #ifdef CONFIG_CGROUP_WRITEBACK 3593 3594 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) 3595 { 3596 return &memcg->cgwb_list; 3597 } 3598 3599 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3600 { 3601 return wb_domain_init(&memcg->cgwb_domain, gfp); 3602 } 3603 3604 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3605 { 3606 wb_domain_exit(&memcg->cgwb_domain); 3607 } 3608 3609 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3610 { 3611 wb_domain_size_changed(&memcg->cgwb_domain); 3612 } 3613 3614 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3615 { 3616 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3617 3618 if (!memcg->css.parent) 3619 return NULL; 3620 3621 return &memcg->cgwb_domain; 3622 } 3623 3624 /** 3625 * mem_cgroup_wb_stats - retrieve writeback related stats 
from its memcg 3626 * @wb: bdi_writeback in question 3627 * @pfilepages: out parameter for number of file pages 3628 * @pheadroom: out parameter for number of allocatable pages according to memcg 3629 * @pdirty: out parameter for number of dirty pages 3630 * @pwriteback: out parameter for number of pages under writeback 3631 * 3632 * Determine the numbers of file, headroom, dirty, and writeback pages in 3633 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3634 * is a bit more involved. 3635 * 3636 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3637 * headroom is calculated as the lowest headroom of itself and the 3638 * ancestors. Note that this doesn't consider the actual amount of 3639 * available memory in the system. The caller should further cap 3640 * *@pheadroom accordingly. 3641 */ 3642 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3643 unsigned long *pheadroom, unsigned long *pdirty, 3644 unsigned long *pwriteback) 3645 { 3646 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3647 struct mem_cgroup *parent; 3648 3649 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3650 3651 /* this should eventually include NR_UNSTABLE_NFS */ 3652 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3653 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3654 (1 << LRU_ACTIVE_FILE)); 3655 *pheadroom = PAGE_COUNTER_MAX; 3656 3657 while ((parent = parent_mem_cgroup(memcg))) { 3658 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3659 unsigned long used = page_counter_read(&memcg->memory); 3660 3661 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3662 memcg = parent; 3663 } 3664 } 3665 3666 #else /* CONFIG_CGROUP_WRITEBACK */ 3667 3668 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3669 { 3670 return 0; 3671 } 3672 3673 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3674 { 3675 } 3676 3677 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3678 { 3679 } 3680 3681 #endif /* CONFIG_CGROUP_WRITEBACK */ 3682 3683 /* 3684 * DO NOT USE IN NEW FILES. 3685 * 3686 * "cgroup.event_control" implementation. 3687 * 3688 * This is way over-engineered. It tries to support fully configurable 3689 * events for each user. Such level of flexibility is completely 3690 * unnecessary especially in the light of the planned unified hierarchy. 3691 * 3692 * Please deprecate this and replace with something simpler if at all 3693 * possible. 3694 */ 3695 3696 /* 3697 * Unregister event and free resources. 3698 * 3699 * Gets called from workqueue. 3700 */ 3701 static void memcg_event_remove(struct work_struct *work) 3702 { 3703 struct mem_cgroup_event *event = 3704 container_of(work, struct mem_cgroup_event, remove); 3705 struct mem_cgroup *memcg = event->memcg; 3706 3707 remove_wait_queue(event->wqh, &event->wait); 3708 3709 event->unregister_event(memcg, event->eventfd); 3710 3711 /* Notify userspace the event is going away. */ 3712 eventfd_signal(event->eventfd, 1); 3713 3714 eventfd_ctx_put(event->eventfd); 3715 kfree(event); 3716 css_put(&memcg->css); 3717 } 3718 3719 /* 3720 * Gets called on POLLHUP on eventfd when user closes it. 3721 * 3722 * Called with wqh->lock held and interrupts disabled. 
3723 */ 3724 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 3725 int sync, void *key) 3726 { 3727 struct mem_cgroup_event *event = 3728 container_of(wait, struct mem_cgroup_event, wait); 3729 struct mem_cgroup *memcg = event->memcg; 3730 unsigned long flags = (unsigned long)key; 3731 3732 if (flags & POLLHUP) { 3733 /* 3734 * If the event has been detached at cgroup removal, we 3735 * can simply return knowing the other side will cleanup 3736 * for us. 3737 * 3738 * We can't race against event freeing since the other 3739 * side will require wqh->lock via remove_wait_queue(), 3740 * which we hold. 3741 */ 3742 spin_lock(&memcg->event_list_lock); 3743 if (!list_empty(&event->list)) { 3744 list_del_init(&event->list); 3745 /* 3746 * We are in atomic context, but memcg_event_remove() 3747 * may sleep, so we have to call it in a workqueue. 3748 */ 3749 schedule_work(&event->remove); 3750 } 3751 spin_unlock(&memcg->event_list_lock); 3752 } 3753 3754 return 0; 3755 } 3756 3757 static void memcg_event_ptable_queue_proc(struct file *file, 3758 wait_queue_head_t *wqh, poll_table *pt) 3759 { 3760 struct mem_cgroup_event *event = 3761 container_of(pt, struct mem_cgroup_event, pt); 3762 3763 event->wqh = wqh; 3764 add_wait_queue(wqh, &event->wait); 3765 } 3766 3767 /* 3768 * DO NOT USE IN NEW FILES. 3769 * 3770 * Parse input and register new cgroup event handler. 3771 * 3772 * Input must be in format '<event_fd> <control_fd> <args>'. 3773 * Interpretation of args is defined by control file implementation. 3774 */ 3775 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3776 char *buf, size_t nbytes, loff_t off) 3777 { 3778 struct cgroup_subsys_state *css = of_css(of); 3779 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3780 struct mem_cgroup_event *event; 3781 struct cgroup_subsys_state *cfile_css; 3782 unsigned int efd, cfd; 3783 struct fd efile; 3784 struct fd cfile; 3785 const char *name; 3786 char *endp; 3787 int ret; 3788 3789 buf = strstrip(buf); 3790 3791 efd = simple_strtoul(buf, &endp, 10); 3792 if (*endp != ' ') 3793 return -EINVAL; 3794 buf = endp + 1; 3795 3796 cfd = simple_strtoul(buf, &endp, 10); 3797 if ((*endp != ' ') && (*endp != '\0')) 3798 return -EINVAL; 3799 buf = endp + 1; 3800 3801 event = kzalloc(sizeof(*event), GFP_KERNEL); 3802 if (!event) 3803 return -ENOMEM; 3804 3805 event->memcg = memcg; 3806 INIT_LIST_HEAD(&event->list); 3807 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3808 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3809 INIT_WORK(&event->remove, memcg_event_remove); 3810 3811 efile = fdget(efd); 3812 if (!efile.file) { 3813 ret = -EBADF; 3814 goto out_kfree; 3815 } 3816 3817 event->eventfd = eventfd_ctx_fileget(efile.file); 3818 if (IS_ERR(event->eventfd)) { 3819 ret = PTR_ERR(event->eventfd); 3820 goto out_put_efile; 3821 } 3822 3823 cfile = fdget(cfd); 3824 if (!cfile.file) { 3825 ret = -EBADF; 3826 goto out_put_eventfd; 3827 } 3828 3829 /* the process needs read permission on the control file */ 3830 /* AV: shouldn't we check that it's been opened for read instead? */ 3831 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3832 if (ret < 0) 3833 goto out_put_cfile; 3834 3835 /* 3836 * Determine the event callbacks and set them in @event. This used 3837 * to be done via struct cftype but cgroup core no longer knows 3838 * about these events. The following is crude but the whole thing 3839 * is for compatibility anyway. 3840 * 3841 * DO NOT ADD NEW FILES.
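 *
 * For reference, the registration implemented here is driven from
 * userspace roughly as follows (a sketch; cgroup paths are examples
 * only). This arms a usage threshold at 8M on the legacy hierarchy:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/eventfd.h>
 *
 *	int main(void)
 *	{
 *		int efd = eventfd(0, 0);
 *		int cfd = open("/sys/fs/cgroup/memory/mygroup/"
 *			       "memory.usage_in_bytes", O_RDONLY);
 *		FILE *ecf = fopen("/sys/fs/cgroup/memory/mygroup/"
 *				  "cgroup.event_control", "w");
 *
 *		if (efd < 0 || cfd < 0 || !ecf)
 *			return 1;
 *		// '<event_fd> <control_fd> <args>'
 *		fprintf(ecf, "%d %d %llu", efd, cfd, 8ULL << 20);
 *		if (fclose(ecf))
 *			return 1;
 *		// a read(efd, ...) now blocks until usage crosses 8M
 *		return 0;
 *	}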
3842 */ 3843 name = cfile.file->f_path.dentry->d_name.name; 3844 3845 if (!strcmp(name, "memory.usage_in_bytes")) { 3846 event->register_event = mem_cgroup_usage_register_event; 3847 event->unregister_event = mem_cgroup_usage_unregister_event; 3848 } else if (!strcmp(name, "memory.oom_control")) { 3849 event->register_event = mem_cgroup_oom_register_event; 3850 event->unregister_event = mem_cgroup_oom_unregister_event; 3851 } else if (!strcmp(name, "memory.pressure_level")) { 3852 event->register_event = vmpressure_register_event; 3853 event->unregister_event = vmpressure_unregister_event; 3854 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3855 event->register_event = memsw_cgroup_usage_register_event; 3856 event->unregister_event = memsw_cgroup_usage_unregister_event; 3857 } else { 3858 ret = -EINVAL; 3859 goto out_put_cfile; 3860 } 3861 3862 /* 3863 * Verify that @cfile belongs to @css. Also, remaining events are 3864 * automatically removed on cgroup destruction but the removal is 3865 * asynchronous, so take an extra ref on @css. 3866 */ 3867 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3868 &memory_cgrp_subsys); 3869 ret = -EINVAL; 3870 if (IS_ERR(cfile_css)) 3871 goto out_put_cfile; 3872 if (cfile_css != css) { 3873 css_put(cfile_css); 3874 goto out_put_cfile; 3875 } 3876 3877 ret = event->register_event(memcg, event->eventfd, buf); 3878 if (ret) 3879 goto out_put_css; 3880 3881 efile.file->f_op->poll(efile.file, &event->pt); 3882 3883 spin_lock(&memcg->event_list_lock); 3884 list_add(&event->list, &memcg->event_list); 3885 spin_unlock(&memcg->event_list_lock); 3886 3887 fdput(cfile); 3888 fdput(efile); 3889 3890 return nbytes; 3891 3892 out_put_css: 3893 css_put(css); 3894 out_put_cfile: 3895 fdput(cfile); 3896 out_put_eventfd: 3897 eventfd_ctx_put(event->eventfd); 3898 out_put_efile: 3899 fdput(efile); 3900 out_kfree: 3901 kfree(event); 3902 3903 return ret; 3904 } 3905 3906 static struct cftype mem_cgroup_legacy_files[] = { 3907 { 3908 .name = "usage_in_bytes", 3909 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3910 .read_u64 = mem_cgroup_read_u64, 3911 }, 3912 { 3913 .name = "max_usage_in_bytes", 3914 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 3915 .write = mem_cgroup_reset, 3916 .read_u64 = mem_cgroup_read_u64, 3917 }, 3918 { 3919 .name = "limit_in_bytes", 3920 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 3921 .write = mem_cgroup_write, 3922 .read_u64 = mem_cgroup_read_u64, 3923 }, 3924 { 3925 .name = "soft_limit_in_bytes", 3926 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 3927 .write = mem_cgroup_write, 3928 .read_u64 = mem_cgroup_read_u64, 3929 }, 3930 { 3931 .name = "failcnt", 3932 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 3933 .write = mem_cgroup_reset, 3934 .read_u64 = mem_cgroup_read_u64, 3935 }, 3936 { 3937 .name = "stat", 3938 .seq_show = memcg_stat_show, 3939 }, 3940 { 3941 .name = "force_empty", 3942 .write = mem_cgroup_force_empty_write, 3943 }, 3944 { 3945 .name = "use_hierarchy", 3946 .write_u64 = mem_cgroup_hierarchy_write, 3947 .read_u64 = mem_cgroup_hierarchy_read, 3948 }, 3949 { 3950 .name = "cgroup.event_control", /* XXX: for compat */ 3951 .write = memcg_write_event_control, 3952 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 3953 }, 3954 { 3955 .name = "swappiness", 3956 .read_u64 = mem_cgroup_swappiness_read, 3957 .write_u64 = mem_cgroup_swappiness_write, 3958 }, 3959 { 3960 .name = "move_charge_at_immigrate", 3961 .read_u64 = mem_cgroup_move_charge_read, 3962 .write_u64 =
mem_cgroup_move_charge_write, 3963 }, 3964 { 3965 .name = "oom_control", 3966 .seq_show = mem_cgroup_oom_control_read, 3967 .write_u64 = mem_cgroup_oom_control_write, 3968 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 3969 }, 3970 { 3971 .name = "pressure_level", 3972 }, 3973 #ifdef CONFIG_NUMA 3974 { 3975 .name = "numa_stat", 3976 .seq_show = memcg_numa_stat_show, 3977 }, 3978 #endif 3979 { 3980 .name = "kmem.limit_in_bytes", 3981 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 3982 .write = mem_cgroup_write, 3983 .read_u64 = mem_cgroup_read_u64, 3984 }, 3985 { 3986 .name = "kmem.usage_in_bytes", 3987 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 3988 .read_u64 = mem_cgroup_read_u64, 3989 }, 3990 { 3991 .name = "kmem.failcnt", 3992 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 3993 .write = mem_cgroup_reset, 3994 .read_u64 = mem_cgroup_read_u64, 3995 }, 3996 { 3997 .name = "kmem.max_usage_in_bytes", 3998 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 3999 .write = mem_cgroup_reset, 4000 .read_u64 = mem_cgroup_read_u64, 4001 }, 4002 #ifdef CONFIG_SLABINFO 4003 { 4004 .name = "kmem.slabinfo", 4005 .seq_start = slab_start, 4006 .seq_next = slab_next, 4007 .seq_stop = slab_stop, 4008 .seq_show = memcg_slab_show, 4009 }, 4010 #endif 4011 { 4012 .name = "kmem.tcp.limit_in_bytes", 4013 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4014 .write = mem_cgroup_write, 4015 .read_u64 = mem_cgroup_read_u64, 4016 }, 4017 { 4018 .name = "kmem.tcp.usage_in_bytes", 4019 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4020 .read_u64 = mem_cgroup_read_u64, 4021 }, 4022 { 4023 .name = "kmem.tcp.failcnt", 4024 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4025 .write = mem_cgroup_reset, 4026 .read_u64 = mem_cgroup_read_u64, 4027 }, 4028 { 4029 .name = "kmem.tcp.max_usage_in_bytes", 4030 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4031 .write = mem_cgroup_reset, 4032 .read_u64 = mem_cgroup_read_u64, 4033 }, 4034 { }, /* terminate */ 4035 }; 4036 4037 /* 4038 * Private memory cgroup IDR 4039 * 4040 * Swap-out records and page cache shadow entries need to store memcg 4041 * references in constrained space, so we maintain an ID space that is 4042 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4043 * memory-controlled cgroups to 64k. 4044 * 4045 * However, there usually are many references to the offline CSS after 4046 * the cgroup has been destroyed, such as page cache or reclaimable 4047 * slab objects, that don't need to hang on to the ID. We want to keep 4048 * those dead CSS from occupying IDs, or we might quickly exhaust the 4049 * relatively small ID space and prevent the creation of new cgroups 4050 * even when there are far fewer than 64k cgroups - possibly none. 4051 * 4052 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4053 * be freed and recycled when it's no longer needed, which is usually 4054 * when the CSS is offlined. 4055 * 4056 * The only exceptions to that are records of swapped out tmpfs/shmem 4057 * pages that need to be attributed to live ancestors on swapin. But 4058 * those references are manageable from userspace.
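 *
 * A rough lifecycle sketch, summarizing the code below rather than
 * prescribing it: mem_cgroup_alloc() reserves the ID with a NULL
 * pointer and idr_replace()s it once the memcg is fully set up;
 * mem_cgroup_css_online() sets the ID refcount to 1; swap-out
 * records take extra references via mem_cgroup_id_get_many(); and
 * when the last reference is dropped, mem_cgroup_id_put_many()
 * removes the ID from the IDR and releases the CSS reference that
 * the ID was pinning.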
4059 */ 4060 4061 static DEFINE_IDR(mem_cgroup_idr); 4062 4063 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4064 { 4065 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); 4066 atomic_add(n, &memcg->id.ref); 4067 } 4068 4069 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4070 { 4071 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); 4072 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4073 idr_remove(&mem_cgroup_idr, memcg->id.id); 4074 memcg->id.id = 0; 4075 4076 /* Memcg ID pins CSS */ 4077 css_put(&memcg->css); 4078 } 4079 } 4080 4081 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4082 { 4083 mem_cgroup_id_get_many(memcg, 1); 4084 } 4085 4086 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4087 { 4088 mem_cgroup_id_put_many(memcg, 1); 4089 } 4090 4091 /** 4092 * mem_cgroup_from_id - look up a memcg from a memcg id 4093 * @id: the memcg id to look up 4094 * 4095 * Caller must hold rcu_read_lock(). 4096 */ 4097 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4098 { 4099 WARN_ON_ONCE(!rcu_read_lock_held()); 4100 return idr_find(&mem_cgroup_idr, id); 4101 } 4102 4103 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4104 { 4105 struct mem_cgroup_per_node *pn; 4106 int tmp = node; 4107 /* 4108 * This routine is called against all possible nodes, but it is 4109 * a BUG to call kmalloc() against an offline node. 4110 * 4111 * TODO: this routine can waste a lot of memory for nodes which will 4112 * never be onlined. It's better to use memory hotplug callback 4113 * function. 4114 */ 4115 if (!node_state(node, N_NORMAL_MEMORY)) 4116 tmp = -1; 4117 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4118 if (!pn) 4119 return 1; 4120 4121 lruvec_init(&pn->lruvec); 4122 pn->usage_in_excess = 0; 4123 pn->on_tree = false; 4124 pn->memcg = memcg; 4125 4126 memcg->nodeinfo[node] = pn; 4127 return 0; 4128 } 4129 4130 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4131 { 4132 kfree(memcg->nodeinfo[node]); 4133 } 4134 4135 static void mem_cgroup_free(struct mem_cgroup *memcg) 4136 { 4137 int node; 4138 4139 memcg_wb_domain_exit(memcg); 4140 for_each_node(node) 4141 free_mem_cgroup_per_node_info(memcg, node); 4142 free_percpu(memcg->stat); 4143 kfree(memcg); 4144 } 4145 4146 static struct mem_cgroup *mem_cgroup_alloc(void) 4147 { 4148 struct mem_cgroup *memcg; 4149 size_t size; 4150 int node; 4151 4152 size = sizeof(struct mem_cgroup); 4153 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4154 4155 memcg = kzalloc(size, GFP_KERNEL); 4156 if (!memcg) 4157 return NULL; 4158 4159 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4160 1, MEM_CGROUP_ID_MAX, 4161 GFP_KERNEL); 4162 if (memcg->id.id < 0) 4163 goto fail; 4164 4165 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4166 if (!memcg->stat) 4167 goto fail; 4168 4169 for_each_node(node) 4170 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4171 goto fail; 4172 4173 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4174 goto fail; 4175 4176 INIT_WORK(&memcg->high_work, high_work_func); 4177 memcg->last_scanned_node = MAX_NUMNODES; 4178 INIT_LIST_HEAD(&memcg->oom_notify); 4179 mutex_init(&memcg->thresholds_lock); 4180 spin_lock_init(&memcg->move_lock); 4181 vmpressure_init(&memcg->vmpressure); 4182 INIT_LIST_HEAD(&memcg->event_list); 4183 spin_lock_init(&memcg->event_list_lock); 4184 memcg->socket_pressure = jiffies; 4185 #ifndef CONFIG_SLOB 4186 memcg->kmemcg_id = -1; 4187 #endif 4188 #ifdef CONFIG_CGROUP_WRITEBACK 4189 
INIT_LIST_HEAD(&memcg->cgwb_list); 4190 #endif 4191 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4192 return memcg; 4193 fail: 4194 if (memcg->id.id > 0) 4195 idr_remove(&mem_cgroup_idr, memcg->id.id); 4196 mem_cgroup_free(memcg); 4197 return NULL; 4198 } 4199 4200 static struct cgroup_subsys_state * __ref 4201 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4202 { 4203 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4204 struct mem_cgroup *memcg; 4205 long error = -ENOMEM; 4206 4207 memcg = mem_cgroup_alloc(); 4208 if (!memcg) 4209 return ERR_PTR(error); 4210 4211 memcg->high = PAGE_COUNTER_MAX; 4212 memcg->soft_limit = PAGE_COUNTER_MAX; 4213 if (parent) { 4214 memcg->swappiness = mem_cgroup_swappiness(parent); 4215 memcg->oom_kill_disable = parent->oom_kill_disable; 4216 } 4217 if (parent && parent->use_hierarchy) { 4218 memcg->use_hierarchy = true; 4219 page_counter_init(&memcg->memory, &parent->memory); 4220 page_counter_init(&memcg->swap, &parent->swap); 4221 page_counter_init(&memcg->memsw, &parent->memsw); 4222 page_counter_init(&memcg->kmem, &parent->kmem); 4223 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4224 } else { 4225 page_counter_init(&memcg->memory, NULL); 4226 page_counter_init(&memcg->swap, NULL); 4227 page_counter_init(&memcg->memsw, NULL); 4228 page_counter_init(&memcg->kmem, NULL); 4229 page_counter_init(&memcg->tcpmem, NULL); 4230 /* 4231 * Deeper hierarchy with use_hierarchy == false doesn't make 4232 * much sense so let cgroup subsystem know about this 4233 * unfortunate state in our controller. 4234 */ 4235 if (parent != root_mem_cgroup) 4236 memory_cgrp_subsys.broken_hierarchy = true; 4237 } 4238 4239 /* The following stuff does not apply to the root */ 4240 if (!parent) { 4241 root_mem_cgroup = memcg; 4242 return &memcg->css; 4243 } 4244 4245 error = memcg_online_kmem(memcg); 4246 if (error) 4247 goto fail; 4248 4249 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4250 static_branch_inc(&memcg_sockets_enabled_key); 4251 4252 return &memcg->css; 4253 fail: 4254 mem_cgroup_free(memcg); 4255 return ERR_PTR(-ENOMEM); 4256 } 4257 4258 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4259 { 4260 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4261 4262 /* Online state pins memcg ID, memcg ID pins CSS */ 4263 atomic_set(&memcg->id.ref, 1); 4264 css_get(css); 4265 return 0; 4266 } 4267 4268 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4269 { 4270 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4271 struct mem_cgroup_event *event, *tmp; 4272 4273 /* 4274 * Unregister events and notify userspace. 4275 * Notify userspace about cgroup removal only after rmdir of the 4276 * cgroup directory, to avoid a race between userspace and kernelspace.
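 *
 * (The complementary path lives in memcg_event_wake() above: if
 * userspace closes the eventfd first, the POLLHUP handler unhooks
 * the event from event_list and schedules the same remove work.)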
4277 */ 4278 spin_lock(&memcg->event_list_lock); 4279 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4280 list_del_init(&event->list); 4281 schedule_work(&event->remove); 4282 } 4283 spin_unlock(&memcg->event_list_lock); 4284 4285 memcg_offline_kmem(memcg); 4286 wb_memcg_offline(memcg); 4287 4288 mem_cgroup_id_put(memcg); 4289 } 4290 4291 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4292 { 4293 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4294 4295 invalidate_reclaim_iterators(memcg); 4296 } 4297 4298 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4299 { 4300 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4301 4302 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4303 static_branch_dec(&memcg_sockets_enabled_key); 4304 4305 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4306 static_branch_dec(&memcg_sockets_enabled_key); 4307 4308 vmpressure_cleanup(&memcg->vmpressure); 4309 cancel_work_sync(&memcg->high_work); 4310 mem_cgroup_remove_from_trees(memcg); 4311 memcg_free_kmem(memcg); 4312 mem_cgroup_free(memcg); 4313 } 4314 4315 /** 4316 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4317 * @css: the target css 4318 * 4319 * Reset the states of the mem_cgroup associated with @css. This is 4320 * invoked when the userland requests disabling on the default hierarchy 4321 * but the memcg is pinned through dependency. The memcg should stop 4322 * applying policies and should revert to the vanilla state as it may be 4323 * made visible again. 4324 * 4325 * The current implementation only resets the essential configurations. 4326 * This needs to be expanded to cover all the visible parts. 4327 */ 4328 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4329 { 4330 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4331 4332 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX); 4333 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX); 4334 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX); 4335 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX); 4336 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX); 4337 memcg->low = 0; 4338 memcg->high = PAGE_COUNTER_MAX; 4339 memcg->soft_limit = PAGE_COUNTER_MAX; 4340 memcg_wb_domain_size_changed(memcg); 4341 } 4342 4343 #ifdef CONFIG_MMU 4344 /* Handlers for move charge at task migration. 
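 *
 * A rough sketch of how these handlers fit together, paraphrasing the
 * cgroup_subsys callbacks wired up in memory_cgrp_subsys below:
 *
 *	mem_cgroup_can_attach()    set up mc.from/mc.to and precharge
 *	                           the whole address space of the leader
 *	mem_cgroup_cancel_attach() undo the precharge if attach fails
 *	mem_cgroup_move_task()     (post_attach) walk the page tables,
 *	                           move the charges, then clear mc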
*/ 4345 static int mem_cgroup_do_precharge(unsigned long count) 4346 { 4347 int ret; 4348 4349 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4350 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4351 if (!ret) { 4352 mc.precharge += count; 4353 return ret; 4354 } 4355 4356 /* Try charges one by one with reclaim */ 4357 while (count--) { 4358 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4359 if (ret) 4360 return ret; 4361 mc.precharge++; 4362 cond_resched(); 4363 } 4364 return 0; 4365 } 4366 4367 union mc_target { 4368 struct page *page; 4369 swp_entry_t ent; 4370 }; 4371 4372 enum mc_target_type { 4373 MC_TARGET_NONE = 0, 4374 MC_TARGET_PAGE, 4375 MC_TARGET_SWAP, 4376 }; 4377 4378 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4379 unsigned long addr, pte_t ptent) 4380 { 4381 struct page *page = vm_normal_page(vma, addr, ptent); 4382 4383 if (!page || !page_mapped(page)) 4384 return NULL; 4385 if (PageAnon(page)) { 4386 if (!(mc.flags & MOVE_ANON)) 4387 return NULL; 4388 } else { 4389 if (!(mc.flags & MOVE_FILE)) 4390 return NULL; 4391 } 4392 if (!get_page_unless_zero(page)) 4393 return NULL; 4394 4395 return page; 4396 } 4397 4398 #ifdef CONFIG_SWAP 4399 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4400 pte_t ptent, swp_entry_t *entry) 4401 { 4402 struct page *page = NULL; 4403 swp_entry_t ent = pte_to_swp_entry(ptent); 4404 4405 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4406 return NULL; 4407 /* 4408 * Because lookup_swap_cache() updates some statistics counters, 4409 * we call find_get_page() with swapper_space directly. 4410 */ 4411 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 4412 if (do_memsw_account()) 4413 entry->val = ent.val; 4414 4415 return page; 4416 } 4417 #else 4418 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4419 pte_t ptent, swp_entry_t *entry) 4420 { 4421 return NULL; 4422 } 4423 #endif 4424 4425 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4426 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4427 { 4428 struct page *page = NULL; 4429 struct address_space *mapping; 4430 pgoff_t pgoff; 4431 4432 if (!vma->vm_file) /* anonymous vma */ 4433 return NULL; 4434 if (!(mc.flags & MOVE_FILE)) 4435 return NULL; 4436 4437 mapping = vma->vm_file->f_mapping; 4438 pgoff = linear_page_index(vma, addr); 4439 4440 /* The page is moved even if it isn't in this task's RSS (hasn't been page-faulted in). */ 4441 #ifdef CONFIG_SWAP 4442 /* shmem/tmpfs may report page out on swap: account for that too. */ 4443 if (shmem_mapping(mapping)) { 4444 page = find_get_entry(mapping, pgoff); 4445 if (radix_tree_exceptional_entry(page)) { 4446 swp_entry_t swp = radix_to_swp_entry(page); 4447 if (do_memsw_account()) 4448 *entry = swp; 4449 page = find_get_page(swap_address_space(swp), 4450 swp_offset(swp)); 4451 } 4452 } else 4453 page = find_get_page(mapping, pgoff); 4454 #else 4455 page = find_get_page(mapping, pgoff); 4456 #endif 4457 return page; 4458 } 4459 4460 /** 4461 * mem_cgroup_move_account - move account of the page 4462 * @page: the page 4463 * @compound: charge the page as compound or small page 4464 * @from: mem_cgroup which the page is moved from. 4465 * @to: mem_cgroup which the page is moved to. @from != @to. 4466 * 4467 * The caller must make sure the page is not on LRU (isolate_lru_page() is useful.) 4468 * 4469 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge" 4470 * from the old cgroup.
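 *
 * Typical caller pattern, condensed from the move-charge page table
 * walk below (a sketch, with the precharge bookkeeping and error
 * paths elided):
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, mc.from, mc.to))
 *			mc.moved_charge++;
 *		putback_lru_page(page);
 *	}
 *	put_page(page);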
4471 */ 4472 static int mem_cgroup_move_account(struct page *page, 4473 bool compound, 4474 struct mem_cgroup *from, 4475 struct mem_cgroup *to) 4476 { 4477 unsigned long flags; 4478 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4479 int ret; 4480 bool anon; 4481 4482 VM_BUG_ON(from == to); 4483 VM_BUG_ON_PAGE(PageLRU(page), page); 4484 VM_BUG_ON(compound && !PageTransHuge(page)); 4485 4486 /* 4487 * Prevent mem_cgroup_migrate() from looking at 4488 * page->mem_cgroup of its source page while we change it. 4489 */ 4490 ret = -EBUSY; 4491 if (!trylock_page(page)) 4492 goto out; 4493 4494 ret = -EINVAL; 4495 if (page->mem_cgroup != from) 4496 goto out_unlock; 4497 4498 anon = PageAnon(page); 4499 4500 spin_lock_irqsave(&from->move_lock, flags); 4501 4502 if (!anon && page_mapped(page)) { 4503 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4504 nr_pages); 4505 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4506 nr_pages); 4507 } 4508 4509 /* 4510 * move_lock was grabbed above and the caller set from->moving_account, 4511 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty. 4512 * The mapping should therefore be stable for dirty pages. 4513 */ 4514 if (!anon && PageDirty(page)) { 4515 struct address_space *mapping = page_mapping(page); 4516 4517 if (mapping_cap_account_dirty(mapping)) { 4518 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY], 4519 nr_pages); 4520 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY], 4521 nr_pages); 4522 } 4523 } 4524 4525 if (PageWriteback(page)) { 4526 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4527 nr_pages); 4528 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4529 nr_pages); 4530 } 4531 4532 /* 4533 * It is safe to change page->mem_cgroup here because the page 4534 * is referenced, charged, and isolated - we can't race with 4535 * uncharging, charging, migration, or LRU putback. 4536 */ 4537 4538 /* caller should have done css_get */ 4539 page->mem_cgroup = to; 4540 spin_unlock_irqrestore(&from->move_lock, flags); 4541 4542 ret = 0; 4543 4544 local_irq_disable(); 4545 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4546 memcg_check_events(to, page); 4547 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4548 memcg_check_events(from, page); 4549 local_irq_enable(); 4550 out_unlock: 4551 unlock_page(page); 4552 out: 4553 return ret; 4554 } 4555 4556 /** 4557 * get_mctgt_type - get target type of moving charge 4558 * @vma: the vma the pte to be checked belongs to 4559 * @addr: the address corresponding to the pte to be checked 4560 * @ptent: the pte to be checked 4561 * @target: pointer where the target page or swap entry will be stored (can be NULL) 4562 * 4563 * Returns 4564 * 0 (MC_TARGET_NONE): if the pte is not a target for move charge. 4565 * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4566 * move charge. If @target is not NULL, the page is stored in target->page 4567 * with an extra refcount taken (callers should handle it). 4568 * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4569 * target for charge migration. If @target is not NULL, the entry is stored 4570 * in target->ent. 4571 * 4572 * Called with pte lock held.
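 *
 * For example (illustrative): a present, mapped anon pte whose page is
 * charged to mc.from yields MC_TARGET_PAGE (provided MOVE_ANON is set
 * in mc.flags), while a swap pte whose entry is owned by mc.from
 * yields MC_TARGET_SWAP.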
4573 */ 4574 4575 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4576 unsigned long addr, pte_t ptent, union mc_target *target) 4577 { 4578 struct page *page = NULL; 4579 enum mc_target_type ret = MC_TARGET_NONE; 4580 swp_entry_t ent = { .val = 0 }; 4581 4582 if (pte_present(ptent)) 4583 page = mc_handle_present_pte(vma, addr, ptent); 4584 else if (is_swap_pte(ptent)) 4585 page = mc_handle_swap_pte(vma, ptent, &ent); 4586 else if (pte_none(ptent)) 4587 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4588 4589 if (!page && !ent.val) 4590 return ret; 4591 if (page) { 4592 /* 4593 * Do only loose check w/o serialization. 4594 * mem_cgroup_move_account() checks the page is valid or 4595 * not under LRU exclusion. 4596 */ 4597 if (page->mem_cgroup == mc.from) { 4598 ret = MC_TARGET_PAGE; 4599 if (target) 4600 target->page = page; 4601 } 4602 if (!ret || !target) 4603 put_page(page); 4604 } 4605 /* There is a swap entry and a page doesn't exist or isn't charged */ 4606 if (ent.val && !ret && 4607 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4608 ret = MC_TARGET_SWAP; 4609 if (target) 4610 target->ent = ent; 4611 } 4612 return ret; 4613 } 4614 4615 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4616 /* 4617 * We don't consider swapping or file mapped pages because THP does not 4618 * support them for now. 4619 * Caller should make sure that pmd_trans_huge(pmd) is true. 4620 */ 4621 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4622 unsigned long addr, pmd_t pmd, union mc_target *target) 4623 { 4624 struct page *page = NULL; 4625 enum mc_target_type ret = MC_TARGET_NONE; 4626 4627 page = pmd_page(pmd); 4628 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4629 if (!(mc.flags & MOVE_ANON)) 4630 return ret; 4631 if (page->mem_cgroup == mc.from) { 4632 ret = MC_TARGET_PAGE; 4633 if (target) { 4634 get_page(page); 4635 target->page = page; 4636 } 4637 } 4638 return ret; 4639 } 4640 #else 4641 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4642 unsigned long addr, pmd_t pmd, union mc_target *target) 4643 { 4644 return MC_TARGET_NONE; 4645 } 4646 #endif 4647 4648 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4649 unsigned long addr, unsigned long end, 4650 struct mm_walk *walk) 4651 { 4652 struct vm_area_struct *vma = walk->vma; 4653 pte_t *pte; 4654 spinlock_t *ptl; 4655 4656 ptl = pmd_trans_huge_lock(pmd, vma); 4657 if (ptl) { 4658 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4659 mc.precharge += HPAGE_PMD_NR; 4660 spin_unlock(ptl); 4661 return 0; 4662 } 4663 4664 if (pmd_trans_unstable(pmd)) 4665 return 0; 4666 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4667 for (; addr != end; pte++, addr += PAGE_SIZE) 4668 if (get_mctgt_type(vma, addr, *pte, NULL)) 4669 mc.precharge++; /* increment precharge temporarily */ 4670 pte_unmap_unlock(pte - 1, ptl); 4671 cond_resched(); 4672 4673 return 0; 4674 } 4675 4676 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4677 { 4678 unsigned long precharge; 4679 4680 struct mm_walk mem_cgroup_count_precharge_walk = { 4681 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4682 .mm = mm, 4683 }; 4684 down_read(&mm->mmap_sem); 4685 walk_page_range(0, mm->highest_vm_end, 4686 &mem_cgroup_count_precharge_walk); 4687 up_read(&mm->mmap_sem); 4688 4689 precharge = mc.precharge; 4690 mc.precharge = 0; 4691 4692 return precharge; 4693 } 4694 4695 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4696 { 4697 unsigned long 
precharge = mem_cgroup_count_precharge(mm); 4698 4699 VM_BUG_ON(mc.moving_task); 4700 mc.moving_task = current; 4701 return mem_cgroup_do_precharge(precharge); 4702 } 4703 4704 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4705 static void __mem_cgroup_clear_mc(void) 4706 { 4707 struct mem_cgroup *from = mc.from; 4708 struct mem_cgroup *to = mc.to; 4709 4710 /* we must uncharge all the leftover precharges from mc.to */ 4711 if (mc.precharge) { 4712 cancel_charge(mc.to, mc.precharge); 4713 mc.precharge = 0; 4714 } 4715 /* 4716 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4717 * we must uncharge here. 4718 */ 4719 if (mc.moved_charge) { 4720 cancel_charge(mc.from, mc.moved_charge); 4721 mc.moved_charge = 0; 4722 } 4723 /* we must fix up refcnts and charges */ 4724 if (mc.moved_swap) { 4725 /* uncharge swap account from the old cgroup */ 4726 if (!mem_cgroup_is_root(mc.from)) 4727 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4728 4729 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 4730 4731 /* 4732 * we charged both to->memory and to->memsw, so we 4733 * should uncharge to->memory. 4734 */ 4735 if (!mem_cgroup_is_root(mc.to)) 4736 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4737 4738 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 4739 css_put_many(&mc.to->css, mc.moved_swap); 4740 4741 mc.moved_swap = 0; 4742 } 4743 memcg_oom_recover(from); 4744 memcg_oom_recover(to); 4745 wake_up_all(&mc.waitq); 4746 } 4747 4748 static void mem_cgroup_clear_mc(void) 4749 { 4750 struct mm_struct *mm = mc.mm; 4751 4752 /* 4753 * we must clear moving_task before waking up waiters at the end of 4754 * task migration. 4755 */ 4756 mc.moving_task = NULL; 4757 __mem_cgroup_clear_mc(); 4758 spin_lock(&mc.lock); 4759 mc.from = NULL; 4760 mc.to = NULL; 4761 mc.mm = NULL; 4762 spin_unlock(&mc.lock); 4763 4764 mmput(mm); 4765 } 4766 4767 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4768 { 4769 struct cgroup_subsys_state *css; 4770 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 4771 struct mem_cgroup *from; 4772 struct task_struct *leader, *p; 4773 struct mm_struct *mm; 4774 unsigned long move_flags; 4775 int ret = 0; 4776 4777 /* charge immigration isn't supported on the default hierarchy */ 4778 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4779 return 0; 4780 4781 /* 4782 * Multi-process migrations only happen on the default hierarchy 4783 * where charge immigration is not used. Perform charge 4784 * immigration if @tset contains a leader and whine if there are 4785 * multiple. 4786 */ 4787 p = NULL; 4788 cgroup_taskset_for_each_leader(leader, css, tset) { 4789 WARN_ON_ONCE(p); 4790 p = leader; 4791 memcg = mem_cgroup_from_css(css); 4792 } 4793 if (!p) 4794 return 0; 4795 4796 /* 4797 * We are now committed to this value whatever it is. Changes in this 4798 * tunable will only affect upcoming migrations, not the current one. 4799 * So we need to save it, and keep it going.
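 *
 * (move_flags is a bitmask snapshotted from move_charge_at_immigrate;
 * bits such as MOVE_ANON and MOVE_FILE are what the mc_handle_*_pte()
 * helpers above consult as mc.flags.)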
4800 */ 4801 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4802 if (!move_flags) 4803 return 0; 4804 4805 from = mem_cgroup_from_task(p); 4806 4807 VM_BUG_ON(from == memcg); 4808 4809 mm = get_task_mm(p); 4810 if (!mm) 4811 return 0; 4812 /* We move charges only when we move the owner of the mm */ 4813 if (mm->owner == p) { 4814 VM_BUG_ON(mc.from); 4815 VM_BUG_ON(mc.to); 4816 VM_BUG_ON(mc.precharge); 4817 VM_BUG_ON(mc.moved_charge); 4818 VM_BUG_ON(mc.moved_swap); 4819 4820 spin_lock(&mc.lock); 4821 mc.mm = mm; 4822 mc.from = from; 4823 mc.to = memcg; 4824 mc.flags = move_flags; 4825 spin_unlock(&mc.lock); 4826 /* We set mc.moving_task later */ 4827 4828 ret = mem_cgroup_precharge_mc(mm); 4829 if (ret) 4830 mem_cgroup_clear_mc(); 4831 } else { 4832 mmput(mm); 4833 } 4834 return ret; 4835 } 4836 4837 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4838 { 4839 if (mc.to) 4840 mem_cgroup_clear_mc(); 4841 } 4842 4843 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4844 unsigned long addr, unsigned long end, 4845 struct mm_walk *walk) 4846 { 4847 int ret = 0; 4848 struct vm_area_struct *vma = walk->vma; 4849 pte_t *pte; 4850 spinlock_t *ptl; 4851 enum mc_target_type target_type; 4852 union mc_target target; 4853 struct page *page; 4854 4855 ptl = pmd_trans_huge_lock(pmd, vma); 4856 if (ptl) { 4857 if (mc.precharge < HPAGE_PMD_NR) { 4858 spin_unlock(ptl); 4859 return 0; 4860 } 4861 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4862 if (target_type == MC_TARGET_PAGE) { 4863 page = target.page; 4864 if (!isolate_lru_page(page)) { 4865 if (!mem_cgroup_move_account(page, true, 4866 mc.from, mc.to)) { 4867 mc.precharge -= HPAGE_PMD_NR; 4868 mc.moved_charge += HPAGE_PMD_NR; 4869 } 4870 putback_lru_page(page); 4871 } 4872 put_page(page); 4873 } 4874 spin_unlock(ptl); 4875 return 0; 4876 } 4877 4878 if (pmd_trans_unstable(pmd)) 4879 return 0; 4880 retry: 4881 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4882 for (; addr != end; addr += PAGE_SIZE) { 4883 pte_t ptent = *(pte++); 4884 swp_entry_t ent; 4885 4886 if (!mc.precharge) 4887 break; 4888 4889 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4890 case MC_TARGET_PAGE: 4891 page = target.page; 4892 /* 4893 * We can have a part of the split pmd here. Moving it 4894 * can be done but it would be too convoluted so simply 4895 * ignore such a partial THP and keep it in the original 4896 * memcg. There should be somebody mapping the head. 4897 */ 4898 if (PageTransCompound(page)) 4899 goto put; 4900 if (isolate_lru_page(page)) 4901 goto put; 4902 if (!mem_cgroup_move_account(page, false, 4903 mc.from, mc.to)) { 4904 mc.precharge--; 4905 /* we uncharge from mc.from later. */ 4906 mc.moved_charge++; 4907 } 4908 putback_lru_page(page); 4909 put: /* get_mctgt_type() gets the page */ 4910 put_page(page); 4911 break; 4912 case MC_TARGET_SWAP: 4913 ent = target.ent; 4914 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4915 mc.precharge--; 4916 /* we fix up refcnts and charges later. */ 4917 mc.moved_swap++; 4918 } 4919 break; 4920 default: 4921 break; 4922 } 4923 } 4924 pte_unmap_unlock(pte - 1, ptl); 4925 cond_resched(); 4926 4927 if (addr != end) { 4928 /* 4929 * We have consumed all precharges we got in can_attach(). 4930 * We try to charge one by one, but don't do any additional 4931 * charges to mc.to if we have failed to charge once in the 4932 * attach() phase.
4933 */ 4934 ret = mem_cgroup_do_precharge(1); 4935 if (!ret) 4936 goto retry; 4937 } 4938 4939 return ret; 4940 } 4941 4942 static void mem_cgroup_move_charge(void) 4943 { 4944 struct mm_walk mem_cgroup_move_charge_walk = { 4945 .pmd_entry = mem_cgroup_move_charge_pte_range, 4946 .mm = mc.mm, 4947 }; 4948 4949 lru_add_drain_all(); 4950 /* 4951 * Signal lock_page_memcg() to take the memcg's move_lock 4952 * while we're moving its pages to another memcg. Then wait 4953 * for already started RCU-only updates to finish. 4954 */ 4955 atomic_inc(&mc.from->moving_account); 4956 synchronize_rcu(); 4957 retry: 4958 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 4959 /* 4960 * Someone holding the mmap_sem might be waiting on the 4961 * waitq. So we cancel all extra charges, wake up all waiters, 4962 * and retry. Because we cancel precharges, we might not be able 4963 * to move enough charges, but moving charge is a best-effort 4964 * feature anyway, so it wouldn't be a big problem. 4965 */ 4966 __mem_cgroup_clear_mc(); 4967 cond_resched(); 4968 goto retry; 4969 } 4970 /* 4971 * When we have consumed all precharges and failed to do any 4972 * additional charging, the page walk just aborts. 4973 */ 4974 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 4975 4976 up_read(&mc.mm->mmap_sem); 4977 atomic_dec(&mc.from->moving_account); 4978 } 4979 4980 static void mem_cgroup_move_task(void) 4981 { 4982 if (mc.to) { 4983 mem_cgroup_move_charge(); 4984 mem_cgroup_clear_mc(); 4985 } 4986 } 4987 #else /* !CONFIG_MMU */ 4988 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4989 { 4990 return 0; 4991 } 4992 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4993 { 4994 } 4995 static void mem_cgroup_move_task(void) 4996 { 4997 } 4998 #endif 4999 5000 /* 5001 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 5002 * to verify whether we're attached to the default hierarchy on each mount 5003 * attempt. 5004 */ 5005 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5006 { 5007 /* 5008 * use_hierarchy is forced on the default hierarchy. cgroup core 5009 * guarantees that @root doesn't have any children, so turning it 5010 * on for the root memcg is enough.
5011 */ 5012 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5013 root_mem_cgroup->use_hierarchy = true; 5014 else 5015 root_mem_cgroup->use_hierarchy = false; 5016 } 5017 5018 static u64 memory_current_read(struct cgroup_subsys_state *css, 5019 struct cftype *cft) 5020 { 5021 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5022 5023 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5024 } 5025 5026 static int memory_low_show(struct seq_file *m, void *v) 5027 { 5028 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5029 unsigned long low = READ_ONCE(memcg->low); 5030 5031 if (low == PAGE_COUNTER_MAX) 5032 seq_puts(m, "max\n"); 5033 else 5034 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5035 5036 return 0; 5037 } 5038 5039 static ssize_t memory_low_write(struct kernfs_open_file *of, 5040 char *buf, size_t nbytes, loff_t off) 5041 { 5042 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5043 unsigned long low; 5044 int err; 5045 5046 buf = strstrip(buf); 5047 err = page_counter_memparse(buf, "max", &low); 5048 if (err) 5049 return err; 5050 5051 memcg->low = low; 5052 5053 return nbytes; 5054 } 5055 5056 static int memory_high_show(struct seq_file *m, void *v) 5057 { 5058 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5059 unsigned long high = READ_ONCE(memcg->high); 5060 5061 if (high == PAGE_COUNTER_MAX) 5062 seq_puts(m, "max\n"); 5063 else 5064 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5065 5066 return 0; 5067 } 5068 5069 static ssize_t memory_high_write(struct kernfs_open_file *of, 5070 char *buf, size_t nbytes, loff_t off) 5071 { 5072 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5073 unsigned long nr_pages; 5074 unsigned long high; 5075 int err; 5076 5077 buf = strstrip(buf); 5078 err = page_counter_memparse(buf, "max", &high); 5079 if (err) 5080 return err; 5081 5082 memcg->high = high; 5083 5084 nr_pages = page_counter_read(&memcg->memory); 5085 if (nr_pages > high) 5086 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5087 GFP_KERNEL, true); 5088 5089 memcg_wb_domain_size_changed(memcg); 5090 return nbytes; 5091 } 5092 5093 static int memory_max_show(struct seq_file *m, void *v) 5094 { 5095 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5096 unsigned long max = READ_ONCE(memcg->memory.limit); 5097 5098 if (max == PAGE_COUNTER_MAX) 5099 seq_puts(m, "max\n"); 5100 else 5101 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5102 5103 return 0; 5104 } 5105 5106 static ssize_t memory_max_write(struct kernfs_open_file *of, 5107 char *buf, size_t nbytes, loff_t off) 5108 { 5109 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5110 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5111 bool drained = false; 5112 unsigned long max; 5113 int err; 5114 5115 buf = strstrip(buf); 5116 err = page_counter_memparse(buf, "max", &max); 5117 if (err) 5118 return err; 5119 5120 xchg(&memcg->memory.limit, max); 5121 5122 for (;;) { 5123 unsigned long nr_pages = page_counter_read(&memcg->memory); 5124 5125 if (nr_pages <= max) 5126 break; 5127 5128 if (signal_pending(current)) { 5129 err = -EINTR; 5130 break; 5131 } 5132 5133 if (!drained) { 5134 drain_all_stock(memcg); 5135 drained = true; 5136 continue; 5137 } 5138 5139 if (nr_reclaims) { 5140 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5141 GFP_KERNEL, true)) 5142 nr_reclaims--; 5143 continue; 5144 } 5145 5146 mem_cgroup_events(memcg, MEMCG_OOM, 1); 5147 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5148 break; 5149 } 5150 5151 
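	/*
	 * Illustrative userspace view, an assumption about typical usage
	 * rather than anything defined here: writing e.g. "100M" lowers
	 * the limit immediately and the loop above reclaims down to it,
	 * OOM-killing in the group as a last resort; writing "max"
	 * (recognized by page_counter_memparse()) removes the limit again.
	 */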
memcg_wb_domain_size_changed(memcg); 5152 return nbytes; 5153 } 5154 5155 static int memory_events_show(struct seq_file *m, void *v) 5156 { 5157 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5158 5159 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5160 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5161 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5162 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5163 5164 return 0; 5165 } 5166 5167 static int memory_stat_show(struct seq_file *m, void *v) 5168 { 5169 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5170 unsigned long stat[MEMCG_NR_STAT]; 5171 unsigned long events[MEMCG_NR_EVENTS]; 5172 int i; 5173 5174 /* 5175 * Provide statistics on the state of the memory subsystem as 5176 * well as cumulative event counters that show past behavior. 5177 * 5178 * This list is ordered following a combination of these gradients: 5179 * 1) generic big picture -> specifics and details 5180 * 2) reflecting userspace activity -> reflecting kernel heuristics 5181 * 5182 * Current memory state: 5183 */ 5184 5185 tree_stat(memcg, stat); 5186 tree_events(memcg, events); 5187 5188 seq_printf(m, "anon %llu\n", 5189 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE); 5190 seq_printf(m, "file %llu\n", 5191 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE); 5192 seq_printf(m, "kernel_stack %llu\n", 5193 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024); 5194 seq_printf(m, "slab %llu\n", 5195 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] + 5196 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5197 seq_printf(m, "sock %llu\n", 5198 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5199 5200 seq_printf(m, "file_mapped %llu\n", 5201 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE); 5202 seq_printf(m, "file_dirty %llu\n", 5203 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE); 5204 seq_printf(m, "file_writeback %llu\n", 5205 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE); 5206 5207 for (i = 0; i < NR_LRU_LISTS; i++) { 5208 struct mem_cgroup *mi; 5209 unsigned long val = 0; 5210 5211 for_each_mem_cgroup_tree(mi, memcg) 5212 val += mem_cgroup_nr_lru_pages(mi, BIT(i)); 5213 seq_printf(m, "%s %llu\n", 5214 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5215 } 5216 5217 seq_printf(m, "slab_reclaimable %llu\n", 5218 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE); 5219 seq_printf(m, "slab_unreclaimable %llu\n", 5220 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5221 5222 /* Accumulated memory events */ 5223 5224 seq_printf(m, "pgfault %lu\n", 5225 events[MEM_CGROUP_EVENTS_PGFAULT]); 5226 seq_printf(m, "pgmajfault %lu\n", 5227 events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 5228 5229 return 0; 5230 } 5231 5232 static struct cftype memory_files[] = { 5233 { 5234 .name = "current", 5235 .flags = CFTYPE_NOT_ON_ROOT, 5236 .read_u64 = memory_current_read, 5237 }, 5238 { 5239 .name = "low", 5240 .flags = CFTYPE_NOT_ON_ROOT, 5241 .seq_show = memory_low_show, 5242 .write = memory_low_write, 5243 }, 5244 { 5245 .name = "high", 5246 .flags = CFTYPE_NOT_ON_ROOT, 5247 .seq_show = memory_high_show, 5248 .write = memory_high_write, 5249 }, 5250 { 5251 .name = "max", 5252 .flags = CFTYPE_NOT_ON_ROOT, 5253 .seq_show = memory_max_show, 5254 .write = memory_max_write, 5255 }, 5256 { 5257 .name = "events", 5258 .flags = CFTYPE_NOT_ON_ROOT, 5259 .file_offset = offsetof(struct mem_cgroup, events_file), 5260 .seq_show = memory_events_show, 5261 }, 5262 { 5263 .name = "stat", 5264 .flags = CFTYPE_NOT_ON_ROOT, 5265 .seq_show = 
memory_stat_show, 5266 }, 5267 { } /* terminate */ 5268 }; 5269 5270 struct cgroup_subsys memory_cgrp_subsys = { 5271 .css_alloc = mem_cgroup_css_alloc, 5272 .css_online = mem_cgroup_css_online, 5273 .css_offline = mem_cgroup_css_offline, 5274 .css_released = mem_cgroup_css_released, 5275 .css_free = mem_cgroup_css_free, 5276 .css_reset = mem_cgroup_css_reset, 5277 .can_attach = mem_cgroup_can_attach, 5278 .cancel_attach = mem_cgroup_cancel_attach, 5279 .post_attach = mem_cgroup_move_task, 5280 .bind = mem_cgroup_bind, 5281 .dfl_cftypes = memory_files, 5282 .legacy_cftypes = mem_cgroup_legacy_files, 5283 .early_init = 0, 5284 }; 5285 5286 /** 5287 * mem_cgroup_low - check if memory consumption is below the normal range 5288 * @root: the highest ancestor to consider 5289 * @memcg: the memory cgroup to check 5290 * 5291 * Returns %true if memory consumption of @memcg, and that of all 5292 * configurable ancestors up to @root, is below the normal range. 5293 */ 5294 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5295 { 5296 if (mem_cgroup_disabled()) 5297 return false; 5298 5299 /* 5300 * The toplevel group doesn't have a configurable range, so 5301 * it's never low when looked at directly, and it is not 5302 * considered an ancestor when assessing the hierarchy. 5303 */ 5304 5305 if (memcg == root_mem_cgroup) 5306 return false; 5307 5308 if (page_counter_read(&memcg->memory) >= memcg->low) 5309 return false; 5310 5311 while (memcg != root) { 5312 memcg = parent_mem_cgroup(memcg); 5313 5314 if (memcg == root_mem_cgroup) 5315 break; 5316 5317 if (page_counter_read(&memcg->memory) >= memcg->low) 5318 return false; 5319 } 5320 return true; 5321 } 5322 5323 /** 5324 * mem_cgroup_try_charge - try charging a page 5325 * @page: page to charge 5326 * @mm: mm context of the victim 5327 * @gfp_mask: reclaim mode 5328 * @memcgp: charged memcg return 5329 * @compound: charge the page as compound or small page 5330 * 5331 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5332 * pages according to @gfp_mask if necessary. 5333 * 5334 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5335 * Otherwise, an error code is returned. 5336 * 5337 * After page->mapping has been set up, the caller must finalize the 5338 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5339 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5340 */ 5341 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5342 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5343 bool compound) 5344 { 5345 struct mem_cgroup *memcg = NULL; 5346 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5347 int ret = 0; 5348 5349 if (mem_cgroup_disabled()) 5350 goto out; 5351 5352 if (PageSwapCache(page)) { 5353 /* 5354 * Every swap fault against a single page tries to charge the 5355 * page, bail as early as possible. shmem_unuse() encounters 5356 * already charged pages, too. The USED bit is protected by 5357 * the page lock, which serializes swap cache removal, which 5358 * in turn serializes uncharging. 
5359 */ 5360 VM_BUG_ON_PAGE(!PageLocked(page), page); 5361 if (page->mem_cgroup) 5362 goto out; 5363 5364 if (do_swap_account) { 5365 swp_entry_t ent = { .val = page_private(page), }; 5366 unsigned short id = lookup_swap_cgroup_id(ent); 5367 5368 rcu_read_lock(); 5369 memcg = mem_cgroup_from_id(id); 5370 if (memcg && !css_tryget_online(&memcg->css)) 5371 memcg = NULL; 5372 rcu_read_unlock(); 5373 } 5374 } 5375 5376 if (!memcg) 5377 memcg = get_mem_cgroup_from_mm(mm); 5378 5379 ret = try_charge(memcg, gfp_mask, nr_pages); 5380 5381 css_put(&memcg->css); 5382 out: 5383 *memcgp = memcg; 5384 return ret; 5385 } 5386 5387 /** 5388 * mem_cgroup_commit_charge - commit a page charge 5389 * @page: page to charge 5390 * @memcg: memcg to charge the page to 5391 * @lrucare: page might be on LRU already 5392 * @compound: charge the page as compound or small page 5393 * 5394 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5395 * after page->mapping has been set up. This must happen atomically 5396 * as part of the page instantiation, i.e. under the page table lock 5397 * for anonymous pages, under the page lock for page and swap cache. 5398 * 5399 * In addition, the page must not be on the LRU during the commit, to 5400 * prevent racing with task migration. If it might be, use @lrucare. 5401 * 5402 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5403 */ 5404 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5405 bool lrucare, bool compound) 5406 { 5407 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5408 5409 VM_BUG_ON_PAGE(!page->mapping, page); 5410 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5411 5412 if (mem_cgroup_disabled()) 5413 return; 5414 /* 5415 * Swap faults will attempt to charge the same page multiple 5416 * times. But reuse_swap_page() might have removed the page 5417 * from swapcache already, so we can't check PageSwapCache(). 5418 */ 5419 if (!memcg) 5420 return; 5421 5422 commit_charge(page, memcg, lrucare); 5423 5424 local_irq_disable(); 5425 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 5426 memcg_check_events(memcg, page); 5427 local_irq_enable(); 5428 5429 if (do_memsw_account() && PageSwapCache(page)) { 5430 swp_entry_t entry = { .val = page_private(page) }; 5431 /* 5432 * The swap entry might not get freed for a long time, 5433 * let's not wait for it. The page already received a 5434 * memory+swap charge, drop the swap entry duplicate. 5435 */ 5436 mem_cgroup_uncharge_swap(entry); 5437 } 5438 } 5439 5440 /** 5441 * mem_cgroup_cancel_charge - cancel a page charge 5442 * @page: page to charge 5443 * @memcg: memcg to charge the page to 5444 * @compound: charge the page as compound or small page 5445 * 5446 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5447 */ 5448 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 5449 bool compound) 5450 { 5451 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5452 5453 if (mem_cgroup_disabled()) 5454 return; 5455 /* 5456 * Swap faults will attempt to charge the same page multiple 5457 * times. But reuse_swap_page() might have removed the page 5458 * from swapcache already, so we can't check PageSwapCache(). 
5459 */ 5460 if (!memcg) 5461 return; 5462 5463 cancel_charge(memcg, nr_pages); 5464 } 5465 5466 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5467 unsigned long nr_anon, unsigned long nr_file, 5468 unsigned long nr_huge, unsigned long nr_kmem, 5469 struct page *dummy_page) 5470 { 5471 unsigned long nr_pages = nr_anon + nr_file + nr_kmem; 5472 unsigned long flags; 5473 5474 if (!mem_cgroup_is_root(memcg)) { 5475 page_counter_uncharge(&memcg->memory, nr_pages); 5476 if (do_memsw_account()) 5477 page_counter_uncharge(&memcg->memsw, nr_pages); 5478 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem) 5479 page_counter_uncharge(&memcg->kmem, nr_kmem); 5480 memcg_oom_recover(memcg); 5481 } 5482 5483 local_irq_save(flags); 5484 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5485 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5486 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5487 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5488 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5489 memcg_check_events(memcg, dummy_page); 5490 local_irq_restore(flags); 5491 5492 if (!mem_cgroup_is_root(memcg)) 5493 css_put_many(&memcg->css, nr_pages); 5494 } 5495 5496 static void uncharge_list(struct list_head *page_list) 5497 { 5498 struct mem_cgroup *memcg = NULL; 5499 unsigned long nr_anon = 0; 5500 unsigned long nr_file = 0; 5501 unsigned long nr_huge = 0; 5502 unsigned long nr_kmem = 0; 5503 unsigned long pgpgout = 0; 5504 struct list_head *next; 5505 struct page *page; 5506 5507 /* 5508 * Note that the list can be a single page->lru; hence the 5509 * do-while loop instead of a simple list_for_each_entry(). 5510 */ 5511 next = page_list->next; 5512 do { 5513 page = list_entry(next, struct page, lru); 5514 next = page->lru.next; 5515 5516 VM_BUG_ON_PAGE(PageLRU(page), page); 5517 VM_BUG_ON_PAGE(page_count(page), page); 5518 5519 if (!page->mem_cgroup) 5520 continue; 5521 5522 /* 5523 * Nobody should be changing or seriously looking at 5524 * page->mem_cgroup at this point, we have fully 5525 * exclusive access to the page. 5526 */ 5527 5528 if (memcg != page->mem_cgroup) { 5529 if (memcg) { 5530 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5531 nr_huge, nr_kmem, page); 5532 pgpgout = nr_anon = nr_file = 5533 nr_huge = nr_kmem = 0; 5534 } 5535 memcg = page->mem_cgroup; 5536 } 5537 5538 if (!PageKmemcg(page)) { 5539 unsigned int nr_pages = 1; 5540 5541 if (PageTransHuge(page)) { 5542 nr_pages <<= compound_order(page); 5543 nr_huge += nr_pages; 5544 } 5545 if (PageAnon(page)) 5546 nr_anon += nr_pages; 5547 else 5548 nr_file += nr_pages; 5549 pgpgout++; 5550 } else { 5551 nr_kmem += 1 << compound_order(page); 5552 __ClearPageKmemcg(page); 5553 } 5554 5555 page->mem_cgroup = NULL; 5556 } while (next != page_list); 5557 5558 if (memcg) 5559 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5560 nr_huge, nr_kmem, page); 5561 } 5562 5563 /** 5564 * mem_cgroup_uncharge - uncharge a page 5565 * @page: page to uncharge 5566 * 5567 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5568 * mem_cgroup_commit_charge(). 
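 *
 * For reference, the whole charge protocol in one place, condensed
 * from the API comments above (a sketch, with error handling and the
 * actual callers elided):
 *
 *	mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
 *	... instantiate the page, set up page->mapping ...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *		(or mem_cgroup_cancel_charge() if instantiation failed)
 *
 * and mem_cgroup_uncharge() runs once the page is finally freed.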
5569 */ 5570 void mem_cgroup_uncharge(struct page *page) 5571 { 5572 if (mem_cgroup_disabled()) 5573 return; 5574 5575 /* Don't touch page->lru of any random page, pre-check: */ 5576 if (!page->mem_cgroup) 5577 return; 5578 5579 INIT_LIST_HEAD(&page->lru); 5580 uncharge_list(&page->lru); 5581 } 5582 5583 /** 5584 * mem_cgroup_uncharge_list - uncharge a list of pages 5585 * @page_list: list of pages to uncharge 5586 * 5587 * Uncharge a list of pages previously charged with 5588 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5589 */ 5590 void mem_cgroup_uncharge_list(struct list_head *page_list) 5591 { 5592 if (mem_cgroup_disabled()) 5593 return; 5594 5595 if (!list_empty(page_list)) 5596 uncharge_list(page_list); 5597 } 5598 5599 /** 5600 * mem_cgroup_migrate - charge a page's replacement 5601 * @oldpage: currently circulating page 5602 * @newpage: replacement page 5603 * 5604 * Charge @newpage as a replacement page for @oldpage. @oldpage will 5605 * be uncharged upon free. 5606 * 5607 * Both pages must be locked, and @newpage->mapping must be set up. 5608 */ 5609 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 5610 { 5611 struct mem_cgroup *memcg; 5612 unsigned int nr_pages; 5613 bool compound; 5614 unsigned long flags; 5615 5616 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5617 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5618 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5619 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5620 newpage); 5621 5622 if (mem_cgroup_disabled()) 5623 return; 5624 5625 /* Page cache replacement: new page already charged? */ 5626 if (newpage->mem_cgroup) 5627 return; 5628 5629 /* Swapcache readahead pages can get replaced before being charged */ 5630 memcg = oldpage->mem_cgroup; 5631 if (!memcg) 5632 return; 5633 5634 /* Force-charge the new page. The old one will be freed soon */ 5635 compound = PageTransHuge(newpage); 5636 nr_pages = compound ? hpage_nr_pages(newpage) : 1; 5637 5638 page_counter_charge(&memcg->memory, nr_pages); 5639 if (do_memsw_account()) 5640 page_counter_charge(&memcg->memsw, nr_pages); 5641 css_get_many(&memcg->css, nr_pages); 5642 5643 commit_charge(newpage, memcg, false); 5644 5645 local_irq_save(flags); 5646 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5647 memcg_check_events(memcg, newpage); 5648 local_irq_restore(flags); 5649 } 5650 5651 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5652 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5653 5654 void mem_cgroup_sk_alloc(struct sock *sk) 5655 { 5656 struct mem_cgroup *memcg; 5657 5658 if (!mem_cgroup_sockets_enabled) 5659 return; 5660 5661 /* 5662 * Socket cloning can throw us here with sk_memcg already 5663 * filled. It won't, however, necessarily happen from 5664 * process context. So the test for root memcg given 5665 * the current task's memcg won't help us in this case. 5666 * 5667 * Respecting the original socket's memcg is a better 5668 * decision in this case.
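 *
 * (The sk->sk_memcg reference taken here is what the networking call
 * sites, which live outside this file, are assumed to hand to
 * mem_cgroup_charge_skmem()/mem_cgroup_uncharge_skmem() below.)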
5669 */ 5670 if (sk->sk_memcg) { 5671 BUG_ON(mem_cgroup_is_root(sk->sk_memcg)); 5672 css_get(&sk->sk_memcg->css); 5673 return; 5674 } 5675 5676 rcu_read_lock(); 5677 memcg = mem_cgroup_from_task(current); 5678 if (memcg == root_mem_cgroup) 5679 goto out; 5680 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 5681 goto out; 5682 if (css_tryget_online(&memcg->css)) 5683 sk->sk_memcg = memcg; 5684 out: 5685 rcu_read_unlock(); 5686 } 5687 5688 void mem_cgroup_sk_free(struct sock *sk) 5689 { 5690 if (sk->sk_memcg) 5691 css_put(&sk->sk_memcg->css); 5692 } 5693 5694 /** 5695 * mem_cgroup_charge_skmem - charge socket memory 5696 * @memcg: memcg to charge 5697 * @nr_pages: number of pages to charge 5698 * 5699 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 5700 * @memcg's configured limit, %false if the charge had to be forced. 5701 */ 5702 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5703 { 5704 gfp_t gfp_mask = GFP_KERNEL; 5705 5706 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5707 struct page_counter *fail; 5708 5709 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 5710 memcg->tcpmem_pressure = 0; 5711 return true; 5712 } 5713 page_counter_charge(&memcg->tcpmem, nr_pages); 5714 memcg->tcpmem_pressure = 1; 5715 return false; 5716 } 5717 5718 /* Don't block in the packet receive path */ 5719 if (in_softirq()) 5720 gfp_mask = GFP_NOWAIT; 5721 5722 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages); 5723 5724 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 5725 return true; 5726 5727 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 5728 return false; 5729 } 5730 5731 /** 5732 * mem_cgroup_uncharge_skmem - uncharge socket memory 5733 * @memcg: memcg to uncharge 5734 * @nr_pages: number of pages to uncharge 5735 */ 5736 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5737 { 5738 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5739 page_counter_uncharge(&memcg->tcpmem, nr_pages); 5740 return; 5741 } 5742 5743 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages); 5744 5745 page_counter_uncharge(&memcg->memory, nr_pages); 5746 css_put_many(&memcg->css, nr_pages); 5747 } 5748 5749 static int __init cgroup_memory(char *s) 5750 { 5751 char *token; 5752 5753 while ((token = strsep(&s, ",")) != NULL) { 5754 if (!*token) 5755 continue; 5756 if (!strcmp(token, "nosocket")) 5757 cgroup_memory_nosocket = true; 5758 if (!strcmp(token, "nokmem")) 5759 cgroup_memory_nokmem = true; 5760 } 5761 return 0; 5762 } 5763 __setup("cgroup.memory=", cgroup_memory); 5764 5765 /* 5766 * subsys_initcall() for memory controller. 5767 * 5768 * Some parts like hotcpu_notifier() have to be initialized from this context 5769 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 5770 * everything that doesn't depend on a specific mem_cgroup structure should 5771 * be initialized from here. 5772 */ 5773 static int __init mem_cgroup_init(void) 5774 { 5775 int cpu, node; 5776 5777 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5778 5779 for_each_possible_cpu(cpu) 5780 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5781 drain_local_stock); 5782 5783 for_each_node(node) { 5784 struct mem_cgroup_tree_per_node *rtpn; 5785 5786 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5787 node_online(node) ? 
node : NUMA_NO_NODE); 5788 5789 rtpn->rb_root = RB_ROOT; 5790 spin_lock_init(&rtpn->lock); 5791 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5792 } 5793 5794 return 0; 5795 } 5796 subsys_initcall(mem_cgroup_init); 5797 5798 #ifdef CONFIG_MEMCG_SWAP 5799 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 5800 { 5801 while (!atomic_inc_not_zero(&memcg->id.ref)) { 5802 /* 5803 * The root cgroup cannot be destroyed, so its refcount must 5804 * always be >= 1. 5805 */ 5806 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 5807 VM_BUG_ON(1); 5808 break; 5809 } 5810 memcg = parent_mem_cgroup(memcg); 5811 if (!memcg) 5812 memcg = root_mem_cgroup; 5813 } 5814 return memcg; 5815 } 5816 5817 /** 5818 * mem_cgroup_swapout - transfer a memsw charge to swap 5819 * @page: page whose memsw charge to transfer 5820 * @entry: swap entry to move the charge to 5821 * 5822 * Transfer the memsw charge of @page to @entry. 5823 */ 5824 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5825 { 5826 struct mem_cgroup *memcg, *swap_memcg; 5827 unsigned short oldid; 5828 5829 VM_BUG_ON_PAGE(PageLRU(page), page); 5830 VM_BUG_ON_PAGE(page_count(page), page); 5831 5832 if (!do_memsw_account()) 5833 return; 5834 5835 memcg = page->mem_cgroup; 5836 5837 /* Readahead page, never charged */ 5838 if (!memcg) 5839 return; 5840 5841 /* 5842 * In case the memcg owning these pages has been offlined and doesn't 5843 * have an ID allocated to it anymore, charge the closest online 5844 * ancestor for the swap instead and transfer the memory+swap charge. 5845 */ 5846 swap_memcg = mem_cgroup_id_get_online(memcg); 5847 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); 5848 VM_BUG_ON_PAGE(oldid, page); 5849 mem_cgroup_swap_statistics(swap_memcg, true); 5850 5851 page->mem_cgroup = NULL; 5852 5853 if (!mem_cgroup_is_root(memcg)) 5854 page_counter_uncharge(&memcg->memory, 1); 5855 5856 if (memcg != swap_memcg) { 5857 if (!mem_cgroup_is_root(swap_memcg)) 5858 page_counter_charge(&swap_memcg->memsw, 1); 5859 page_counter_uncharge(&memcg->memsw, 1); 5860 } 5861 5862 /* 5863 * Interrupts should be disabled here because the caller holds 5864 * mapping->tree_lock, which is taken with interrupts off. It is 5865 * important here to have the interrupts disabled because it is the 5866 * only synchronisation we have for updating the per-CPU variables. 5867 */ 5868 VM_BUG_ON(!irqs_disabled()); 5869 mem_cgroup_charge_statistics(memcg, page, false, -1); 5870 memcg_check_events(memcg, page); 5871 5872 if (!mem_cgroup_is_root(memcg)) 5873 css_put(&memcg->css); 5874 } 5875 5876 /** 5877 * mem_cgroup_try_charge_swap - try charging a swap entry 5878 * @page: page being added to swap 5879 * @entry: swap entry to charge 5880 * 5881 * Try to charge @entry to the memcg that @page belongs to. 5882 * 5883 * Returns 0 on success, -ENOMEM on failure.
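 *
 * (On the default hierarchy this charges the dedicated swap counter;
 * legacy memory+swap accounting is handled separately, via
 * mem_cgroup_swapout() above.)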
5884 */ 5885 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 5886 { 5887 struct mem_cgroup *memcg; 5888 struct page_counter *counter; 5889 unsigned short oldid; 5890 5891 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) 5892 return 0; 5893 5894 memcg = page->mem_cgroup; 5895 5896 /* Readahead page, never charged */ 5897 if (!memcg) 5898 return 0; 5899 5900 memcg = mem_cgroup_id_get_online(memcg); 5901 5902 if (!mem_cgroup_is_root(memcg) && 5903 !page_counter_try_charge(&memcg->swap, 1, &counter)) { 5904 mem_cgroup_id_put(memcg); 5905 return -ENOMEM; 5906 } 5907 5908 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5909 VM_BUG_ON_PAGE(oldid, page); 5910 mem_cgroup_swap_statistics(memcg, true); 5911 5912 return 0; 5913 } 5914 5915 /** 5916 * mem_cgroup_uncharge_swap - uncharge a swap entry 5917 * @entry: swap entry to uncharge 5918 * 5919 * Drop the swap charge associated with @entry. 5920 */ 5921 void mem_cgroup_uncharge_swap(swp_entry_t entry) 5922 { 5923 struct mem_cgroup *memcg; 5924 unsigned short id; 5925 5926 if (!do_swap_account) 5927 return; 5928 5929 id = swap_cgroup_record(entry, 0); 5930 rcu_read_lock(); 5931 memcg = mem_cgroup_from_id(id); 5932 if (memcg) { 5933 if (!mem_cgroup_is_root(memcg)) { 5934 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5935 page_counter_uncharge(&memcg->swap, 1); 5936 else 5937 page_counter_uncharge(&memcg->memsw, 1); 5938 } 5939 mem_cgroup_swap_statistics(memcg, false); 5940 mem_cgroup_id_put(memcg); 5941 } 5942 rcu_read_unlock(); 5943 } 5944 5945 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5946 { 5947 long nr_swap_pages = get_nr_swap_pages(); 5948 5949 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5950 return nr_swap_pages; 5951 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 5952 nr_swap_pages = min_t(long, nr_swap_pages, 5953 READ_ONCE(memcg->swap.limit) - 5954 page_counter_read(&memcg->swap)); 5955 return nr_swap_pages; 5956 } 5957 5958 bool mem_cgroup_swap_full(struct page *page) 5959 { 5960 struct mem_cgroup *memcg; 5961 5962 VM_BUG_ON_PAGE(!PageLocked(page), page); 5963 5964 if (vm_swap_full()) 5965 return true; 5966 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5967 return false; 5968 5969 memcg = page->mem_cgroup; 5970 if (!memcg) 5971 return false; 5972 5973 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 5974 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit) 5975 return true; 5976 5977 return false; 5978 } 5979 5980 /* to remember the boot option */ 5981 #ifdef CONFIG_MEMCG_SWAP_ENABLED 5982 static int really_do_swap_account __initdata = 1; 5983 #else 5984 static int really_do_swap_account __initdata; 5985 #endif 5986 5987 static int __init enable_swap_account(char *s) 5988 { 5989 if (!strcmp(s, "1")) 5990 really_do_swap_account = 1; 5991 else if (!strcmp(s, "0")) 5992 really_do_swap_account = 0; 5993 return 1; 5994 } 5995 __setup("swapaccount=", enable_swap_account); 5996 5997 static u64 swap_current_read(struct cgroup_subsys_state *css, 5998 struct cftype *cft) 5999 { 6000 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6001 6002 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 6003 } 6004 6005 static int swap_max_show(struct seq_file *m, void *v) 6006 { 6007 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 6008 unsigned long max = READ_ONCE(memcg->swap.limit); 6009 6010 if (max == PAGE_COUNTER_MAX) 6011 seq_puts(m, "max\n"); 6012 else 6013 seq_printf(m, "%llu\n", 
(u64)max * PAGE_SIZE); 6014 6015 return 0; 6016 } 6017 6018 static ssize_t swap_max_write(struct kernfs_open_file *of, 6019 char *buf, size_t nbytes, loff_t off) 6020 { 6021 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6022 unsigned long max; 6023 int err; 6024 6025 buf = strstrip(buf); 6026 err = page_counter_memparse(buf, "max", &max); 6027 if (err) 6028 return err; 6029 6030 mutex_lock(&memcg_limit_mutex); 6031 err = page_counter_limit(&memcg->swap, max); 6032 mutex_unlock(&memcg_limit_mutex); 6033 if (err) 6034 return err; 6035 6036 return nbytes; 6037 } 6038 6039 static struct cftype swap_files[] = { 6040 { 6041 .name = "swap.current", 6042 .flags = CFTYPE_NOT_ON_ROOT, 6043 .read_u64 = swap_current_read, 6044 }, 6045 { 6046 .name = "swap.max", 6047 .flags = CFTYPE_NOT_ON_ROOT, 6048 .seq_show = swap_max_show, 6049 .write = swap_max_write, 6050 }, 6051 { } /* terminate */ 6052 }; 6053 6054 static struct cftype memsw_cgroup_files[] = { 6055 { 6056 .name = "memsw.usage_in_bytes", 6057 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6058 .read_u64 = mem_cgroup_read_u64, 6059 }, 6060 { 6061 .name = "memsw.max_usage_in_bytes", 6062 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6063 .write = mem_cgroup_reset, 6064 .read_u64 = mem_cgroup_read_u64, 6065 }, 6066 { 6067 .name = "memsw.limit_in_bytes", 6068 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6069 .write = mem_cgroup_write, 6070 .read_u64 = mem_cgroup_read_u64, 6071 }, 6072 { 6073 .name = "memsw.failcnt", 6074 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6075 .write = mem_cgroup_reset, 6076 .read_u64 = mem_cgroup_read_u64, 6077 }, 6078 { }, /* terminate */ 6079 }; 6080 6081 static int __init mem_cgroup_swap_init(void) 6082 { 6083 if (!mem_cgroup_disabled() && really_do_swap_account) { 6084 do_swap_account = 1; 6085 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, 6086 swap_files)); 6087 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 6088 memsw_cgroup_files)); 6089 } 6090 return 0; 6091 } 6092 subsys_initcall(mem_cgroup_swap_init); 6093 6094 #endif /* CONFIG_MEMCG_SWAP */ 6095