/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
EXPORT_SYMBOL(mem_cgroup_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account == 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif


static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"writeback",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	int last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
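
/*
 * Worked example (a sketch, with hypothetical numbers): with thresholds
 * of 100M, 150M and 200M registered and a current usage of 120M,
 * current_threshold indexes the 100M entry, i.e. the last threshold at
 * or below usage. A later usage change then only needs to walk from
 * that index in one direction, signalling each eventfd it passes.
 */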

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set if
	 * you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;

	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	oom_wakeups;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into
	 * this mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or for other synchronizations.
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

	atomic_t	dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list. per-memcg */
	struct list_head memcg_slab_caches;
	/* Not a spinlock, we can take a lot of time walking the list */
	struct mutex slab_caches_mutex;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};

#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	/*
	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
	 * will call css_put() if it sees the memcg is dead.
	 */
	smp_wmb();
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}
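
/*
 * Example (a sketch of the bitmap encoding): writing 3 to
 * memory.move_charge_at_immigrate sets both bits, i.e.
 * (1 << MOVE_CHARGE_TYPE_ANON) | (1 << MOVE_CHARGE_TYPE_FILE), so both
 * move_anon() and move_file() return true while a move is in progress.
 */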

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL		(0)
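
/*
 * Example (a sketch of the encoding): the private value for an
 * oom_control file would be MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
 * i.e. (_OOM_TYPE << 16) | 0; MEMFILE_TYPE() and MEMFILE_ATTR()
 * recover the two halves when the file handler runs.
 */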

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	/*
	 * The ID of the root cgroup is 0, but memcg treats 0 as an
	 * invalid ID, so we return (cgroup_id + 1).
	 */
	return memcg->css.cgroup->id + 1;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id - 1, &mem_cgroup_subsys);
	return mem_cgroup_from_css(css);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * The main reason for not using the cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few are kmem-limited. Or also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200-entry array for that.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size. It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);
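
/*
 * Sketch of the intended fast path (memcg_kmem_enabled() in the header
 * wraps this key): callers do
 *
 *	if (static_key_false(&memcg_kmem_enabled_key))
 *		do the per-memcg cache lookup;
 *
 * so kernels with no kmem-limited memcg pay only a patched-out branch.
 * The key is armed with static_key_slow_inc() when a kmem limit is
 * first set, and dropped in disarm_kmem_keys() below.
 */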

static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in the kmem destruction function,
	 * since the charges will outlive the cgroup.
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}

static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount.
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when the hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node(node) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
		!css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}
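
/*
 * Worked example (a sketch, with hypothetical numbers): with three
 * groups over their soft limit by 16M, 4M and 4M, the 16M group sits
 * rightmost in the tree; equal excesses (the two 4M groups) are sent
 * right on insertion, so rb_last() above always yields the currently
 * worst offender to reclaim from first.
 */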

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because of that accounting. Even if we provided a quick-and-fuzzy
 * read, we would always have to visit all online cpus and make a sum. So,
 * for now, unnecessary synchronization is not implemented (it is just
 * implemented for cpu hotplug).
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	preempt_disable();

	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore the page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);

	preempt_enable();
}
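
/*
 * Worked example (sketch): charging a 2M transparent hugepage as anon
 * on x86 calls the above with nr_pages = 512, so STAT_RSS and
 * STAT_RSS_HUGE each grow by 512 pages while PGPGIN is bumped only
 * once; the matching uncharge passes nr_pages = -512 and records a
 * single PGPGOUT.
 */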

unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list lru;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(lru) {
		if (BIT(lru) & lru_mask)
			ret += mz->lru_size[lru];
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}
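
/*
 * Example (sketch): with THRESHOLDS_EVENTS_TARGET at 128, a CPU whose
 * nr_page_events has advanced 128 or more past its saved target makes
 * the ratelimit check fire once and rearm, so mem_cgroup_threshold()
 * runs at most once per 128 page events per CPU; the softlimit and
 * numainfo targets work the same way at a coarser 1024-event grain.
 */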

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner may be being moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/*
 * Returns a next (in a pre-order walk) alive memcg (with elevated css
 * ref. count) or NULL if the whole root's subtree has been visited.
 *
 * helper function to be used by mem_cgroup_iter
 */
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
		struct mem_cgroup *last_visited)
{
	struct cgroup_subsys_state *prev_css, *next_css;

	prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
	next_css = css_next_descendant_pre(prev_css, &root->css);

	/*
	 * Even if we found a group we have to make sure it is
	 * alive. css && !memcg means that the groups should be
	 * skipped and we should continue the tree walk.
	 * last_visited css is safe to use because it is
	 * protected by css_get and the tree walk is rcu safe.
	 *
	 * We do not take a reference on the root of the tree walk
	 * because we might race with the root removal when it would
	 * be the only node in the iterated hierarchy and mem_cgroup_iter
	 * would end up in an endless loop because it expects that at
	 * least one valid node will be returned. Root cannot disappear
	 * because the caller of the iterator should hold it already, so
	 * skipping the css reference should be safe.
	 */
	if (next_css) {
		if ((next_css->flags & CSS_ONLINE) &&
				(next_css == &root->css || css_tryget(next_css)))
			return mem_cgroup_from_css(next_css);

		prev_css = next_css;
		goto skip_node;
	}

	return NULL;
}

static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
{
	/*
	 * When a group in the hierarchy below root is destroyed, the
	 * hierarchy iterator can no longer be trusted since it might
	 * have pointed to the destroyed group. Invalidate it.
	 */
	atomic_inc(&root->dead_count);
}

static struct mem_cgroup *
mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
		     struct mem_cgroup *root,
		     int *sequence)
{
	struct mem_cgroup *position = NULL;
	/*
	 * A cgroup destruction happens in two stages: offlining and
	 * release. They are separated by a RCU grace period.
	 *
	 * If the iterator is valid, we may still race with an
	 * offlining. The RCU lock ensures the object won't be
	 * released, tryget will fail if we lost the race.
	 */
	*sequence = atomic_read(&root->dead_count);
	if (iter->last_dead_count == *sequence) {
		/* pairs with the smp_wmb() in mem_cgroup_iter_update() */
		smp_rmb();
		position = iter->last_visited;

		/*
		 * We cannot take a reference to root because we might race
		 * with root removal and returning NULL would end up in
		 * an endless loop on the iterator user level when root
		 * would be returned all the time.
		 */
		if (position && position != root &&
				!css_tryget(&position->css))
			position = NULL;
	}
	return position;
}

static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
				   struct mem_cgroup *last_visited,
				   struct mem_cgroup *new_position,
				   struct mem_cgroup *root,
				   int sequence)
{
	/* root reference counting symmetric to mem_cgroup_iter_load */
	if (last_visited && last_visited != root)
		css_put(&last_visited->css);
	/*
	 * We store the sequence count from the time @last_visited was
	 * loaded successfully instead of rereading it here so that we
	 * don't lose destruction events in between. We could have
	 * raced with the destruction of @new_position after all.
	 */
	iter->last_visited = new_position;
	/* pairs with the smp_rmb() in mem_cgroup_iter_load() */
	smp_wmb();
	iter->last_dead_count = sequence;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *last_visited = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		last_visited = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out_css_put;
		return root;
	}

	rcu_read_lock();
	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		int uninitialized_var(seq);

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation) {
				iter->last_visited = NULL;
				goto out_unlock;
			}

			last_visited = mem_cgroup_iter_load(iter, root, &seq);
		}

		memcg = __mem_cgroup_iter_next(root, last_visited);

		if (reclaim) {
			mem_cgroup_iter_update(iter, last_visited, memcg, root,
					seq);

			if (!memcg)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !memcg)
			goto out_unlock;
	}
out_unlock:
	rcu_read_unlock();
out_css_put:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}
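
/*
 * Basic usage (a sketch; "done" stands for any early-exit condition):
 * a partial walk that bails out early must hand the last returned
 * memcg back for reference counting:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	while (memcg) {
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */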

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
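
/*
 * Typical use (a sketch; iter, val and idx are illustrative names):
 * summing a statistic over a whole subtree, as the stat files do:
 *
 *	struct mem_cgroup *iter;
 *	long val = 0;
 *
 *	for_each_mem_cgroup_tree(iter, memcg)
 *		val += mem_cgroup_read_stat(iter, idx);
 *
 * Running the loop to completion drops all references automatically.
 */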
1384 */ 1385 1386 /** 1387 * mem_cgroup_page_lruvec - return lruvec for adding an lru page 1388 * @page: the page 1389 * @zone: zone of the page 1390 */ 1391 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) 1392 { 1393 struct mem_cgroup_per_zone *mz; 1394 struct mem_cgroup *memcg; 1395 struct page_cgroup *pc; 1396 struct lruvec *lruvec; 1397 1398 if (mem_cgroup_disabled()) { 1399 lruvec = &zone->lruvec; 1400 goto out; 1401 } 1402 1403 pc = lookup_page_cgroup(page); 1404 memcg = pc->mem_cgroup; 1405 1406 /* 1407 * Surreptitiously switch any uncharged offlist page to root: 1408 * an uncharged page off lru does nothing to secure 1409 * its former mem_cgroup from sudden removal. 1410 * 1411 * Our caller holds lru_lock, and PageCgroupUsed is updated 1412 * under page_cgroup lock: between them, they make all uses 1413 * of pc->mem_cgroup safe. 1414 */ 1415 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) 1416 pc->mem_cgroup = memcg = root_mem_cgroup; 1417 1418 mz = page_cgroup_zoneinfo(memcg, page); 1419 lruvec = &mz->lruvec; 1420 out: 1421 /* 1422 * Since a node can be onlined after the mem_cgroup was created, 1423 * we have to be prepared to initialize lruvec->zone here; 1424 * and if offlined then reonlined, we need to reinitialize it. 1425 */ 1426 if (unlikely(lruvec->zone != zone)) 1427 lruvec->zone = zone; 1428 return lruvec; 1429 } 1430 1431 /** 1432 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1433 * @lruvec: mem_cgroup per zone lru vector 1434 * @lru: index of lru list the page is sitting on 1435 * @nr_pages: positive when adding or negative when removing 1436 * 1437 * This function must be called when a page is added to or removed from an 1438 * lru list. 1439 */ 1440 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1441 int nr_pages) 1442 { 1443 struct mem_cgroup_per_zone *mz; 1444 unsigned long *lru_size; 1445 1446 if (mem_cgroup_disabled()) 1447 return; 1448 1449 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1450 lru_size = mz->lru_size + lru; 1451 *lru_size += nr_pages; 1452 VM_BUG_ON((long)(*lru_size) < 0); 1453 } 1454 1455 /* 1456 * Checks whether given mem is same or in the root_mem_cgroup's 1457 * hierarchy subtree 1458 */ 1459 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1460 struct mem_cgroup *memcg) 1461 { 1462 if (root_memcg == memcg) 1463 return true; 1464 if (!root_memcg->use_hierarchy || !memcg) 1465 return false; 1466 return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup); 1467 } 1468 1469 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1470 struct mem_cgroup *memcg) 1471 { 1472 bool ret; 1473 1474 rcu_read_lock(); 1475 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg); 1476 rcu_read_unlock(); 1477 return ret; 1478 } 1479 1480 bool task_in_mem_cgroup(struct task_struct *task, 1481 const struct mem_cgroup *memcg) 1482 { 1483 struct mem_cgroup *curr = NULL; 1484 struct task_struct *p; 1485 bool ret; 1486 1487 p = find_lock_task_mm(task); 1488 if (p) { 1489 curr = try_get_mem_cgroup_from_mm(p->mm); 1490 task_unlock(p); 1491 } else { 1492 /* 1493 * All threads may have already detached their mm's, but the oom 1494 * killer still needs to detect if they have already been oom 1495 * killed to prevent needlessly killing additional tasks. 
1496 */ 1497 rcu_read_lock(); 1498 curr = mem_cgroup_from_task(task); 1499 if (curr) 1500 css_get(&curr->css); 1501 rcu_read_unlock(); 1502 } 1503 if (!curr) 1504 return false; 1505 /* 1506 * We should check use_hierarchy of "memcg" not "curr". Because checking 1507 * use_hierarchy of "curr" here make this function true if hierarchy is 1508 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* 1509 * hierarchy(even if use_hierarchy is disabled in "memcg"). 1510 */ 1511 ret = mem_cgroup_same_or_subtree(memcg, curr); 1512 css_put(&curr->css); 1513 return ret; 1514 } 1515 1516 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) 1517 { 1518 unsigned long inactive_ratio; 1519 unsigned long inactive; 1520 unsigned long active; 1521 unsigned long gb; 1522 1523 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); 1524 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); 1525 1526 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1527 if (gb) 1528 inactive_ratio = int_sqrt(10 * gb); 1529 else 1530 inactive_ratio = 1; 1531 1532 return inactive * inactive_ratio < active; 1533 } 1534 1535 #define mem_cgroup_from_res_counter(counter, member) \ 1536 container_of(counter, struct mem_cgroup, member) 1537 1538 /** 1539 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1540 * @memcg: the memory cgroup 1541 * 1542 * Returns the maximum amount of memory @mem can be charged with, in 1543 * pages. 1544 */ 1545 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1546 { 1547 unsigned long long margin; 1548 1549 margin = res_counter_margin(&memcg->res); 1550 if (do_swap_account) 1551 margin = min(margin, res_counter_margin(&memcg->memsw)); 1552 return margin >> PAGE_SHIFT; 1553 } 1554 1555 int mem_cgroup_swappiness(struct mem_cgroup *memcg) 1556 { 1557 /* root ? */ 1558 if (!css_parent(&memcg->css)) 1559 return vm_swappiness; 1560 1561 return memcg->swappiness; 1562 } 1563 1564 /* 1565 * memcg->moving_account is used for checking possibility that some thread is 1566 * calling move_account(). When a thread on CPU-A starts moving pages under 1567 * a memcg, other threads should check memcg->moving_account under 1568 * rcu_read_lock(), like this: 1569 * 1570 * CPU-A CPU-B 1571 * rcu_read_lock() 1572 * memcg->moving_account+1 if (memcg->mocing_account) 1573 * take heavy locks. 1574 * synchronize_rcu() update something. 1575 * rcu_read_unlock() 1576 * start move here. 1577 */ 1578 1579 /* for quick checking without looking up memcg */ 1580 atomic_t memcg_moving __read_mostly; 1581 1582 static void mem_cgroup_start_move(struct mem_cgroup *memcg) 1583 { 1584 atomic_inc(&memcg_moving); 1585 atomic_inc(&memcg->moving_account); 1586 synchronize_rcu(); 1587 } 1588 1589 static void mem_cgroup_end_move(struct mem_cgroup *memcg) 1590 { 1591 /* 1592 * Now, mem_cgroup_clear_mc() may call this function with NULL. 1593 * We check NULL in callee rather than caller. 1594 */ 1595 if (memcg) { 1596 atomic_dec(&memcg_moving); 1597 atomic_dec(&memcg->moving_account); 1598 } 1599 } 1600 1601 /* 1602 * 2 routines for checking "mem" is under move_account() or not. 1603 * 1604 * mem_cgroup_stolen() - checking whether a cgroup is mc.from or not. This 1605 * is used for avoiding races in accounting. If true, 1606 * pc->mem_cgroup may be overwritten. 1607 * 1608 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or 1609 * under hierarchy of moving cgroups. This is for 1610 * waiting at hith-memory prressure caused by "move". 
1611 */ 1612 1613 static bool mem_cgroup_stolen(struct mem_cgroup *memcg) 1614 { 1615 VM_BUG_ON(!rcu_read_lock_held()); 1616 return atomic_read(&memcg->moving_account) > 0; 1617 } 1618 1619 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1620 { 1621 struct mem_cgroup *from; 1622 struct mem_cgroup *to; 1623 bool ret = false; 1624 /* 1625 * Unlike task_move routines, we access mc.to, mc.from not under 1626 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1627 */ 1628 spin_lock(&mc.lock); 1629 from = mc.from; 1630 to = mc.to; 1631 if (!from) 1632 goto unlock; 1633 1634 ret = mem_cgroup_same_or_subtree(memcg, from) 1635 || mem_cgroup_same_or_subtree(memcg, to); 1636 unlock: 1637 spin_unlock(&mc.lock); 1638 return ret; 1639 } 1640 1641 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1642 { 1643 if (mc.moving_task && current != mc.moving_task) { 1644 if (mem_cgroup_under_move(memcg)) { 1645 DEFINE_WAIT(wait); 1646 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1647 /* moving charge context might have finished. */ 1648 if (mc.moving_task) 1649 schedule(); 1650 finish_wait(&mc.waitq, &wait); 1651 return true; 1652 } 1653 } 1654 return false; 1655 } 1656 1657 /* 1658 * Take this lock when 1659 * - a code tries to modify page's memcg while it's USED. 1660 * - a code tries to modify page state accounting in a memcg. 1661 * see mem_cgroup_stolen(), too. 1662 */ 1663 static void move_lock_mem_cgroup(struct mem_cgroup *memcg, 1664 unsigned long *flags) 1665 { 1666 spin_lock_irqsave(&memcg->move_lock, *flags); 1667 } 1668 1669 static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, 1670 unsigned long *flags) 1671 { 1672 spin_unlock_irqrestore(&memcg->move_lock, *flags); 1673 } 1674 1675 #define K(x) ((x) << (PAGE_SHIFT-10)) 1676 /** 1677 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	/*
	 * protects memcg_name and makes sure that parallel ooms do not
	 * interleave
	 */
	static DEFINE_SPINLOCK(oom_info_lock);
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	static char memcg_name[PATH_MAX];
	int ret;
	struct mem_cgroup *iter;
	unsigned int i;

	if (!p)
		return;

	spin_lock(&oom_info_lock);
	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information.
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	pr_info("Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level.
	 */
	pr_cont(" as a result of limit of %s\n", memcg_name);
done:

	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats");

		rcu_read_lock();
		ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
		if (!ret)
			pr_cont(" for %s", memcg_name);
		rcu_read_unlock();
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
	spin_unlock(&oom_info_lock);
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);

	/*
	 * Do not consider swap space if we cannot swap due to swappiness
	 */
	if (mem_cgroup_swappiness(memcg)) {
		u64 memsw;

		limit += total_swap_pages << PAGE_SHIFT;
		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);

		/*
		 * If memsw is finite and limits the amount of swap space
		 * available to this memcg, return that limit.
		 */
		limit = min(limit, memsw);
	}

	return limit;
}
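
/*
 * Worked example (a sketch, with hypothetical numbers): a memcg with a
 * 1G memory limit, nonzero swappiness and 512M of total swap gets
 * limit = 1G + 512M = 1.5G, unless a finite memsw limit (say 1.2G)
 * caps it lower; the result feeds the OOM badness base below.
 */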
1907	 */
1908		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1909			break;
1910		if (mem_cgroup_margin(memcg))
1911			break;
1912		/*
1913		 * If nothing was reclaimed after two attempts, there
1914		 * may be no reclaimable pages in this hierarchy.
1915		 */
1916		if (loop && !total)
1917			break;
1918	}
1919	return total;
1920 }
1921
1922 /**
1923  * test_mem_cgroup_node_reclaimable
1924  * @memcg: the target memcg
1925  * @nid: the node ID to be checked.
1926  * @noswap: specify true here if the user wants file-only information.
1927  *
1928  * This function checks whether the specified memcg contains any
1929  * reclaimable pages on the given node, returning true if there
1930  * are any.
1931  */
1932 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1933		int nid, bool noswap)
1934 {
1935	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1936		return true;
1937	if (noswap || !total_swap_pages)
1938		return false;
1939	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1940		return true;
1941	return false;
1942
1943 }
1944 #if MAX_NUMNODES > 1
1945
1946 /*
1947  * Always updating the nodemask is not very good - even if we have an empty
1948  * list or the wrong list here, we can start from some node and traverse all
1949  * nodes based on the zonelist. So, update the list loosely once per 10
1950  * seconds.
1951  */
1952 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1953 {
1954	int nid;
1955	/*
1956	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1957	 * pagein/pageout changes since the last update.
1958	 */
1959	if (!atomic_read(&memcg->numainfo_events))
1960		return;
1961	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1962		return;
1963
1964	/* make a nodemask where this memcg uses memory from */
1965	memcg->scan_nodes = node_states[N_MEMORY];
1966
1967	for_each_node_mask(nid, node_states[N_MEMORY]) {
1968
1969		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1970			node_clear(nid, memcg->scan_nodes);
1971	}
1972
1973	atomic_set(&memcg->numainfo_events, 0);
1974	atomic_set(&memcg->numainfo_updating, 0);
1975 }
1976
1977 /*
1978  * Select a node to start reclaim from. Because all we need is to reduce
1979  * the usage counter, starting from anywhere is OK. Reclaiming from the
1980  * current node has both pros and cons.
1981  *
1982  * Freeing memory from the current node means freeing memory from a node
1983  * which we'll use or have used, so it may hurt LRU behaviour. And if
1984  * several threads hit their limits, they will contend on one node. But
1985  * freeing from a remote node costs more for memory reclaim because of
1986  * memory latency.
1987  *
1988  * For now, we use round-robin. A better algorithm is welcome.
1989  */
1990 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1991 {
1992	int node;
1993
1994	mem_cgroup_may_update_nodemask(memcg);
1995	node = memcg->last_scanned_node;
1996
1997	node = next_node(node, memcg->scan_nodes);
1998	if (node == MAX_NUMNODES)
1999		node = first_node(memcg->scan_nodes);
2000	/*
2001	 * We call this when we hit the limit, not when pages are added to
2002	 * the LRU. The LRUs may hold no pages because all pages are
2003	 * UNEVICTABLE, or the memcg is too small and no pages are on the
2004	 * LRU yet. In that case, we use the current node.
2005	 */
2006	if (unlikely(node == MAX_NUMNODES))
2007		node = numa_node_id();
2008
2009	memcg->last_scanned_node = node;
2010	return node;
2011 }
2012
2013 /*
2014  * Check each node to see whether it contains reclaimable pages.
2014  * For quick scan, we make use of scan_nodes. This will allow us to skip
2015  * unused nodes. But scan_nodes is lazily updated and may not contain
2016  * enough fresh information, so we need to double-check.
2017  */
2018 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2019 {
2020	int nid;
2021
2022	/*
2023	 * quick check... making use of scan_nodes.
2024	 * We can skip unused nodes.
2025	 */
2026	if (!nodes_empty(memcg->scan_nodes)) {
2027		for (nid = first_node(memcg->scan_nodes);
2028		     nid < MAX_NUMNODES;
2029		     nid = next_node(nid, memcg->scan_nodes)) {
2030
2031			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
2032				return true;
2033		}
2034	}
2035	/*
2036	 * Check the rest of the nodes.
2037	 */
2038	for_each_node_state(nid, N_MEMORY) {
2039		if (node_isset(nid, memcg->scan_nodes))
2040			continue;
2041		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
2042			return true;
2043	}
2044	return false;
2045 }
2046
2047 #else
2048 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
2049 {
2050	return 0;
2051 }
2052
2053 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2054 {
2055	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
2056 }
2057 #endif
2058
2059 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2060				   struct zone *zone,
2061				   gfp_t gfp_mask,
2062				   unsigned long *total_scanned)
2063 {
2064	struct mem_cgroup *victim = NULL;
2065	int total = 0;
2066	int loop = 0;
2067	unsigned long excess;
2068	unsigned long nr_scanned;
2069	struct mem_cgroup_reclaim_cookie reclaim = {
2070		.zone = zone,
2071		.priority = 0,
2072	};
2073
2074	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2075
2076	while (1) {
2077		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2078		if (!victim) {
2079			loop++;
2080			if (loop >= 2) {
2081				/*
2082				 * If we have not been able to reclaim
2083				 * anything, it might be because there are
2084				 * no reclaimable pages under this hierarchy.
2085				 */
2086				if (!total)
2087					break;
2088				/*
2089				 * We want to do more targeted reclaim.
2090				 * excess >> 2 is not too large, so we don't
2091				 * reclaim too much, nor too small, so we don't
2092				 * keep coming back to reclaim from this cgroup.
2093				 */
2094				if (total >= (excess >> 2) ||
2095					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2096					break;
2097			}
2098			continue;
2099		}
2100		if (!mem_cgroup_reclaimable(victim, false))
2101			continue;
2102		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2103						     zone, &nr_scanned);
2104		*total_scanned += nr_scanned;
2105		if (!res_counter_soft_limit_excess(&root_memcg->res))
2106			break;
2107	}
2108	mem_cgroup_iter_break(root_memcg, victim);
2109	return total;
2110 }
2111
2112 #ifdef CONFIG_LOCKDEP
2113 static struct lockdep_map memcg_oom_lock_dep_map = {
2114	.name = "memcg_oom_lock",
2115 };
2116 #endif
2117
2118 static DEFINE_SPINLOCK(memcg_oom_lock);
2119
2120 /*
2121  * Check whether the OOM killer is already running under our hierarchy.
2122  * If someone else is running it, return false.
2123  */
2124 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2125 {
2126	struct mem_cgroup *iter, *failed = NULL;
2127
2128	spin_lock(&memcg_oom_lock);
2129
2130	for_each_mem_cgroup_tree(iter, memcg) {
2131		if (iter->oom_lock) {
2132			/*
2133			 * this subtree of our hierarchy is already locked,
2134			 * so we cannot take the lock.
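			 *
			 * The cleanup path below relies on
			 * for_each_mem_cgroup_tree() visiting groups in the
			 * same order on every walk, so clearing oom_lock up
			 * to the group that failed undoes exactly what we
			 * managed to set.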
2135 */ 2136 failed = iter; 2137 mem_cgroup_iter_break(memcg, iter); 2138 break; 2139 } else 2140 iter->oom_lock = true; 2141 } 2142 2143 if (failed) { 2144 /* 2145 * OK, we failed to lock the whole subtree so we have 2146 * to clean up what we set up to the failing subtree 2147 */ 2148 for_each_mem_cgroup_tree(iter, memcg) { 2149 if (iter == failed) { 2150 mem_cgroup_iter_break(memcg, iter); 2151 break; 2152 } 2153 iter->oom_lock = false; 2154 } 2155 } else 2156 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 2157 2158 spin_unlock(&memcg_oom_lock); 2159 2160 return !failed; 2161 } 2162 2163 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 2164 { 2165 struct mem_cgroup *iter; 2166 2167 spin_lock(&memcg_oom_lock); 2168 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 2169 for_each_mem_cgroup_tree(iter, memcg) 2170 iter->oom_lock = false; 2171 spin_unlock(&memcg_oom_lock); 2172 } 2173 2174 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 2175 { 2176 struct mem_cgroup *iter; 2177 2178 for_each_mem_cgroup_tree(iter, memcg) 2179 atomic_inc(&iter->under_oom); 2180 } 2181 2182 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 2183 { 2184 struct mem_cgroup *iter; 2185 2186 /* 2187 * When a new child is created while the hierarchy is under oom, 2188 * mem_cgroup_oom_lock() may not be called. We have to use 2189 * atomic_add_unless() here. 2190 */ 2191 for_each_mem_cgroup_tree(iter, memcg) 2192 atomic_add_unless(&iter->under_oom, -1, 0); 2193 } 2194 2195 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 2196 2197 struct oom_wait_info { 2198 struct mem_cgroup *memcg; 2199 wait_queue_t wait; 2200 }; 2201 2202 static int memcg_oom_wake_function(wait_queue_t *wait, 2203 unsigned mode, int sync, void *arg) 2204 { 2205 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 2206 struct mem_cgroup *oom_wait_memcg; 2207 struct oom_wait_info *oom_wait_info; 2208 2209 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 2210 oom_wait_memcg = oom_wait_info->memcg; 2211 2212 /* 2213 * Both of oom_wait_info->memcg and wake_memcg are stable under us. 2214 * Then we can use css_is_ancestor without taking care of RCU. 2215 */ 2216 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) 2217 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) 2218 return 0; 2219 return autoremove_wake_function(wait, mode, sync, arg); 2220 } 2221 2222 static void memcg_wakeup_oom(struct mem_cgroup *memcg) 2223 { 2224 atomic_inc(&memcg->oom_wakeups); 2225 /* for filtering, pass "memcg" as argument. */ 2226 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 2227 } 2228 2229 static void memcg_oom_recover(struct mem_cgroup *memcg) 2230 { 2231 if (memcg && atomic_read(&memcg->under_oom)) 2232 memcg_wakeup_oom(memcg); 2233 } 2234 2235 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 2236 { 2237 if (!current->memcg_oom.may_oom) 2238 return; 2239 /* 2240 * We are in the middle of the charge context here, so we 2241 * don't want to block when potentially sitting on a callstack 2242 * that holds all kinds of filesystem and mm locks. 2243 * 2244 * Also, the caller may handle a failed allocation gracefully 2245 * (like optional page cache readahead) and so an OOM killer 2246 * invocation might not even be necessary. 
2247  *
2248  * That's why we don't do anything here except remember the
2249  * OOM context and then deal with it at the end of the page
2250  * fault when the stack is unwound, the locks are released,
2251  * and when we know whether the fault was overall successful.
2252  */
2253	css_get(&memcg->css);
2254	current->memcg_oom.memcg = memcg;
2255	current->memcg_oom.gfp_mask = mask;
2256	current->memcg_oom.order = order;
2257 }
2258
2259 /**
2260  * mem_cgroup_oom_synchronize - complete memcg OOM handling
2261  * @handle: actually kill/wait or just clean up the OOM state
2262  *
2263  * This has to be called at the end of a page fault if the memcg OOM
2264  * handler was enabled.
2265  *
2266  * Memcg supports userspace OOM handling where failed allocations must
2267  * sleep on a waitqueue until the userspace task resolves the
2268  * situation. Sleeping directly in the charge context with all kinds
2269  * of locks held is not a good idea; instead we remember an OOM state
2270  * in the task and mem_cgroup_oom_synchronize() has to be called at
2271  * the end of the page fault to complete the OOM handling.
2272  *
2273  * Returns %true if an ongoing memcg OOM situation was detected and
2274  * completed, %false otherwise.
2275  */
2276 bool mem_cgroup_oom_synchronize(bool handle)
2277 {
2278	struct mem_cgroup *memcg = current->memcg_oom.memcg;
2279	struct oom_wait_info owait;
2280	bool locked;
2281
2282	/* OOM is global, do not handle */
2283	if (!memcg)
2284		return false;
2285
2286	if (!handle)
2287		goto cleanup;
2288
2289	owait.memcg = memcg;
2290	owait.wait.flags = 0;
2291	owait.wait.func = memcg_oom_wake_function;
2292	owait.wait.private = current;
2293	INIT_LIST_HEAD(&owait.wait.task_list);
2294
2295	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2296	mem_cgroup_mark_under_oom(memcg);
2297
2298	locked = mem_cgroup_oom_trylock(memcg);
2299
2300	if (locked)
2301		mem_cgroup_oom_notify(memcg);
2302
2303	if (locked && !memcg->oom_kill_disable) {
2304		mem_cgroup_unmark_under_oom(memcg);
2305		finish_wait(&memcg_oom_waitq, &owait.wait);
2306		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2307					 current->memcg_oom.order);
2308	} else {
2309		schedule();
2310		mem_cgroup_unmark_under_oom(memcg);
2311		finish_wait(&memcg_oom_waitq, &owait.wait);
2312	}
2313
2314	if (locked) {
2315		mem_cgroup_oom_unlock(memcg);
2316		/*
2317		 * There is no guarantee that an OOM-lock contender
2318		 * sees the wakeups triggered by the OOM kill
2319		 * uncharges. Wake any sleepers explicitly.
2320		 */
2321		memcg_oom_recover(memcg);
2322	}
2323 cleanup:
2324	current->memcg_oom.memcg = NULL;
2325	css_put(&memcg->css);
2326	return true;
2327 }
2328
2329 /*
2330  * Currently used to update mapped file statistics, but the routine can be
2331  * generalized to update other statistics as well.
2332  *
2333  * Notes: Race condition
2334  *
2335  * We usually use page_cgroup_lock() for accessing page_cgroup members,
2336  * but it tends to be costly. Under some conditions, though, we don't
2337  * need to do so _always_.
2338  *
2339  * Considering "charge", lock_page_cgroup() is not required because all
2340  * file-stat operations happen after a page is attached to the radix tree.
2341  * There is no race with "charge".
2342  *
2343  * Considering "uncharge", we know that memcg intentionally doesn't clear
2344  * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup
2345  * even if there is a race with "uncharge". The statistics themselves are
2346  * properly handled via flags.
2347  *
2348  * Considering "move", this is the only case where we see a race. To keep
2349  * the race window small, we check mm->moving_account to detect a possible
2350  * race; if there is one, we take the lock.
2351  */
2352
2353 void __mem_cgroup_begin_update_page_stat(struct page *page,
2354				bool *locked, unsigned long *flags)
2355 {
2356	struct mem_cgroup *memcg;
2357	struct page_cgroup *pc;
2358
2359	pc = lookup_page_cgroup(page);
2360 again:
2361	memcg = pc->mem_cgroup;
2362	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2363		return;
2364	/*
2365	 * If this memory cgroup is not under account moving, we don't
2366	 * need to take move_lock_mem_cgroup(). Because we already hold
2367	 * rcu_read_lock(), any calls to move_account will be delayed until
2368	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
2369	 */
2370	if (!mem_cgroup_stolen(memcg))
2371		return;
2372
2373	move_lock_mem_cgroup(memcg, flags);
2374	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2375		move_unlock_mem_cgroup(memcg, flags);
2376		goto again;
2377	}
2378	*locked = true;
2379 }
2380
2381 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2382 {
2383	struct page_cgroup *pc = lookup_page_cgroup(page);
2384
2385	/*
2386	 * It's guaranteed that pc->mem_cgroup never changes while the
2387	 * lock is held, because any routine that modifies pc->mem_cgroup
2388	 * must take move_lock_mem_cgroup().
2389	 */
2390	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2391 }
2392
2393 void mem_cgroup_update_page_stat(struct page *page,
2394				 enum mem_cgroup_stat_index idx, int val)
2395 {
2396	struct mem_cgroup *memcg;
2397	struct page_cgroup *pc = lookup_page_cgroup(page);
2398	unsigned long uninitialized_var(flags);
2399
2400	if (mem_cgroup_disabled())
2401		return;
2402
2403	VM_BUG_ON(!rcu_read_lock_held());
2404	memcg = pc->mem_cgroup;
2405	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2406		return;
2407
2408	this_cpu_add(memcg->stat->count[idx], val);
2409 }
2410
2411 /*
2412  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2413  * TODO: it may be necessary to use bigger numbers on big iron.
2414  */
2415 #define CHARGE_BATCH	32U
2416 struct memcg_stock_pcp {
2417	struct mem_cgroup *cached; /* never the root cgroup */
2418	unsigned int nr_pages;
2419	struct work_struct work;
2420	unsigned long flags;
2421 #define FLUSHING_CACHED_CHARGE	0
2422 };
2423 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2424 static DEFINE_MUTEX(percpu_charge_mutex);
2425
2426 /**
2427  * consume_stock: Try to consume stocked charge on this cpu.
2428  * @memcg: memcg to consume from.
2429  * @nr_pages: how many pages to charge.
2430  *
2431  * The charges will only happen if @memcg matches the current cpu's memcg
2432  * stock, and at least @nr_pages are available in that stock. Failure to
2433  * service an allocation will refill the stock.
2434  *
2435  * Returns true if successful, false otherwise.
2436  */
2437 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2438 {
2439	struct memcg_stock_pcp *stock;
2440	bool ret = true;
2441
2442	if (nr_pages > CHARGE_BATCH)
2443		return false;
2444
2445	stock = &get_cpu_var(memcg_stock);
2446	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2447		stock->nr_pages -= nr_pages;
2448	else /* need to call res_counter_charge */
2449		ret = false;
2450	put_cpu_var(memcg_stock);
2451	return ret;
2452 }
2453
2454 /*
2455  * Return stocks cached in percpu to the res_counter and reset the cached information.
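 *
 * As a rough sketch of the stock life cycle (illustrative only): a
 * charge first tries consume_stock(); on a miss, the slow path charges
 * a CHARGE_BATCH-sized chunk to the res_counter and refill_stock()
 * parks the surplus here, until drain_stock() eventually returns it.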
2456  */
2457 static void drain_stock(struct memcg_stock_pcp *stock)
2458 {
2459	struct mem_cgroup *old = stock->cached;
2460
2461	if (stock->nr_pages) {
2462		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2463
2464		res_counter_uncharge(&old->res, bytes);
2465		if (do_swap_account)
2466			res_counter_uncharge(&old->memsw, bytes);
2467		stock->nr_pages = 0;
2468	}
2469	stock->cached = NULL;
2470 }
2471
2472 /*
2473  * This must be called with preemption disabled, or by a thread
2474  * pinned to the local cpu.
2475  */
2476 static void drain_local_stock(struct work_struct *dummy)
2477 {
2478	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2479	drain_stock(stock);
2480	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2481 }
2482
2483 static void __init memcg_stock_init(void)
2484 {
2485	int cpu;
2486
2487	for_each_possible_cpu(cpu) {
2488		struct memcg_stock_pcp *stock =
2489					&per_cpu(memcg_stock, cpu);
2490		INIT_WORK(&stock->work, drain_local_stock);
2491	}
2492 }
2493
2494 /*
2495  * Cache charges from the res_counter in the local per-cpu area,
2496  * to be consumed by consume_stock() later.
2497  */
2498 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2499 {
2500	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2501
2502	if (stock->cached != memcg) { /* reset if necessary */
2503		drain_stock(stock);
2504		stock->cached = memcg;
2505	}
2506	stock->nr_pages += nr_pages;
2507	put_cpu_var(memcg_stock);
2508 }
2509
2510 /*
2511  * Drain all per-CPU charge caches for the given root_memcg and the
2512  * subtree of the hierarchy under it. The sync flag says whether we
2513  * should block until the work is done.
2514  */
2515 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2516 {
2517	int cpu, curcpu;
2518
2519	/* Notify other cpus that a system-wide "drain" is running */
2520	get_online_cpus();
2521	curcpu = get_cpu();
2522	for_each_online_cpu(cpu) {
2523		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2524		struct mem_cgroup *memcg;
2525
2526		memcg = stock->cached;
2527		if (!memcg || !stock->nr_pages)
2528			continue;
2529		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2530			continue;
2531		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2532			if (cpu == curcpu)
2533				drain_local_stock(&stock->work);
2534			else
2535				schedule_work_on(cpu, &stock->work);
2536		}
2537	}
2538	put_cpu();
2539
2540	if (!sync)
2541		goto out;
2542
2543	for_each_online_cpu(cpu) {
2544		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2545		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2546			flush_work(&stock->work);
2547	}
2548 out:
2549	put_online_cpus();
2550 }
2551
2552 /*
2553  * Try to drain stocked charges on other cpus. This function is
2554  * asynchronous and just schedules one work item per cpu to drain
2555  * locally on that cpu. The caller can expect some charges to go back
2556  * to the res_counter later, but cannot wait for that to happen.
2557  */
2558 static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2559 {
2560	/*
2561	 * If someone is already draining, avoid adding more kworker runs.
2562	 */
2563	if (!mutex_trylock(&percpu_charge_mutex))
2564		return;
2565	drain_all_stock(root_memcg, false);
2566	mutex_unlock(&percpu_charge_mutex);
2567 }
2568
2569 /* This is a synchronous drain interface.
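 * It may sleep on percpu_charge_mutex and in flush_work() until every
 * cpu's pending drain has completed.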
*/ 2570 static void drain_all_stock_sync(struct mem_cgroup *root_memcg) 2571 { 2572 /* called when force_empty is called */ 2573 mutex_lock(&percpu_charge_mutex); 2574 drain_all_stock(root_memcg, true); 2575 mutex_unlock(&percpu_charge_mutex); 2576 } 2577 2578 /* 2579 * This function drains percpu counter value from DEAD cpu and 2580 * move it to local cpu. Note that this function can be preempted. 2581 */ 2582 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) 2583 { 2584 int i; 2585 2586 spin_lock(&memcg->pcp_counter_lock); 2587 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 2588 long x = per_cpu(memcg->stat->count[i], cpu); 2589 2590 per_cpu(memcg->stat->count[i], cpu) = 0; 2591 memcg->nocpu_base.count[i] += x; 2592 } 2593 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2594 unsigned long x = per_cpu(memcg->stat->events[i], cpu); 2595 2596 per_cpu(memcg->stat->events[i], cpu) = 0; 2597 memcg->nocpu_base.events[i] += x; 2598 } 2599 spin_unlock(&memcg->pcp_counter_lock); 2600 } 2601 2602 static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 2603 unsigned long action, 2604 void *hcpu) 2605 { 2606 int cpu = (unsigned long)hcpu; 2607 struct memcg_stock_pcp *stock; 2608 struct mem_cgroup *iter; 2609 2610 if (action == CPU_ONLINE) 2611 return NOTIFY_OK; 2612 2613 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 2614 return NOTIFY_OK; 2615 2616 for_each_mem_cgroup(iter) 2617 mem_cgroup_drain_pcp_counter(iter, cpu); 2618 2619 stock = &per_cpu(memcg_stock, cpu); 2620 drain_stock(stock); 2621 return NOTIFY_OK; 2622 } 2623 2624 2625 /* See __mem_cgroup_try_charge() for details */ 2626 enum { 2627 CHARGE_OK, /* success */ 2628 CHARGE_RETRY, /* need to retry but retry is not bad */ 2629 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 2630 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */ 2631 }; 2632 2633 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2634 unsigned int nr_pages, unsigned int min_pages, 2635 bool invoke_oom) 2636 { 2637 unsigned long csize = nr_pages * PAGE_SIZE; 2638 struct mem_cgroup *mem_over_limit; 2639 struct res_counter *fail_res; 2640 unsigned long flags = 0; 2641 int ret; 2642 2643 ret = res_counter_charge(&memcg->res, csize, &fail_res); 2644 2645 if (likely(!ret)) { 2646 if (!do_swap_account) 2647 return CHARGE_OK; 2648 ret = res_counter_charge(&memcg->memsw, csize, &fail_res); 2649 if (likely(!ret)) 2650 return CHARGE_OK; 2651 2652 res_counter_uncharge(&memcg->res, csize); 2653 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 2654 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 2655 } else 2656 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2657 /* 2658 * Never reclaim on behalf of optional batching, retry with a 2659 * single page instead. 2660 */ 2661 if (nr_pages > min_pages) 2662 return CHARGE_RETRY; 2663 2664 if (!(gfp_mask & __GFP_WAIT)) 2665 return CHARGE_WOULDBLOCK; 2666 2667 if (gfp_mask & __GFP_NORETRY) 2668 return CHARGE_NOMEM; 2669 2670 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); 2671 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2672 return CHARGE_RETRY; 2673 /* 2674 * Even though the limit is exceeded at this point, reclaim 2675 * may have been able to free some pages. Retry the charge 2676 * before killing the task. 2677 * 2678 * Only for regular pages, though: huge pages are rather 2679 * unlikely to succeed so close to the limit, and we fall back 2680 * to regular pages anyway in case of failure. 
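 *
 * (PAGE_ALLOC_COSTLY_ORDER is 3 here, so the retry below covers
 * charges of up to eight pages.)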
2681	 */
2682	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2683		return CHARGE_RETRY;
2684
2685	/*
2686	 * At task move, charge accounts can be doubly counted. So, it's
2687	 * better to wait until the end of task_move if something is going on.
2688	 */
2689	if (mem_cgroup_wait_acct_move(mem_over_limit))
2690		return CHARGE_RETRY;
2691
2692	if (invoke_oom)
2693		mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2694
2695	return CHARGE_NOMEM;
2696 }
2697
2698 /*
2699  * __mem_cgroup_try_charge() does
2700  * 1. detect the memcg to be charged against from the passed *mm and *ptr,
2701  * 2. update the res_counter,
2702  * 3. call memory reclaim if necessary.
2703  *
2704  * As a special case, if the task is dying (fatal_signal_pending() or
2705  * TIF_MEMDIE), this function returns -EINTR with root_mem_cgroup written
2706  * to *ptr, for two reasons: 1. dying threads should quit as soon as
2707  * possible without any hazards; 2. all pages should have a valid
2708  * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid
2709  * memcg pointer, that is treated as a charge to root_mem_cgroup.
2710  *
2711  * So __mem_cgroup_try_charge() will return
2712  *	0 ... on success, filling *ptr with a valid memcg pointer.
2713  *	-ENOMEM ... charge failure because of resource limits.
2714  *	-EINTR ... if the thread is dying. *ptr is filled with root_mem_cgroup.
2715  *
2716  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2717  * the oom-killer can be invoked.
2718  */
2719 static int __mem_cgroup_try_charge(struct mm_struct *mm,
2720				   gfp_t gfp_mask,
2721				   unsigned int nr_pages,
2722				   struct mem_cgroup **ptr,
2723				   bool oom)
2724 {
2725	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2726	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2727	struct mem_cgroup *memcg = NULL;
2728	int ret;
2729
2730	/*
2731	 * Unlike the global VM's OOM kill, we're not in a system-level memory
2732	 * shortage here. So, in addition to MEMDIE processes, allow dying
2733	 * processes to proceed.
2734	 */
2735	if (unlikely(test_thread_flag(TIF_MEMDIE)
2736		     || fatal_signal_pending(current)))
2737		goto bypass;
2738
2739	if (unlikely(task_in_memcg_oom(current)))
2740		goto nomem;
2741
2742	if (gfp_mask & __GFP_NOFAIL)
2743		oom = false;
2744
2745	/*
2746	 * We always charge the cgroup the mm_struct belongs to.
2747	 * The mm_struct's mem_cgroup changes on task migration if the
2748	 * thread group leader migrates. It's possible that mm is not
2749	 * set; if so, charge the root memcg (happens for pagecache usage).
2750	 */
2751	if (!*ptr && !mm)
2752		*ptr = root_mem_cgroup;
2753 again:
2754	if (*ptr) { /* css should be a valid one */
2755		memcg = *ptr;
2756		if (mem_cgroup_is_root(memcg))
2757			goto done;
2758		if (consume_stock(memcg, nr_pages))
2759			goto done;
2760		css_get(&memcg->css);
2761	} else {
2762		struct task_struct *p;
2763
2764		rcu_read_lock();
2765		p = rcu_dereference(mm->owner);
2766		/*
2767		 * Because we don't have task_lock(), "p" can exit.
2768		 * In that case, "memcg" can point to the root, or p can be
2769		 * NULL due to a race with swapoff. Then we run a small risk
2770		 * of mis-accounting. But this kind of race-induced
2771		 * mis-accounting can always happen because we don't hold
2772		 * cgroup_mutex(); taking it would be overkill, so we allow
2773		 * that small race here. (*) swapoff et al. will charge
2774		 * against the mm_struct, not the task_struct, so mm->owner can be NULL.
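		 * Hence the root_mem_cgroup fallback right below when
		 * mem_cgroup_from_task() returns NULL.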
2775		 */
2776		memcg = mem_cgroup_from_task(p);
2777		if (!memcg)
2778			memcg = root_mem_cgroup;
2779		if (mem_cgroup_is_root(memcg)) {
2780			rcu_read_unlock();
2781			goto done;
2782		}
2783		if (consume_stock(memcg, nr_pages)) {
2784			/*
2785			 * It seems dangerous to access memcg without css_get().
2786			 * But considering how consume_stock() works, it's not
2787			 * necessary. If consume_stock() succeeds, some charges
2788			 * from this memcg are cached on this cpu. So, we
2789			 * don't need to call css_get()/css_tryget() before
2790			 * calling consume_stock().
2791			 */
2792			rcu_read_unlock();
2793			goto done;
2794		}
2795		/* after this point we may block, so take a refcnt first */
2796		if (!css_tryget(&memcg->css)) {
2797			rcu_read_unlock();
2798			goto again;
2799		}
2800		rcu_read_unlock();
2801	}
2802
2803	do {
2804		bool invoke_oom = oom && !nr_oom_retries;
2805
2806		/* If killed, bypass the charge */
2807		if (fatal_signal_pending(current)) {
2808			css_put(&memcg->css);
2809			goto bypass;
2810		}
2811
2812		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2813					   nr_pages, invoke_oom);
2814		switch (ret) {
2815		case CHARGE_OK:
2816			break;
2817		case CHARGE_RETRY: /* not in an OOM situation but retry */
2818			batch = nr_pages;
2819			css_put(&memcg->css);
2820			memcg = NULL;
2821			goto again;
2822		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2823			css_put(&memcg->css);
2824			goto nomem;
2825		case CHARGE_NOMEM: /* OOM routine works */
2826			if (!oom || invoke_oom) {
2827				css_put(&memcg->css);
2828				goto nomem;
2829			}
2830			nr_oom_retries--;
2831			break;
2832		}
2833	} while (ret != CHARGE_OK);
2834
2835	if (batch > nr_pages)
2836		refill_stock(memcg, batch - nr_pages);
2837	css_put(&memcg->css);
2838 done:
2839	*ptr = memcg;
2840	return 0;
2841 nomem:
2842	if (!(gfp_mask & __GFP_NOFAIL)) {
2843		*ptr = NULL;
2844		return -ENOMEM;
2845	}
2846 bypass:
2847	*ptr = root_mem_cgroup;
2848	return -EINTR;
2849 }
2850
2851 /*
2852  * Sometimes we have to undo a charge we got by try_charge(). This
2853  * function does the uncharge and puts the css refcount that was
2854  * taken by try_charge().
2855  */
2856 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2857				       unsigned int nr_pages)
2858 {
2859	if (!mem_cgroup_is_root(memcg)) {
2860		unsigned long bytes = nr_pages * PAGE_SIZE;
2861
2862		res_counter_uncharge(&memcg->res, bytes);
2863		if (do_swap_account)
2864			res_counter_uncharge(&memcg->memsw, bytes);
2865	}
2866 }
2867
2868 /*
2869  * Cancel charges in this cgroup... this doesn't propagate to the parent
2870  * cgroup. It is useful when moving usage to the parent cgroup.
2871  */
2872 static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2873					     unsigned int nr_pages)
2874 {
2875	unsigned long bytes = nr_pages * PAGE_SIZE;
2876
2877	if (mem_cgroup_is_root(memcg))
2878		return;
2879
2880	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2881	if (do_swap_account)
2882		res_counter_uncharge_until(&memcg->memsw,
2883					   memcg->memsw.parent, bytes);
2884 }
2885
2886 /*
2887  * A helper function to get a mem_cgroup from its ID. Must be called under
2888  * rcu_read_lock(). The caller is responsible for calling css_tryget() if
2889  * the mem_cgroup is used for charging. (Dropping a refcnt from swap can
2890  * be called against a removed memcg.)
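 *
 * A minimal sketch of the intended calling pattern (mirroring
 * try_get_mem_cgroup_from_page() below):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_lookup(id);
 *	if (memcg && !css_tryget(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();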
2891 */ 2892 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2893 { 2894 /* ID 0 is unused ID */ 2895 if (!id) 2896 return NULL; 2897 return mem_cgroup_from_id(id); 2898 } 2899 2900 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2901 { 2902 struct mem_cgroup *memcg = NULL; 2903 struct page_cgroup *pc; 2904 unsigned short id; 2905 swp_entry_t ent; 2906 2907 VM_BUG_ON_PAGE(!PageLocked(page), page); 2908 2909 pc = lookup_page_cgroup(page); 2910 lock_page_cgroup(pc); 2911 if (PageCgroupUsed(pc)) { 2912 memcg = pc->mem_cgroup; 2913 if (memcg && !css_tryget(&memcg->css)) 2914 memcg = NULL; 2915 } else if (PageSwapCache(page)) { 2916 ent.val = page_private(page); 2917 id = lookup_swap_cgroup_id(ent); 2918 rcu_read_lock(); 2919 memcg = mem_cgroup_lookup(id); 2920 if (memcg && !css_tryget(&memcg->css)) 2921 memcg = NULL; 2922 rcu_read_unlock(); 2923 } 2924 unlock_page_cgroup(pc); 2925 return memcg; 2926 } 2927 2928 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, 2929 struct page *page, 2930 unsigned int nr_pages, 2931 enum charge_type ctype, 2932 bool lrucare) 2933 { 2934 struct page_cgroup *pc = lookup_page_cgroup(page); 2935 struct zone *uninitialized_var(zone); 2936 struct lruvec *lruvec; 2937 bool was_on_lru = false; 2938 bool anon; 2939 2940 lock_page_cgroup(pc); 2941 VM_BUG_ON_PAGE(PageCgroupUsed(pc), page); 2942 /* 2943 * we don't need page_cgroup_lock about tail pages, becase they are not 2944 * accessed by any other context at this point. 2945 */ 2946 2947 /* 2948 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2949 * may already be on some other mem_cgroup's LRU. Take care of it. 2950 */ 2951 if (lrucare) { 2952 zone = page_zone(page); 2953 spin_lock_irq(&zone->lru_lock); 2954 if (PageLRU(page)) { 2955 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2956 ClearPageLRU(page); 2957 del_page_from_lru_list(page, lruvec, page_lru(page)); 2958 was_on_lru = true; 2959 } 2960 } 2961 2962 pc->mem_cgroup = memcg; 2963 /* 2964 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2965 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2966 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2967 * before USED bit, we need memory barrier here. 2968 * See mem_cgroup_add_lru_list(), etc. 2969 */ 2970 smp_wmb(); 2971 SetPageCgroupUsed(pc); 2972 2973 if (lrucare) { 2974 if (was_on_lru) { 2975 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2976 VM_BUG_ON_PAGE(PageLRU(page), page); 2977 SetPageLRU(page); 2978 add_page_to_lru_list(page, lruvec, page_lru(page)); 2979 } 2980 spin_unlock_irq(&zone->lru_lock); 2981 } 2982 2983 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON) 2984 anon = true; 2985 else 2986 anon = false; 2987 2988 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages); 2989 unlock_page_cgroup(pc); 2990 2991 /* 2992 * "charge_statistics" updated event counter. Then, check it. 2993 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2994 * if they exceeds softlimit. 
2995 */ 2996 memcg_check_events(memcg, page); 2997 } 2998 2999 static DEFINE_MUTEX(set_limit_mutex); 3000 3001 #ifdef CONFIG_MEMCG_KMEM 3002 static DEFINE_MUTEX(activate_kmem_mutex); 3003 3004 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) 3005 { 3006 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && 3007 memcg_kmem_is_active(memcg); 3008 } 3009 3010 /* 3011 * This is a bit cumbersome, but it is rarely used and avoids a backpointer 3012 * in the memcg_cache_params struct. 3013 */ 3014 static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) 3015 { 3016 struct kmem_cache *cachep; 3017 3018 VM_BUG_ON(p->is_root_cache); 3019 cachep = p->root_cache; 3020 return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg)); 3021 } 3022 3023 #ifdef CONFIG_SLABINFO 3024 static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) 3025 { 3026 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3027 struct memcg_cache_params *params; 3028 3029 if (!memcg_can_account_kmem(memcg)) 3030 return -EIO; 3031 3032 print_slabinfo_header(m); 3033 3034 mutex_lock(&memcg->slab_caches_mutex); 3035 list_for_each_entry(params, &memcg->memcg_slab_caches, list) 3036 cache_show(memcg_params_to_cache(params), m); 3037 mutex_unlock(&memcg->slab_caches_mutex); 3038 3039 return 0; 3040 } 3041 #endif 3042 3043 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) 3044 { 3045 struct res_counter *fail_res; 3046 struct mem_cgroup *_memcg; 3047 int ret = 0; 3048 3049 ret = res_counter_charge(&memcg->kmem, size, &fail_res); 3050 if (ret) 3051 return ret; 3052 3053 _memcg = memcg; 3054 ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT, 3055 &_memcg, oom_gfp_allowed(gfp)); 3056 3057 if (ret == -EINTR) { 3058 /* 3059 * __mem_cgroup_try_charge() chosed to bypass to root due to 3060 * OOM kill or fatal signal. Since our only options are to 3061 * either fail the allocation or charge it to this cgroup, do 3062 * it as a temporary condition. But we can't fail. From a 3063 * kmem/slab perspective, the cache has already been selected, 3064 * by mem_cgroup_kmem_get_cache(), so it is too late to change 3065 * our minds. 3066 * 3067 * This condition will only trigger if the task entered 3068 * memcg_charge_kmem in a sane state, but was OOM-killed during 3069 * __mem_cgroup_try_charge() above. Tasks that were already 3070 * dying when the allocation triggers should have been already 3071 * directed to the root cgroup in memcontrol.h 3072 */ 3073 res_counter_charge_nofail(&memcg->res, size, &fail_res); 3074 if (do_swap_account) 3075 res_counter_charge_nofail(&memcg->memsw, size, 3076 &fail_res); 3077 ret = 0; 3078 } else if (ret) 3079 res_counter_uncharge(&memcg->kmem, size); 3080 3081 return ret; 3082 } 3083 3084 static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) 3085 { 3086 res_counter_uncharge(&memcg->res, size); 3087 if (do_swap_account) 3088 res_counter_uncharge(&memcg->memsw, size); 3089 3090 /* Not down to 0 */ 3091 if (res_counter_uncharge(&memcg->kmem, size)) 3092 return; 3093 3094 /* 3095 * Releases a reference taken in kmem_cgroup_css_offline in case 3096 * this last uncharge is racing with the offlining code or it is 3097 * outliving the memcg existence. 3098 * 3099 * The memory barrier imposed by test&clear is paired with the 3100 * explicit one in memcg_kmem_mark_dead(). 3101 */ 3102 if (memcg_kmem_test_and_clear_dead(memcg)) 3103 css_put(&memcg->css); 3104 } 3105 3106 /* 3107 * helper for acessing a memcg's index. 
It will be used as an index in the 3108 * child cache array in kmem_cache, and also to derive its name. This function 3109 * will return -1 when this is not a kmem-limited memcg. 3110 */ 3111 int memcg_cache_id(struct mem_cgroup *memcg) 3112 { 3113 return memcg ? memcg->kmemcg_id : -1; 3114 } 3115 3116 static size_t memcg_caches_array_size(int num_groups) 3117 { 3118 ssize_t size; 3119 if (num_groups <= 0) 3120 return 0; 3121 3122 size = 2 * num_groups; 3123 if (size < MEMCG_CACHES_MIN_SIZE) 3124 size = MEMCG_CACHES_MIN_SIZE; 3125 else if (size > MEMCG_CACHES_MAX_SIZE) 3126 size = MEMCG_CACHES_MAX_SIZE; 3127 3128 return size; 3129 } 3130 3131 /* 3132 * We should update the current array size iff all caches updates succeed. This 3133 * can only be done from the slab side. The slab mutex needs to be held when 3134 * calling this. 3135 */ 3136 void memcg_update_array_size(int num) 3137 { 3138 if (num > memcg_limited_groups_array_size) 3139 memcg_limited_groups_array_size = memcg_caches_array_size(num); 3140 } 3141 3142 static void kmem_cache_destroy_work_func(struct work_struct *w); 3143 3144 int memcg_update_cache_size(struct kmem_cache *s, int num_groups) 3145 { 3146 struct memcg_cache_params *cur_params = s->memcg_params; 3147 3148 VM_BUG_ON(!is_root_cache(s)); 3149 3150 if (num_groups > memcg_limited_groups_array_size) { 3151 int i; 3152 struct memcg_cache_params *new_params; 3153 ssize_t size = memcg_caches_array_size(num_groups); 3154 3155 size *= sizeof(void *); 3156 size += offsetof(struct memcg_cache_params, memcg_caches); 3157 3158 new_params = kzalloc(size, GFP_KERNEL); 3159 if (!new_params) 3160 return -ENOMEM; 3161 3162 new_params->is_root_cache = true; 3163 3164 /* 3165 * There is the chance it will be bigger than 3166 * memcg_limited_groups_array_size, if we failed an allocation 3167 * in a cache, in which case all caches updated before it, will 3168 * have a bigger array. 3169 * 3170 * But if that is the case, the data after 3171 * memcg_limited_groups_array_size is certainly unused 3172 */ 3173 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3174 if (!cur_params->memcg_caches[i]) 3175 continue; 3176 new_params->memcg_caches[i] = 3177 cur_params->memcg_caches[i]; 3178 } 3179 3180 /* 3181 * Ideally, we would wait until all caches succeed, and only 3182 * then free the old one. But this is not worth the extra 3183 * pointer per-cache we'd have to have for this. 3184 * 3185 * It is not a big deal if some caches are left with a size 3186 * bigger than the others. And all updates will reset this 3187 * anyway. 
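 *
 * rcu_assign_pointer() below publishes the grown array to lockless
 * readers (see cache_from_memcg_idx()), and the old array is only
 * freed after a grace period via kfree_rcu().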
3188 */ 3189 rcu_assign_pointer(s->memcg_params, new_params); 3190 if (cur_params) 3191 kfree_rcu(cur_params, rcu_head); 3192 } 3193 return 0; 3194 } 3195 3196 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, 3197 struct kmem_cache *root_cache) 3198 { 3199 size_t size; 3200 3201 if (!memcg_kmem_enabled()) 3202 return 0; 3203 3204 if (!memcg) { 3205 size = offsetof(struct memcg_cache_params, memcg_caches); 3206 size += memcg_limited_groups_array_size * sizeof(void *); 3207 } else 3208 size = sizeof(struct memcg_cache_params); 3209 3210 s->memcg_params = kzalloc(size, GFP_KERNEL); 3211 if (!s->memcg_params) 3212 return -ENOMEM; 3213 3214 if (memcg) { 3215 s->memcg_params->memcg = memcg; 3216 s->memcg_params->root_cache = root_cache; 3217 INIT_WORK(&s->memcg_params->destroy, 3218 kmem_cache_destroy_work_func); 3219 } else 3220 s->memcg_params->is_root_cache = true; 3221 3222 return 0; 3223 } 3224 3225 void memcg_free_cache_params(struct kmem_cache *s) 3226 { 3227 kfree(s->memcg_params); 3228 } 3229 3230 void memcg_register_cache(struct kmem_cache *s) 3231 { 3232 struct kmem_cache *root; 3233 struct mem_cgroup *memcg; 3234 int id; 3235 3236 if (is_root_cache(s)) 3237 return; 3238 3239 /* 3240 * Holding the slab_mutex assures nobody will touch the memcg_caches 3241 * array while we are modifying it. 3242 */ 3243 lockdep_assert_held(&slab_mutex); 3244 3245 root = s->memcg_params->root_cache; 3246 memcg = s->memcg_params->memcg; 3247 id = memcg_cache_id(memcg); 3248 3249 css_get(&memcg->css); 3250 3251 3252 /* 3253 * Since readers won't lock (see cache_from_memcg_idx()), we need a 3254 * barrier here to ensure nobody will see the kmem_cache partially 3255 * initialized. 3256 */ 3257 smp_wmb(); 3258 3259 /* 3260 * Initialize the pointer to this cache in its parent's memcg_params 3261 * before adding it to the memcg_slab_caches list, otherwise we can 3262 * fail to convert memcg_params_to_cache() while traversing the list. 3263 */ 3264 VM_BUG_ON(root->memcg_params->memcg_caches[id]); 3265 root->memcg_params->memcg_caches[id] = s; 3266 3267 mutex_lock(&memcg->slab_caches_mutex); 3268 list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); 3269 mutex_unlock(&memcg->slab_caches_mutex); 3270 } 3271 3272 void memcg_unregister_cache(struct kmem_cache *s) 3273 { 3274 struct kmem_cache *root; 3275 struct mem_cgroup *memcg; 3276 int id; 3277 3278 if (is_root_cache(s)) 3279 return; 3280 3281 /* 3282 * Holding the slab_mutex assures nobody will touch the memcg_caches 3283 * array while we are modifying it. 3284 */ 3285 lockdep_assert_held(&slab_mutex); 3286 3287 root = s->memcg_params->root_cache; 3288 memcg = s->memcg_params->memcg; 3289 id = memcg_cache_id(memcg); 3290 3291 mutex_lock(&memcg->slab_caches_mutex); 3292 list_del(&s->memcg_params->list); 3293 mutex_unlock(&memcg->slab_caches_mutex); 3294 3295 /* 3296 * Clear the pointer to this cache in its parent's memcg_params only 3297 * after removing it from the memcg_slab_caches list, otherwise we can 3298 * fail to convert memcg_params_to_cache() while traversing the list. 3299 */ 3300 VM_BUG_ON(!root->memcg_params->memcg_caches[id]); 3301 root->memcg_params->memcg_caches[id] = NULL; 3302 3303 css_put(&memcg->css); 3304 } 3305 3306 /* 3307 * During the creation a new cache, we need to disable our accounting mechanism 3308 * altogether. This is true even if we are not creating, but rather just 3309 * enqueing new caches to be created. 
3310 * 3311 * This is because that process will trigger allocations; some visible, like 3312 * explicit kmallocs to auxiliary data structures, name strings and internal 3313 * cache structures; some well concealed, like INIT_WORK() that can allocate 3314 * objects during debug. 3315 * 3316 * If any allocation happens during memcg_kmem_get_cache, we will recurse back 3317 * to it. This may not be a bounded recursion: since the first cache creation 3318 * failed to complete (waiting on the allocation), we'll just try to create the 3319 * cache again, failing at the same point. 3320 * 3321 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of 3322 * memcg_kmem_skip_account. So we enclose anything that might allocate memory 3323 * inside the following two functions. 3324 */ 3325 static inline void memcg_stop_kmem_account(void) 3326 { 3327 VM_BUG_ON(!current->mm); 3328 current->memcg_kmem_skip_account++; 3329 } 3330 3331 static inline void memcg_resume_kmem_account(void) 3332 { 3333 VM_BUG_ON(!current->mm); 3334 current->memcg_kmem_skip_account--; 3335 } 3336 3337 static void kmem_cache_destroy_work_func(struct work_struct *w) 3338 { 3339 struct kmem_cache *cachep; 3340 struct memcg_cache_params *p; 3341 3342 p = container_of(w, struct memcg_cache_params, destroy); 3343 3344 cachep = memcg_params_to_cache(p); 3345 3346 /* 3347 * If we get down to 0 after shrink, we could delete right away. 3348 * However, memcg_release_pages() already puts us back in the workqueue 3349 * in that case. If we proceed deleting, we'll get a dangling 3350 * reference, and removing the object from the workqueue in that case 3351 * is unnecessary complication. We are not a fast path. 3352 * 3353 * Note that this case is fundamentally different from racing with 3354 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in 3355 * kmem_cache_shrink, not only we would be reinserting a dead cache 3356 * into the queue, but doing so from inside the worker racing to 3357 * destroy it. 3358 * 3359 * So if we aren't down to zero, we'll just schedule a worker and try 3360 * again 3361 */ 3362 if (atomic_read(&cachep->memcg_params->nr_pages) != 0) 3363 kmem_cache_shrink(cachep); 3364 else 3365 kmem_cache_destroy(cachep); 3366 } 3367 3368 void mem_cgroup_destroy_cache(struct kmem_cache *cachep) 3369 { 3370 if (!cachep->memcg_params->dead) 3371 return; 3372 3373 /* 3374 * There are many ways in which we can get here. 3375 * 3376 * We can get to a memory-pressure situation while the delayed work is 3377 * still pending to run. The vmscan shrinkers can then release all 3378 * cache memory and get us to destruction. If this is the case, we'll 3379 * be executed twice, which is a bug (the second time will execute over 3380 * bogus data). In this case, cancelling the work should be fine. 3381 * 3382 * But we can also get here from the worker itself, if 3383 * kmem_cache_shrink is enough to shake all the remaining objects and 3384 * get the page count to 0. In this case, we'll deadlock if we try to 3385 * cancel the work (the worker runs with an internal lock held, which 3386 * is the same lock we would hold for cancel_work_sync().) 3387 * 3388 * Since we can't possibly know who got us here, just refrain from 3389 * running if there is already work pending 3390 */ 3391 if (work_pending(&cachep->memcg_params->destroy)) 3392 return; 3393 /* 3394 * We have to defer the actual destroying to a workqueue, because 3395 * we might currently be in a context that cannot sleep. 
3396  */
3397	schedule_work(&cachep->memcg_params->destroy);
3398 }
3399
3400 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
3401						  struct kmem_cache *s)
3402 {
3403	struct kmem_cache *new = NULL;
3404	static char *tmp_name = NULL;
3405	static DEFINE_MUTEX(mutex);	/* protects tmp_name */
3406
3407	BUG_ON(!memcg_can_account_kmem(memcg));
3408
3409	mutex_lock(&mutex);
3410	/*
3411	 * kmem_cache_create_memcg() duplicates the given name, and
3412	 * cgroup_name() for this name requires RCU context. This static
3413	 * temporary buffer is used to avoid a pointless short-lived
3414	 * allocation.
3415	 */
3416	if (!tmp_name) {
3417		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
3418		if (!tmp_name)
3419			goto out;
3420	}
3421
3422	rcu_read_lock();
3423	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
3424		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
3425	rcu_read_unlock();
3426
3427	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
3428				      (s->flags & ~SLAB_PANIC), s->ctor, s);
3429	if (new)
3430		new->allocflags |= __GFP_KMEMCG;
3431	else
3432		new = s;
3433 out:
3434	mutex_unlock(&mutex);
3435	return new;
3436 }
3437
3438 void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3439 {
3440	struct kmem_cache *c;
3441	int i;
3442
3443	if (!s->memcg_params)
3444		return;
3445	if (!s->memcg_params->is_root_cache)
3446		return;
3447
3448	/*
3449	 * If the cache is being destroyed, we trust that there is no one else
3450	 * requesting objects from it. Even if there are, the sanity checks in
3451	 * kmem_cache_destroy() should catch this ill case.
3452	 *
3453	 * Still, we don't want anyone else freeing memcg_caches under our
3454	 * noses, which can happen if a new memcg comes to life. As usual,
3455	 * we'll take the activate_kmem_mutex to protect ourselves against
3456	 * this.
3457	 */
3458	mutex_lock(&activate_kmem_mutex);
3459	for_each_memcg_cache_index(i) {
3460		c = cache_from_memcg_idx(s, i);
3461		if (!c)
3462			continue;
3463
3464		/*
3465		 * We will now manually delete the caches, so to avoid races
3466		 * we need to cancel all pending destruction workers and
3467		 * proceed with destruction ourselves.
3468		 *
3469		 * kmem_cache_destroy() will call kmem_cache_shrink() internally,
3470		 * and that could spawn the workers again: it is likely that
3471		 * the cache still has active pages until this very moment.
3472		 * This would lead us back to mem_cgroup_destroy_cache().
3473		 *
3474		 * But that will not execute at all if the "dead" flag is not
3475		 * set, so flip it down to guarantee we are in control.
3476 */ 3477 c->memcg_params->dead = false; 3478 cancel_work_sync(&c->memcg_params->destroy); 3479 kmem_cache_destroy(c); 3480 } 3481 mutex_unlock(&activate_kmem_mutex); 3482 } 3483 3484 struct create_work { 3485 struct mem_cgroup *memcg; 3486 struct kmem_cache *cachep; 3487 struct work_struct work; 3488 }; 3489 3490 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3491 { 3492 struct kmem_cache *cachep; 3493 struct memcg_cache_params *params; 3494 3495 if (!memcg_kmem_is_active(memcg)) 3496 return; 3497 3498 mutex_lock(&memcg->slab_caches_mutex); 3499 list_for_each_entry(params, &memcg->memcg_slab_caches, list) { 3500 cachep = memcg_params_to_cache(params); 3501 cachep->memcg_params->dead = true; 3502 schedule_work(&cachep->memcg_params->destroy); 3503 } 3504 mutex_unlock(&memcg->slab_caches_mutex); 3505 } 3506 3507 static void memcg_create_cache_work_func(struct work_struct *w) 3508 { 3509 struct create_work *cw; 3510 3511 cw = container_of(w, struct create_work, work); 3512 memcg_create_kmem_cache(cw->memcg, cw->cachep); 3513 css_put(&cw->memcg->css); 3514 kfree(cw); 3515 } 3516 3517 /* 3518 * Enqueue the creation of a per-memcg kmem_cache. 3519 */ 3520 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3521 struct kmem_cache *cachep) 3522 { 3523 struct create_work *cw; 3524 3525 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT); 3526 if (cw == NULL) { 3527 css_put(&memcg->css); 3528 return; 3529 } 3530 3531 cw->memcg = memcg; 3532 cw->cachep = cachep; 3533 3534 INIT_WORK(&cw->work, memcg_create_cache_work_func); 3535 schedule_work(&cw->work); 3536 } 3537 3538 static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3539 struct kmem_cache *cachep) 3540 { 3541 /* 3542 * We need to stop accounting when we kmalloc, because if the 3543 * corresponding kmalloc cache is not yet created, the first allocation 3544 * in __memcg_create_cache_enqueue will recurse. 3545 * 3546 * However, it is better to enclose the whole function. Depending on 3547 * the debugging options enabled, INIT_WORK(), for instance, can 3548 * trigger an allocation. This too, will make us recurse. Because at 3549 * this point we can't allow ourselves back into memcg_kmem_get_cache, 3550 * the safest choice is to do it like this, wrapping the whole function. 3551 */ 3552 memcg_stop_kmem_account(); 3553 __memcg_create_cache_enqueue(memcg, cachep); 3554 memcg_resume_kmem_account(); 3555 } 3556 /* 3557 * Return the kmem_cache we're supposed to use for a slab allocation. 3558 * We try to use the current memcg's version of the cache. 3559 * 3560 * If the cache does not exist yet, if we are the first user of it, 3561 * we either create it immediately, if possible, or create it asynchronously 3562 * in a workqueue. 3563 * In the latter case, we will let the current allocation go through with 3564 * the original cache. 3565 * 3566 * Can't be called in interrupt context or from kernel threads. 3567 * This function needs to be called with rcu_read_lock() held. 
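 *
 * Three outcomes are possible below: the root cache is returned
 * (accounting skipped or the memcg not accountable), the memcg clone
 * already exists and is returned, or its creation is enqueued while
 * the current allocation proceeds with the root cache.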
3568  */
3569 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3570					  gfp_t gfp)
3571 {
3572	struct mem_cgroup *memcg;
3573	struct kmem_cache *memcg_cachep;
3574
3575	VM_BUG_ON(!cachep->memcg_params);
3576	VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3577
3578	if (!current->mm || current->memcg_kmem_skip_account)
3579		return cachep;
3580
3581	rcu_read_lock();
3582	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3583
3584	if (!memcg_can_account_kmem(memcg))
3585		goto out;
3586
3587	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3588	if (likely(memcg_cachep)) {
3589		cachep = memcg_cachep;
3590		goto out;
3591	}
3592
3593	/* The corresponding put will be done in the workqueue. */
3594	if (!css_tryget(&memcg->css))
3595		goto out;
3596	rcu_read_unlock();
3597
3598	/*
3599	 * If we are in a safe context (can wait, and not in interrupt
3600	 * context), we could be predictable and return right away.
3601	 * This would guarantee that the allocation being performed
3602	 * already belongs in the new cache.
3603	 *
3604	 * However, there are some clashes that can arise from locking.
3605	 * For instance, because we acquire the slab_mutex while doing
3606	 * kmem_cache_dup(), no further allocation could happen with the
3607	 * slab_mutex held.
3608	 *
3609	 * Also, because cache creation issues get_online_cpus(), this
3610	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3611	 * that ends up reversed during cpu hotplug. (cpuset allocates
3612	 * a bunch of GFP_KERNEL memory during cpu-up.) Due to all that,
3613	 * it is better to defer everything.
3614	 */
3615	memcg_create_cache_enqueue(memcg, cachep);
3616	return cachep;
3617 out:
3618	rcu_read_unlock();
3619	return cachep;
3620 }
3621 EXPORT_SYMBOL(__memcg_kmem_get_cache);
3622
3623 /*
3624  * We need to verify if the allocation against current->mm->owner's memcg
3625  * is possible for the given order. But the page is not allocated yet, so
3626  * we'll need a further commit step to do the final arrangements.
3627  *
3628  * It is possible for the task to switch cgroups in the meantime, so at
3629  * commit time, we can't rely on task conversion any longer. We'll then
3630  * use the handle argument to return to the caller which cgroup we should
3631  * commit against. We could also return the memcg directly and avoid the
3632  * pointer passing, but a boolean return value gives better semantics
3633  * considering the compiled-out case as well.
3634  *
3635  * Returning true means the allocation is possible.
3636  */
3637 bool
3638 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3639 {
3640	struct mem_cgroup *memcg;
3641	int ret;
3642
3643	*_memcg = NULL;
3644
3645	/*
3646	 * Disabling accounting is only relevant for some specific memcg
3647	 * internal allocations. Therefore we would initially not have such a
3648	 * check here, since direct calls to the page allocator that are marked
3649	 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3650	 * concerned with cache allocations, and by having this test at
3651	 * memcg_kmem_get_cache(), we are already able to relay the allocation
3652	 * to the root cache and bypass the memcg cache altogether.
3653	 *
3654	 * There is one exception, though: the SLUB allocator does not create
3655	 * large order caches, but rather services large kmallocs directly from
3656	 * the page allocator.
Therefore, the following sequence when backed by 3657 * the SLUB allocator: 3658 * 3659 * memcg_stop_kmem_account(); 3660 * kmalloc(<large_number>) 3661 * memcg_resume_kmem_account(); 3662 * 3663 * would effectively ignore the fact that we should skip accounting, 3664 * since it will drive us directly to this function without passing 3665 * through the cache selector memcg_kmem_get_cache. Such large 3666 * allocations are extremely rare but can happen, for instance, for the 3667 * cache arrays. We bring this test here. 3668 */ 3669 if (!current->mm || current->memcg_kmem_skip_account) 3670 return true; 3671 3672 memcg = try_get_mem_cgroup_from_mm(current->mm); 3673 3674 /* 3675 * very rare case described in mem_cgroup_from_task. Unfortunately there 3676 * isn't much we can do without complicating this too much, and it would 3677 * be gfp-dependent anyway. Just let it go 3678 */ 3679 if (unlikely(!memcg)) 3680 return true; 3681 3682 if (!memcg_can_account_kmem(memcg)) { 3683 css_put(&memcg->css); 3684 return true; 3685 } 3686 3687 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); 3688 if (!ret) 3689 *_memcg = memcg; 3690 3691 css_put(&memcg->css); 3692 return (ret == 0); 3693 } 3694 3695 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, 3696 int order) 3697 { 3698 struct page_cgroup *pc; 3699 3700 VM_BUG_ON(mem_cgroup_is_root(memcg)); 3701 3702 /* The page allocation failed. Revert */ 3703 if (!page) { 3704 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3705 return; 3706 } 3707 3708 pc = lookup_page_cgroup(page); 3709 lock_page_cgroup(pc); 3710 pc->mem_cgroup = memcg; 3711 SetPageCgroupUsed(pc); 3712 unlock_page_cgroup(pc); 3713 } 3714 3715 void __memcg_kmem_uncharge_pages(struct page *page, int order) 3716 { 3717 struct mem_cgroup *memcg = NULL; 3718 struct page_cgroup *pc; 3719 3720 3721 pc = lookup_page_cgroup(page); 3722 /* 3723 * Fast unlocked return. Theoretically might have changed, have to 3724 * check again after locking. 3725 */ 3726 if (!PageCgroupUsed(pc)) 3727 return; 3728 3729 lock_page_cgroup(pc); 3730 if (PageCgroupUsed(pc)) { 3731 memcg = pc->mem_cgroup; 3732 ClearPageCgroupUsed(pc); 3733 } 3734 unlock_page_cgroup(pc); 3735 3736 /* 3737 * We trust that only if there is a memcg associated with the page, it 3738 * is a valid allocation 3739 */ 3740 if (!memcg) 3741 return; 3742 3743 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3744 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3745 } 3746 #else 3747 static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3748 { 3749 } 3750 #endif /* CONFIG_MEMCG_KMEM */ 3751 3752 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3753 3754 #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION) 3755 /* 3756 * Because tail pages are not marked as "used", set it. We're under 3757 * zone->lru_lock, 'splitting on pmd' and compound_lock. 3758 * charge/uncharge will be never happen and move_account() is done under 3759 * compound_lock(), so we don't have to take care of races. 
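 *
 * The fixup below copies the head's pc->mem_cgroup to every tail page
 * and decrements the RSS_HUGE counter by HPAGE_PMD_NR, since the range
 * is no longer backed by a huge page.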
3760 */ 3761 void mem_cgroup_split_huge_fixup(struct page *head) 3762 { 3763 struct page_cgroup *head_pc = lookup_page_cgroup(head); 3764 struct page_cgroup *pc; 3765 struct mem_cgroup *memcg; 3766 int i; 3767 3768 if (mem_cgroup_disabled()) 3769 return; 3770 3771 memcg = head_pc->mem_cgroup; 3772 for (i = 1; i < HPAGE_PMD_NR; i++) { 3773 pc = head_pc + i; 3774 pc->mem_cgroup = memcg; 3775 smp_wmb(); /* see __commit_charge() */ 3776 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 3777 } 3778 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 3779 HPAGE_PMD_NR); 3780 } 3781 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3782 3783 static inline 3784 void mem_cgroup_move_account_page_stat(struct mem_cgroup *from, 3785 struct mem_cgroup *to, 3786 unsigned int nr_pages, 3787 enum mem_cgroup_stat_index idx) 3788 { 3789 /* Update stat data for mem_cgroup */ 3790 preempt_disable(); 3791 __this_cpu_sub(from->stat->count[idx], nr_pages); 3792 __this_cpu_add(to->stat->count[idx], nr_pages); 3793 preempt_enable(); 3794 } 3795 3796 /** 3797 * mem_cgroup_move_account - move account of the page 3798 * @page: the page 3799 * @nr_pages: number of regular pages (>1 for huge pages) 3800 * @pc: page_cgroup of the page. 3801 * @from: mem_cgroup which the page is moved from. 3802 * @to: mem_cgroup which the page is moved to. @from != @to. 3803 * 3804 * The caller must confirm the following. 3805 * - page is not on LRU (isolate_page() is useful.) 3806 * - compound_lock is held when nr_pages > 1 3807 * 3808 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge" 3809 * from the old cgroup. 3810 */ 3811 static int mem_cgroup_move_account(struct page *page, 3812 unsigned int nr_pages, 3813 struct page_cgroup *pc, 3814 struct mem_cgroup *from, 3815 struct mem_cgroup *to) 3816 { 3817 unsigned long flags; 3818 int ret; 3819 bool anon = PageAnon(page); 3820 3821 VM_BUG_ON(from == to); 3822 VM_BUG_ON_PAGE(PageLRU(page), page); 3823 /* 3824 * The page is isolated from the LRU. So, the collapse function 3825 * will not handle this page. But page splitting can happen. 3826 * Do this check under compound_page_lock(). The caller should 3827 * hold it. 3828 */ 3829 ret = -EBUSY; 3830 if (nr_pages > 1 && !PageTransHuge(page)) 3831 goto out; 3832 3833 lock_page_cgroup(pc); 3834 3835 ret = -EINVAL; 3836 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 3837 goto unlock; 3838 3839 move_lock_mem_cgroup(from, &flags); 3840 3841 if (!anon && page_mapped(page)) 3842 mem_cgroup_move_account_page_stat(from, to, nr_pages, 3843 MEM_CGROUP_STAT_FILE_MAPPED); 3844 3845 if (PageWriteback(page)) 3846 mem_cgroup_move_account_page_stat(from, to, nr_pages, 3847 MEM_CGROUP_STAT_WRITEBACK); 3848 3849 mem_cgroup_charge_statistics(from, page, anon, -nr_pages); 3850 3851 /* caller should have done css_get */ 3852 pc->mem_cgroup = to; 3853 mem_cgroup_charge_statistics(to, page, anon, nr_pages); 3854 move_unlock_mem_cgroup(from, &flags); 3855 ret = 0; 3856 unlock: 3857 unlock_page_cgroup(pc); 3858 /* 3859 * check events 3860 */ 3861 memcg_check_events(to, page); 3862 memcg_check_events(from, page); 3863 out: 3864 return ret; 3865 } 3866 3867 /** 3868 * mem_cgroup_move_parent - moves page to the parent group 3869 * @page: the page to move 3870 * @pc: page_cgroup of the page 3871 * @child: page's cgroup 3872 * 3873 * Moves charges to its parent or the root cgroup if the group has no 3874 * parent (aka use_hierarchy==0).
3875 * Although this might fail (get_page_unless_zero, isolate_lru_page or 3876 * mem_cgroup_move_account fails) the failure is always temporary and 3877 * it signals a race with a page removal/uncharge or migration. In the 3878 * first case the page is on the way out and it will vanish from the LRU 3879 * on the next attempt and the call should be retried later. 3880 * Isolation from the LRU fails only if the page has been isolated from 3881 * the LRU since we looked at it, and that usually means either global 3882 * reclaim or migration going on. The page will either get back to the 3883 * LRU or vanish. 3884 * Finally, mem_cgroup_move_account fails only if the page got uncharged 3885 * (!PageCgroupUsed) or moved to a different group. The page will 3886 * disappear in the next attempt. 3887 */ 3888 static int mem_cgroup_move_parent(struct page *page, 3889 struct page_cgroup *pc, 3890 struct mem_cgroup *child) 3891 { 3892 struct mem_cgroup *parent; 3893 unsigned int nr_pages; 3894 unsigned long uninitialized_var(flags); 3895 int ret; 3896 3897 VM_BUG_ON(mem_cgroup_is_root(child)); 3898 3899 ret = -EBUSY; 3900 if (!get_page_unless_zero(page)) 3901 goto out; 3902 if (isolate_lru_page(page)) 3903 goto put; 3904 3905 nr_pages = hpage_nr_pages(page); 3906 3907 parent = parent_mem_cgroup(child); 3908 /* 3909 * If no parent, move charges to root cgroup. 3910 */ 3911 if (!parent) 3912 parent = root_mem_cgroup; 3913 3914 if (nr_pages > 1) { 3915 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 3916 flags = compound_lock_irqsave(page); 3917 } 3918 3919 ret = mem_cgroup_move_account(page, nr_pages, 3920 pc, child, parent); 3921 if (!ret) 3922 __mem_cgroup_cancel_local_charge(child, nr_pages); 3923 3924 if (nr_pages > 1) 3925 compound_unlock_irqrestore(page, flags); 3926 putback_lru_page(page); 3927 put: 3928 put_page(page); 3929 out: 3930 return ret; 3931 } 3932 3933 /* 3934 * Charge the memory controller for page usage. 3935 * Return 3936 * 0 if the charge was successful 3937 * < 0 if the cgroup is over its limit 3938 */ 3939 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 3940 gfp_t gfp_mask, enum charge_type ctype) 3941 { 3942 struct mem_cgroup *memcg = NULL; 3943 unsigned int nr_pages = 1; 3944 bool oom = true; 3945 int ret; 3946 3947 if (PageTransHuge(page)) { 3948 nr_pages <<= compound_order(page); 3949 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 3950 /* 3951 * Never OOM-kill a process for a huge page. The 3952 * fault handler will fall back to regular pages. 3953 */ 3954 oom = false; 3955 } 3956 3957 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); 3958 if (ret == -ENOMEM) 3959 return ret; 3960 __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false); 3961 return 0; 3962 } 3963 3964 int mem_cgroup_newpage_charge(struct page *page, 3965 struct mm_struct *mm, gfp_t gfp_mask) 3966 { 3967 if (mem_cgroup_disabled()) 3968 return 0; 3969 VM_BUG_ON_PAGE(page_mapped(page), page); 3970 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); 3971 VM_BUG_ON(!mm); 3972 return mem_cgroup_charge_common(page, mm, gfp_mask, 3973 MEM_CGROUP_CHARGE_TYPE_ANON); 3974 } 3975 3976 /* 3977 * During swap-in (try_charge -> commit or cancel), the page is locked. 3978 * And when try_charge() successfully returns, one refcnt to the memcg without 3979 * a struct page_cgroup is acquired.
This refcnt will be consumed by 3980 * "commit()" or removed by "cancel()". 3981 */ 3982 static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm, 3983 struct page *page, 3984 gfp_t mask, 3985 struct mem_cgroup **memcgp) 3986 { 3987 struct mem_cgroup *memcg; 3988 struct page_cgroup *pc; 3989 int ret; 3990 3991 pc = lookup_page_cgroup(page); 3992 /* 3993 * Every swap fault against a single page tries to charge the 3994 * page, so bail as early as possible. shmem_unuse() encounters 3995 * already charged pages, too. The USED bit is protected by 3996 * the page lock, which serializes swap cache removal, which 3997 * in turn serializes uncharging. 3998 */ 3999 if (PageCgroupUsed(pc)) 4000 return 0; 4001 if (!do_swap_account) 4002 goto charge_cur_mm; 4003 memcg = try_get_mem_cgroup_from_page(page); 4004 if (!memcg) 4005 goto charge_cur_mm; 4006 *memcgp = memcg; 4007 ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true); 4008 css_put(&memcg->css); 4009 if (ret == -EINTR) 4010 ret = 0; 4011 return ret; 4012 charge_cur_mm: 4013 ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true); 4014 if (ret == -EINTR) 4015 ret = 0; 4016 return ret; 4017 } 4018 4019 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, 4020 gfp_t gfp_mask, struct mem_cgroup **memcgp) 4021 { 4022 *memcgp = NULL; 4023 if (mem_cgroup_disabled()) 4024 return 0; 4025 /* 4026 * A racing thread's fault, or swapoff, may have already 4027 * updated the pte, and even removed the page from the swap cache: in 4028 * those cases unuse_pte()'s pte_same() test will fail; but 4029 * there's also a KSM case which does need to charge the page. 4030 */ 4031 if (!PageSwapCache(page)) { 4032 int ret; 4033 4034 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true); 4035 if (ret == -EINTR) 4036 ret = 0; 4037 return ret; 4038 } 4039 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp); 4040 } 4041 4042 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) 4043 { 4044 if (mem_cgroup_disabled()) 4045 return; 4046 if (!memcg) 4047 return; 4048 __mem_cgroup_cancel_charge(memcg, 1); 4049 } 4050 4051 static void 4052 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, 4053 enum charge_type ctype) 4054 { 4055 if (mem_cgroup_disabled()) 4056 return; 4057 if (!memcg) 4058 return; 4059 4060 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true); 4061 /* 4062 * Now the swap is in memory. This means this page may be 4063 * counted both as mem and swap: a double count. 4064 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 4065 * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page() 4066 * may call delete_from_swap_cache() before we reach here.
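 *
 * For reference, the call pattern on the swap-in fault path is roughly
 * (a simplified sketch of what do_swap_page() does):
 *
 *	mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg);
 *	... lock and map the page ...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * with mem_cgroup_cancel_charge_swapin(memcg) on the failure path.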
4067 */ 4068 if (do_swap_account && PageSwapCache(page)) { 4069 swp_entry_t ent = {.val = page_private(page)}; 4070 mem_cgroup_uncharge_swap(ent); 4071 } 4072 } 4073 4074 void mem_cgroup_commit_charge_swapin(struct page *page, 4075 struct mem_cgroup *memcg) 4076 { 4077 __mem_cgroup_commit_charge_swapin(page, memcg, 4078 MEM_CGROUP_CHARGE_TYPE_ANON); 4079 } 4080 4081 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 4082 gfp_t gfp_mask) 4083 { 4084 struct mem_cgroup *memcg = NULL; 4085 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 4086 int ret; 4087 4088 if (mem_cgroup_disabled()) 4089 return 0; 4090 if (PageCompound(page)) 4091 return 0; 4092 4093 if (!PageSwapCache(page)) 4094 ret = mem_cgroup_charge_common(page, mm, gfp_mask, type); 4095 else { /* page is swapcache/shmem */ 4096 ret = __mem_cgroup_try_charge_swapin(mm, page, 4097 gfp_mask, &memcg); 4098 if (!ret) 4099 __mem_cgroup_commit_charge_swapin(page, memcg, type); 4100 } 4101 return ret; 4102 } 4103 4104 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg, 4105 unsigned int nr_pages, 4106 const enum charge_type ctype) 4107 { 4108 struct memcg_batch_info *batch = NULL; 4109 bool uncharge_memsw = true; 4110 4111 /* If swapout, usage of swap doesn't decrease */ 4112 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 4113 uncharge_memsw = false; 4114 4115 batch = &current->memcg_batch; 4116 /* 4117 * Usually, we do css_get() when we remember the memcg pointer. 4118 * But in this case, we keep res->usage until the end of a series of 4119 * uncharges. Then, it's ok to ignore memcg's refcnt. 4120 */ 4121 if (!batch->memcg) 4122 batch->memcg = memcg; 4123 /* 4124 * do_batch > 0 when unmapping pages or inode invalidate/truncate. 4125 * In those cases, all pages freed continuously can be expected to be in 4126 * the same cgroup and we have a chance to coalesce uncharges. 4127 * But we do uncharge one by one if the task is being killed by OOM 4128 * (TIF_MEMDIE) because we want to do uncharge as soon as possible. 4129 */ 4130 4131 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) 4132 goto direct_uncharge; 4133 4134 if (nr_pages > 1) 4135 goto direct_uncharge; 4136 4137 /* 4138 * In the typical case, batch->memcg == memcg. This means we can 4139 * merge a series of uncharges into one uncharge of the res_counter. 4140 * If not, we uncharge the res_counter one by one.
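 *
 * For illustration, the batching protocol as a caller such as truncation
 * uses it (a simplified sketch):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page in the range
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * Everything between start/end that hits the same memcg is folded into
 * a single res_counter_uncharge() in mem_cgroup_uncharge_end().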
4141 */ 4142 if (batch->memcg != memcg) 4143 goto direct_uncharge; 4144 /* remember freed charge and uncharge it later */ 4145 batch->nr_pages++; 4146 if (uncharge_memsw) 4147 batch->memsw_nr_pages++; 4148 return; 4149 direct_uncharge: 4150 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE); 4151 if (uncharge_memsw) 4152 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE); 4153 if (unlikely(batch->memcg != memcg)) 4154 memcg_oom_recover(memcg); 4155 } 4156 4157 /* 4158 * uncharge if !page_mapped(page) 4159 */ 4160 static struct mem_cgroup * 4161 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, 4162 bool end_migration) 4163 { 4164 struct mem_cgroup *memcg = NULL; 4165 unsigned int nr_pages = 1; 4166 struct page_cgroup *pc; 4167 bool anon; 4168 4169 if (mem_cgroup_disabled()) 4170 return NULL; 4171 4172 if (PageTransHuge(page)) { 4173 nr_pages <<= compound_order(page); 4174 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 4175 } 4176 /* 4177 * Check if our page_cgroup is valid 4178 */ 4179 pc = lookup_page_cgroup(page); 4180 if (unlikely(!PageCgroupUsed(pc))) 4181 return NULL; 4182 4183 lock_page_cgroup(pc); 4184 4185 memcg = pc->mem_cgroup; 4186 4187 if (!PageCgroupUsed(pc)) 4188 goto unlock_out; 4189 4190 anon = PageAnon(page); 4191 4192 switch (ctype) { 4193 case MEM_CGROUP_CHARGE_TYPE_ANON: 4194 /* 4195 * Generally PageAnon tells if it's the anon statistics to be 4196 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is 4197 * used before page reached the stage of being marked PageAnon. 4198 */ 4199 anon = true; 4200 /* fallthrough */ 4201 case MEM_CGROUP_CHARGE_TYPE_DROP: 4202 /* See mem_cgroup_prepare_migration() */ 4203 if (page_mapped(page)) 4204 goto unlock_out; 4205 /* 4206 * Pages under migration may not be uncharged. But 4207 * end_migration() /must/ be the one uncharging the 4208 * unused post-migration page and so it has to call 4209 * here with the migration bit still set. See the 4210 * res_counter handling below. 4211 */ 4212 if (!end_migration && PageCgroupMigration(pc)) 4213 goto unlock_out; 4214 break; 4215 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 4216 if (!PageAnon(page)) { /* Shared memory */ 4217 if (page->mapping && !page_is_file_cache(page)) 4218 goto unlock_out; 4219 } else if (page_mapped(page)) /* Anon */ 4220 goto unlock_out; 4221 break; 4222 default: 4223 break; 4224 } 4225 4226 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages); 4227 4228 ClearPageCgroupUsed(pc); 4229 /* 4230 * pc->mem_cgroup is not cleared here. It will be accessed when it's 4231 * freed from LRU. This is safe because uncharged page is expected not 4232 * to be reused (freed soon). Exception is SwapCache, it's handled by 4233 * special functions. 4234 */ 4235 4236 unlock_page_cgroup(pc); 4237 /* 4238 * even after unlock, we have memcg->res.usage here and this memcg 4239 * will never be freed, so it's safe to call css_get(). 4240 */ 4241 memcg_check_events(memcg, page); 4242 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 4243 mem_cgroup_swap_statistics(memcg, true); 4244 css_get(&memcg->css); 4245 } 4246 /* 4247 * Migration does not charge the res_counter for the 4248 * replacement page, so leave it alone when phasing out the 4249 * page that is unused after the migration. 
4250 */ 4251 if (!end_migration && !mem_cgroup_is_root(memcg)) 4252 mem_cgroup_do_uncharge(memcg, nr_pages, ctype); 4253 4254 return memcg; 4255 4256 unlock_out: 4257 unlock_page_cgroup(pc); 4258 return NULL; 4259 } 4260 4261 void mem_cgroup_uncharge_page(struct page *page) 4262 { 4263 /* early check. */ 4264 if (page_mapped(page)) 4265 return; 4266 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); 4267 /* 4268 * If the page is in swap cache, uncharge should be deferred 4269 * to the swap path, which also properly accounts swap usage 4270 * and handles memcg lifetime. 4271 * 4272 * Note that this check is not stable and reclaim may add the 4273 * page to swap cache at any time after this. However, if the 4274 * page is not in swap cache by the time page->mapcount hits 4275 * 0, there won't be any page table references to the swap 4276 * slot, and reclaim will free it and not actually write the 4277 * page to disk. 4278 */ 4279 if (PageSwapCache(page)) 4280 return; 4281 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); 4282 } 4283 4284 void mem_cgroup_uncharge_cache_page(struct page *page) 4285 { 4286 VM_BUG_ON_PAGE(page_mapped(page), page); 4287 VM_BUG_ON_PAGE(page->mapping, page); 4288 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false); 4289 } 4290 4291 /* 4292 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate. 4293 * In those cases, pages are freed continuously and we can expect them to 4294 * be in the same memcg. Each of these callers itself limits the number of 4295 * pages freed at once, and calls uncharge_start/end() properly. 4296 * This may be called multiple (e.g. 2) times in one context. 4297 */ 4298 4299 void mem_cgroup_uncharge_start(void) 4300 { 4301 current->memcg_batch.do_batch++; 4302 /* We can do nest. */ 4303 if (current->memcg_batch.do_batch == 1) { 4304 current->memcg_batch.memcg = NULL; 4305 current->memcg_batch.nr_pages = 0; 4306 current->memcg_batch.memsw_nr_pages = 0; 4307 } 4308 } 4309 4310 void mem_cgroup_uncharge_end(void) 4311 { 4312 struct memcg_batch_info *batch = &current->memcg_batch; 4313 4314 if (!batch->do_batch) 4315 return; 4316 4317 batch->do_batch--; 4318 if (batch->do_batch) /* If stacked, do nothing. */ 4319 return; 4320 4321 if (!batch->memcg) 4322 return; 4323 /* 4324 * This "batch->memcg" is valid without any css_get/put etc... 4325 * because we hide charges behind us. 4326 */ 4327 if (batch->nr_pages) 4328 res_counter_uncharge(&batch->memcg->res, 4329 batch->nr_pages * PAGE_SIZE); 4330 if (batch->memsw_nr_pages) 4331 res_counter_uncharge(&batch->memcg->memsw, 4332 batch->memsw_nr_pages * PAGE_SIZE); 4333 memcg_oom_recover(batch->memcg); 4334 /* forget this pointer (for sanity check) */ 4335 batch->memcg = NULL; 4336 } 4337 4338 #ifdef CONFIG_SWAP 4339 /* 4340 * called after __delete_from_swap_cache(); drops the "page" account. 4341 * The memcg information is recorded in the swap_cgroup of "ent". 4342 */ 4343 void 4344 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) 4345 { 4346 struct mem_cgroup *memcg; 4347 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; 4348 4349 if (!swapout) /* this was a swap cache but the swap is unused! */ 4350 ctype = MEM_CGROUP_CHARGE_TYPE_DROP; 4351 4352 memcg = __mem_cgroup_uncharge_common(page, ctype, false); 4353 4354 /* 4355 * Record the memcg information. If swapout && memcg != NULL, 4356 * css_get() was called in uncharge().
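 *
 * The record/lookup pair then works end to end like this: at swapout we
 * store swap_cgroup_record(ent, mem_cgroup_id(memcg)); when the swap
 * entry is finally freed, mem_cgroup_uncharge_swap() reads the id back
 * with swap_cgroup_record(ent, 0) and resolves it via mem_cgroup_lookup()
 * to find the group whose "memsw" counter to uncharge.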
4357 */ 4358 if (do_swap_account && swapout && memcg) 4359 swap_cgroup_record(ent, mem_cgroup_id(memcg)); 4360 } 4361 #endif 4362 4363 #ifdef CONFIG_MEMCG_SWAP 4364 /* 4365 * called from swap_entry_free(). remove record in swap_cgroup and 4366 * uncharge "memsw" account. 4367 */ 4368 void mem_cgroup_uncharge_swap(swp_entry_t ent) 4369 { 4370 struct mem_cgroup *memcg; 4371 unsigned short id; 4372 4373 if (!do_swap_account) 4374 return; 4375 4376 id = swap_cgroup_record(ent, 0); 4377 rcu_read_lock(); 4378 memcg = mem_cgroup_lookup(id); 4379 if (memcg) { 4380 /* 4381 * We uncharge this because swap is freed. 4382 * This memcg can be obsolete one. We avoid calling css_tryget 4383 */ 4384 if (!mem_cgroup_is_root(memcg)) 4385 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 4386 mem_cgroup_swap_statistics(memcg, false); 4387 css_put(&memcg->css); 4388 } 4389 rcu_read_unlock(); 4390 } 4391 4392 /** 4393 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 4394 * @entry: swap entry to be moved 4395 * @from: mem_cgroup which the entry is moved from 4396 * @to: mem_cgroup which the entry is moved to 4397 * 4398 * It succeeds only when the swap_cgroup's record for this entry is the same 4399 * as the mem_cgroup's id of @from. 4400 * 4401 * Returns 0 on success, -EINVAL on failure. 4402 * 4403 * The caller must have charged to @to, IOW, called res_counter_charge() about 4404 * both res and memsw, and called css_get(). 4405 */ 4406 static int mem_cgroup_move_swap_account(swp_entry_t entry, 4407 struct mem_cgroup *from, struct mem_cgroup *to) 4408 { 4409 unsigned short old_id, new_id; 4410 4411 old_id = mem_cgroup_id(from); 4412 new_id = mem_cgroup_id(to); 4413 4414 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 4415 mem_cgroup_swap_statistics(from, false); 4416 mem_cgroup_swap_statistics(to, true); 4417 /* 4418 * This function is only called from task migration context now. 4419 * It postpones res_counter and refcount handling till the end 4420 * of task migration(mem_cgroup_clear_mc()) for performance 4421 * improvement. But we cannot postpone css_get(to) because if 4422 * the process that has been moved to @to does swap-in, the 4423 * refcount of @to might be decreased to 0. 4424 * 4425 * We are in attach() phase, so the cgroup is guaranteed to be 4426 * alive, so we can just call css_get(). 4427 */ 4428 css_get(&to->css); 4429 return 0; 4430 } 4431 return -EINVAL; 4432 } 4433 #else 4434 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 4435 struct mem_cgroup *from, struct mem_cgroup *to) 4436 { 4437 return -EINVAL; 4438 } 4439 #endif 4440 4441 /* 4442 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 4443 * page belongs to. 4444 */ 4445 void mem_cgroup_prepare_migration(struct page *page, struct page *newpage, 4446 struct mem_cgroup **memcgp) 4447 { 4448 struct mem_cgroup *memcg = NULL; 4449 unsigned int nr_pages = 1; 4450 struct page_cgroup *pc; 4451 enum charge_type ctype; 4452 4453 *memcgp = NULL; 4454 4455 if (mem_cgroup_disabled()) 4456 return; 4457 4458 if (PageTransHuge(page)) 4459 nr_pages <<= compound_order(page); 4460 4461 pc = lookup_page_cgroup(page); 4462 lock_page_cgroup(pc); 4463 if (PageCgroupUsed(pc)) { 4464 memcg = pc->mem_cgroup; 4465 css_get(&memcg->css); 4466 /* 4467 * At migrating an anonymous page, its mapcount goes down 4468 * to 0 and uncharge() will be called. But, even if it's fully 4469 * unmapped, migration may fail and this page has to be 4470 * charged again. 
We set MIGRATION flag here and delay uncharge 4471 * until end_migration() is called. 4472 * 4473 * Corner Case Thinking 4474 * A) 4475 * When the old page was mapped as Anon and it was unmapped and freed 4476 * while migration was ongoing. 4477 * If unmap finds the old page, uncharge() of it will be delayed 4478 * until end_migration(). If unmap finds a new page, it's 4479 * uncharged when its mapcount goes from 1 to 0. If the unmap code 4480 * finds a swap migration entry, the new page will not be mapped 4481 * and end_migration() will find it (mapcount == 0). 4482 * 4483 * B) 4484 * When the old page was mapped but migration fails, the kernel 4485 * remaps it. A charge for it is kept by the MIGRATION flag even 4486 * if mapcount goes down to 0. The remap can be done successfully 4487 * without charging it again. 4488 * 4489 * C) 4490 * The "old" page is under lock_page() until the end of 4491 * migration, so, the old page itself will not be swapped-out. 4492 * If the new page is swapped out before end_migration, our 4493 * hook into the usual swap-out path will catch the event. 4494 */ 4495 if (PageAnon(page)) 4496 SetPageCgroupMigration(pc); 4497 } 4498 unlock_page_cgroup(pc); 4499 /* 4500 * If the page is not charged at this point, 4501 * we return here. 4502 */ 4503 if (!memcg) 4504 return; 4505 4506 *memcgp = memcg; 4507 /* 4508 * We charge the new page before it's used/mapped. So, even if unlock_page() 4509 * is called before end_migration, we can catch all events on this new 4510 * page. In case the new page is migrated but not remapped, its 4511 * mapcount will finally be 0 and we call uncharge in end_migration(). 4512 */ 4513 if (PageAnon(page)) 4514 ctype = MEM_CGROUP_CHARGE_TYPE_ANON; 4515 else 4516 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 4517 /* 4518 * The page is committed to the memcg, but it's not actually 4519 * charged to the res_counter since we plan on replacing the 4520 * old one and only one page is going to be left afterwards. 4521 */ 4522 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false); 4523 } 4524 4525 /* remove the redundant charge if migration failed */ 4526 void mem_cgroup_end_migration(struct mem_cgroup *memcg, 4527 struct page *oldpage, struct page *newpage, bool migration_ok) 4528 { 4529 struct page *used, *unused; 4530 struct page_cgroup *pc; 4531 bool anon; 4532 4533 if (!memcg) 4534 return; 4535 4536 if (!migration_ok) { 4537 used = oldpage; 4538 unused = newpage; 4539 } else { 4540 used = newpage; 4541 unused = oldpage; 4542 } 4543 anon = PageAnon(used); 4544 __mem_cgroup_uncharge_common(unused, 4545 anon ? MEM_CGROUP_CHARGE_TYPE_ANON 4546 : MEM_CGROUP_CHARGE_TYPE_CACHE, 4547 true); 4548 css_put(&memcg->css); 4549 /* 4550 * We disallowed uncharge of pages under migration because mapcount 4551 * of the page goes down to zero, temporarily. 4552 * Clear the flag and check whether the page should still be charged. 4553 */ 4554 pc = lookup_page_cgroup(oldpage); 4555 lock_page_cgroup(pc); 4556 ClearPageCgroupMigration(pc); 4557 unlock_page_cgroup(pc); 4558 4559 /* 4560 * If a page is a file cache, the radix-tree replacement is atomic 4561 * and we can skip this check. When it was an Anon page, its mapcount 4562 * goes down to 0. But because we added the MIGRATION flag, it's not 4563 * uncharged yet. There are several cases, but the page->mapcount check 4564 * and the USED bit check in mem_cgroup_uncharge_page() do enough 4565 * checking.
(see prepare_charge() also) */ 4567 if (anon) 4568 mem_cgroup_uncharge_page(used); 4569 } 4570 4571 /* 4572 * When replacing page cache, the newpage is not under any memcg but it's 4573 * on the LRU. So, this function doesn't touch the res_counter but handles 4574 * the LRU in the correct way. Both pages are locked so we cannot race with uncharge. 4575 */ 4576 void mem_cgroup_replace_page_cache(struct page *oldpage, 4577 struct page *newpage) 4578 { 4579 struct mem_cgroup *memcg = NULL; 4580 struct page_cgroup *pc; 4581 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 4582 4583 if (mem_cgroup_disabled()) 4584 return; 4585 4586 pc = lookup_page_cgroup(oldpage); 4587 /* fix accounting on the old page */ 4588 lock_page_cgroup(pc); 4589 if (PageCgroupUsed(pc)) { 4590 memcg = pc->mem_cgroup; 4591 mem_cgroup_charge_statistics(memcg, oldpage, false, -1); 4592 ClearPageCgroupUsed(pc); 4593 } 4594 unlock_page_cgroup(pc); 4595 4596 /* 4597 * When called from shmem_replace_page(), in some cases the 4598 * oldpage has already been charged, and in some cases not. 4599 */ 4600 if (!memcg) 4601 return; 4602 /* 4603 * Even if newpage->mapping was NULL before starting replacement, 4604 * the newpage may already be on the LRU (or on a pagevec for the LRU). 4605 * We lock the LRU while we overwrite pc->mem_cgroup. 4606 */ 4607 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true); 4608 } 4609 4610 #ifdef CONFIG_DEBUG_VM 4611 static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 4612 { 4613 struct page_cgroup *pc; 4614 4615 pc = lookup_page_cgroup(page); 4616 /* 4617 * Can be NULL while feeding pages into the page allocator for 4618 * the first time, i.e. during boot or memory hotplug; 4619 * or when mem_cgroup_disabled(). 4620 */ 4621 if (likely(pc) && PageCgroupUsed(pc)) 4622 return pc; 4623 return NULL; 4624 } 4625 4626 bool mem_cgroup_bad_page_check(struct page *page) 4627 { 4628 if (mem_cgroup_disabled()) 4629 return false; 4630 4631 return lookup_page_cgroup_used(page) != NULL; 4632 } 4633 4634 void mem_cgroup_print_bad_page(struct page *page) 4635 { 4636 struct page_cgroup *pc; 4637 4638 pc = lookup_page_cgroup_used(page); 4639 if (pc) { 4640 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n", 4641 pc, pc->flags, pc->mem_cgroup); 4642 } 4643 } 4644 #endif 4645 4646 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 4647 unsigned long long val) 4648 { 4649 int retry_count; 4650 u64 memswlimit, memlimit; 4651 int ret = 0; 4652 int children = mem_cgroup_count_children(memcg); 4653 u64 curusage, oldusage; 4654 int enlarge; 4655 4656 /* 4657 * For keeping hierarchical_reclaim simple, how long we should retry 4658 * depends on the caller. We set our retry count to be a function 4659 * of the number of children we should visit in this loop. 4660 */ 4661 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 4662 4663 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 4664 4665 enlarge = 0; 4666 while (retry_count) { 4667 if (signal_pending(current)) { 4668 ret = -EINTR; 4669 break; 4670 } 4671 /* 4672 * Rather than hiding it all in some function, I do this in 4673 * an open-coded manner so you can see what it really does. 4674 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
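 *
 * Seen from userspace the invariant looks like this (paths assume a
 * typical v1 mount of the memory controller):
 *
 *	# echo 512M > memory.limit_in_bytes
 *	# echo 1G > memory.memsw.limit_in_bytes
 *
 * Writing a memory.limit_in_bytes larger than the current
 * memory.memsw.limit_in_bytes fails with -EINVAL, which is the
 * memswlimit < val check below.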
4675 */ 4676 mutex_lock(&set_limit_mutex); 4677 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4678 if (memswlimit < val) { 4679 ret = -EINVAL; 4680 mutex_unlock(&set_limit_mutex); 4681 break; 4682 } 4683 4684 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4685 if (memlimit < val) 4686 enlarge = 1; 4687 4688 ret = res_counter_set_limit(&memcg->res, val); 4689 if (!ret) { 4690 if (memswlimit == val) 4691 memcg->memsw_is_minimum = true; 4692 else 4693 memcg->memsw_is_minimum = false; 4694 } 4695 mutex_unlock(&set_limit_mutex); 4696 4697 if (!ret) 4698 break; 4699 4700 mem_cgroup_reclaim(memcg, GFP_KERNEL, 4701 MEM_CGROUP_RECLAIM_SHRINK); 4702 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 4703 /* Was usage reduced? */ 4704 if (curusage >= oldusage) 4705 retry_count--; 4706 else 4707 oldusage = curusage; 4708 } 4709 if (!ret && enlarge) 4710 memcg_oom_recover(memcg); 4711 4712 return ret; 4713 } 4714 4715 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 4716 unsigned long long val) 4717 { 4718 int retry_count; 4719 u64 memlimit, memswlimit, oldusage, curusage; 4720 int children = mem_cgroup_count_children(memcg); 4721 int ret = -EBUSY; 4722 int enlarge = 0; 4723 4724 /* see mem_cgroup_resize_limit() */ 4725 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 4726 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 4727 while (retry_count) { 4728 if (signal_pending(current)) { 4729 ret = -EINTR; 4730 break; 4731 } 4732 /* 4733 * Rather than hiding it all in some function, I do this in 4734 * an open-coded manner so you can see what it really does. 4735 * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 4736 */ 4737 mutex_lock(&set_limit_mutex); 4738 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4739 if (memlimit > val) { 4740 ret = -EINVAL; 4741 mutex_unlock(&set_limit_mutex); 4742 break; 4743 } 4744 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4745 if (memswlimit < val) 4746 enlarge = 1; 4747 ret = res_counter_set_limit(&memcg->memsw, val); 4748 if (!ret) { 4749 if (memlimit == val) 4750 memcg->memsw_is_minimum = true; 4751 else 4752 memcg->memsw_is_minimum = false; 4753 } 4754 mutex_unlock(&set_limit_mutex); 4755 4756 if (!ret) 4757 break; 4758 4759 mem_cgroup_reclaim(memcg, GFP_KERNEL, 4760 MEM_CGROUP_RECLAIM_NOSWAP | 4761 MEM_CGROUP_RECLAIM_SHRINK); 4762 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 4763 /* Was usage reduced?
*/ 4764 if (curusage >= oldusage) 4765 retry_count--; 4766 else 4767 oldusage = curusage; 4768 } 4769 if (!ret && enlarge) 4770 memcg_oom_recover(memcg); 4771 return ret; 4772 } 4773 4774 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 4775 gfp_t gfp_mask, 4776 unsigned long *total_scanned) 4777 { 4778 unsigned long nr_reclaimed = 0; 4779 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 4780 unsigned long reclaimed; 4781 int loop = 0; 4782 struct mem_cgroup_tree_per_zone *mctz; 4783 unsigned long long excess; 4784 unsigned long nr_scanned; 4785 4786 if (order > 0) 4787 return 0; 4788 4789 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 4790 /* 4791 * This loop can run for a while, especially if mem_cgroups continuously 4792 * keep exceeding their soft limit and putting the system under 4793 * pressure. 4794 */ 4795 do { 4796 if (next_mz) 4797 mz = next_mz; 4798 else 4799 mz = mem_cgroup_largest_soft_limit_node(mctz); 4800 if (!mz) 4801 break; 4802 4803 nr_scanned = 0; 4804 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, 4805 gfp_mask, &nr_scanned); 4806 nr_reclaimed += reclaimed; 4807 *total_scanned += nr_scanned; 4808 spin_lock(&mctz->lock); 4809 4810 /* 4811 * If we failed to reclaim anything from this memory cgroup 4812 * it is time to move on to the next cgroup. 4813 */ 4814 next_mz = NULL; 4815 if (!reclaimed) { 4816 do { 4817 /* 4818 * Loop until we find yet another one. 4819 * 4820 * By the time we get the soft_limit lock 4821 * again, someone might have added the 4822 * group back on the RB tree. Iterate to 4823 * make sure we get a different memcg. 4824 * mem_cgroup_largest_soft_limit_node returns 4825 * NULL if no other cgroup is present on 4826 * the tree. 4827 */ 4828 next_mz = 4829 __mem_cgroup_largest_soft_limit_node(mctz); 4830 if (next_mz == mz) 4831 css_put(&next_mz->memcg->css); 4832 else /* next_mz == NULL or other memcg */ 4833 break; 4834 } while (1); 4835 } 4836 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz); 4837 excess = res_counter_soft_limit_excess(&mz->memcg->res); 4838 /* 4839 * One school of thought says that we should not add 4840 * back the node to the tree if reclaim returns 0. 4841 * But our reclaim could return 0, simply because, due 4842 * to priority, we are exposing a smaller subset of 4843 * memory to reclaim from. Consider this as a longer 4844 * term TODO. 4845 */ 4846 /* If excess == 0, no tree ops */ 4847 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess); 4848 spin_unlock(&mctz->lock); 4849 css_put(&mz->memcg->css); 4850 loop++; 4851 /* 4852 * Could not reclaim anything and there are no more 4853 * mem cgroups to try or we seem to be looping without 4854 * reclaiming anything. 4855 */ 4856 if (!nr_reclaimed && 4857 (next_mz == NULL || 4858 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 4859 break; 4860 } while (!nr_reclaimed); 4861 if (next_mz) 4862 css_put(&next_mz->memcg->css); 4863 return nr_reclaimed; 4864 } 4865 4866 /** 4867 * mem_cgroup_force_empty_list - clears LRU of a group 4868 * @memcg: group to clear 4869 * @node: NUMA node 4870 * @zid: zone id 4871 * @lru: lru to clear 4872 * 4873 * Traverse a specified page_cgroup list and try to drop them all. This doesn't 4874 * reclaim the pages themselves - pages are moved to the parent (or root) 4875 * group.
4876 */ 4877 static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, 4878 int node, int zid, enum lru_list lru) 4879 { 4880 struct lruvec *lruvec; 4881 unsigned long flags; 4882 struct list_head *list; 4883 struct page *busy; 4884 struct zone *zone; 4885 4886 zone = &NODE_DATA(node)->node_zones[zid]; 4887 lruvec = mem_cgroup_zone_lruvec(zone, memcg); 4888 list = &lruvec->lists[lru]; 4889 4890 busy = NULL; 4891 do { 4892 struct page_cgroup *pc; 4893 struct page *page; 4894 4895 spin_lock_irqsave(&zone->lru_lock, flags); 4896 if (list_empty(list)) { 4897 spin_unlock_irqrestore(&zone->lru_lock, flags); 4898 break; 4899 } 4900 page = list_entry(list->prev, struct page, lru); 4901 if (busy == page) { 4902 list_move(&page->lru, list); 4903 busy = NULL; 4904 spin_unlock_irqrestore(&zone->lru_lock, flags); 4905 continue; 4906 } 4907 spin_unlock_irqrestore(&zone->lru_lock, flags); 4908 4909 pc = lookup_page_cgroup(page); 4910 4911 if (mem_cgroup_move_parent(page, pc, memcg)) { 4912 /* found lock contention or "pc" is obsolete. */ 4913 busy = page; 4914 cond_resched(); 4915 } else 4916 busy = NULL; 4917 } while (!list_empty(list)); 4918 } 4919 4920 /* 4921 * make mem_cgroup's charge to be 0 if there is no task by moving 4922 * all the charges and pages to the parent. 4923 * This enables deleting this mem_cgroup. 4924 * 4925 * Caller is responsible for holding css reference on the memcg. 4926 */ 4927 static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) 4928 { 4929 int node, zid; 4930 u64 usage; 4931 4932 do { 4933 /* This is for making all *used* pages to be on LRU. */ 4934 lru_add_drain_all(); 4935 drain_all_stock_sync(memcg); 4936 mem_cgroup_start_move(memcg); 4937 for_each_node_state(node, N_MEMORY) { 4938 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4939 enum lru_list lru; 4940 for_each_lru(lru) { 4941 mem_cgroup_force_empty_list(memcg, 4942 node, zid, lru); 4943 } 4944 } 4945 } 4946 mem_cgroup_end_move(memcg); 4947 memcg_oom_recover(memcg); 4948 cond_resched(); 4949 4950 /* 4951 * Kernel memory may not necessarily be trackable to a specific 4952 * process. So they are not migrated, and therefore we can't 4953 * expect their value to drop to 0 here. 4954 * Having res filled up with kmem only is enough. 4955 * 4956 * This is a safety check because mem_cgroup_force_empty_list 4957 * could have raced with mem_cgroup_replace_page_cache callers 4958 * so the lru seemed empty but the page could have been added 4959 * right after the check. RES_USAGE should be safe as we always 4960 * charge before adding to the LRU. 4961 */ 4962 usage = res_counter_read_u64(&memcg->res, RES_USAGE) - 4963 res_counter_read_u64(&memcg->kmem, RES_USAGE); 4964 } while (usage > 0); 4965 } 4966 4967 static inline bool memcg_has_children(struct mem_cgroup *memcg) 4968 { 4969 lockdep_assert_held(&memcg_create_mutex); 4970 /* 4971 * The lock does not prevent addition or deletion to the list 4972 * of children, but it prevents a new child from being 4973 * initialized based on this parent in css_online(), so it's 4974 * enough to decide whether hierarchically inherited 4975 * attributes can still be changed or not. 4976 */ 4977 return memcg->use_hierarchy && 4978 !list_empty(&memcg->css.cgroup->children); 4979 } 4980 4981 /* 4982 * Reclaims as many pages from the given memcg as possible and moves 4983 * the rest to the parent. 4984 * 4985 * Caller is responsible for holding css reference for memcg. 
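 *
 * This backs the memory.force_empty control file; e.g. (the path is an
 * illustrative v1 mount point):
 *
 *	# echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * is typically done before rmdir of an otherwise unused group.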
4986 */ 4987 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 4988 { 4989 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 4990 struct cgroup *cgrp = memcg->css.cgroup; 4991 4992 /* returns -EBUSY if there is a task or if we come here twice. */ 4993 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 4994 return -EBUSY; 4995 4996 /* we call try-to-free pages to make this cgroup empty */ 4997 lru_add_drain_all(); 4998 /* try to free all pages in this cgroup */ 4999 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) { 5000 int progress; 5001 5002 if (signal_pending(current)) 5003 return -EINTR; 5004 5005 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, 5006 false); 5007 if (!progress) { 5008 nr_retries--; 5009 /* maybe some writeback is necessary */ 5010 congestion_wait(BLK_RW_ASYNC, HZ/10); 5011 } 5012 5013 } 5014 lru_add_drain(); 5015 mem_cgroup_reparent_charges(memcg); 5016 5017 return 0; 5018 } 5019 5020 static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css, 5021 unsigned int event) 5022 { 5023 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5024 5025 if (mem_cgroup_is_root(memcg)) 5026 return -EINVAL; 5027 return mem_cgroup_force_empty(memcg); 5028 } 5029 5030 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 5031 struct cftype *cft) 5032 { 5033 return mem_cgroup_from_css(css)->use_hierarchy; 5034 } 5035 5036 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 5037 struct cftype *cft, u64 val) 5038 { 5039 int retval = 0; 5040 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5041 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css)); 5042 5043 mutex_lock(&memcg_create_mutex); 5044 5045 if (memcg->use_hierarchy == val) 5046 goto out; 5047 5048 /* 5049 * If parent's use_hierarchy is set, we can't make any modifications 5050 * in the child subtrees. If it is unset, then the change can 5051 * occur, provided the current cgroup has no children. 5052 * 5053 * For the root cgroup, parent_memcg is NULL; we allow the value to be 5054 * set if there are no children. 5055 */ 5056 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 5057 (val == 1 || val == 0)) { 5058 if (list_empty(&memcg->css.cgroup->children)) 5059 memcg->use_hierarchy = val; 5060 else 5061 retval = -EBUSY; 5062 } else 5063 retval = -EINVAL; 5064 5065 out: 5066 mutex_unlock(&memcg_create_mutex); 5067 5068 return retval; 5069 } 5070 5071 5072 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, 5073 enum mem_cgroup_stat_index idx) 5074 { 5075 struct mem_cgroup *iter; 5076 long val = 0; 5077 5078 /* Per-cpu values can be negative, use a signed accumulator */ 5079 for_each_mem_cgroup_tree(iter, memcg) 5080 val += mem_cgroup_read_stat(iter, idx); 5081 5082 if (val < 0) /* race ? */ 5083 val = 0; 5084 return val; 5085 } 5086 5087 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 5088 { 5089 u64 val; 5090 5091 if (!mem_cgroup_is_root(memcg)) { 5092 if (!swap) 5093 return res_counter_read_u64(&memcg->res, RES_USAGE); 5094 else 5095 return res_counter_read_u64(&memcg->memsw, RES_USAGE); 5096 } 5097 5098 /* 5099 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS 5100 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
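 *
 * As a worked example with 4KiB pages: if the hierarchy sums to
 * cache = 100 pages, rss = 50 pages and swap = 10 pages, then
 * mem_cgroup_usage() reports (100 + 50) << PAGE_SHIFT = 614400 bytes,
 * or (100 + 50 + 10) << PAGE_SHIFT = 655360 bytes when swap == true.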
5101 */ 5102 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); 5103 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); 5104 5105 if (swap) 5106 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP); 5107 5108 return val << PAGE_SHIFT; 5109 } 5110 5111 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 5112 struct cftype *cft) 5113 { 5114 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5115 u64 val; 5116 int name; 5117 enum res_type type; 5118 5119 type = MEMFILE_TYPE(cft->private); 5120 name = MEMFILE_ATTR(cft->private); 5121 5122 switch (type) { 5123 case _MEM: 5124 if (name == RES_USAGE) 5125 val = mem_cgroup_usage(memcg, false); 5126 else 5127 val = res_counter_read_u64(&memcg->res, name); 5128 break; 5129 case _MEMSWAP: 5130 if (name == RES_USAGE) 5131 val = mem_cgroup_usage(memcg, true); 5132 else 5133 val = res_counter_read_u64(&memcg->memsw, name); 5134 break; 5135 case _KMEM: 5136 val = res_counter_read_u64(&memcg->kmem, name); 5137 break; 5138 default: 5139 BUG(); 5140 } 5141 5142 return val; 5143 } 5144 5145 #ifdef CONFIG_MEMCG_KMEM 5146 /* should be called with activate_kmem_mutex held */ 5147 static int __memcg_activate_kmem(struct mem_cgroup *memcg, 5148 unsigned long long limit) 5149 { 5150 int err = 0; 5151 int memcg_id; 5152 5153 if (memcg_kmem_is_active(memcg)) 5154 return 0; 5155 5156 /* 5157 * We are going to allocate memory for data shared by all memory 5158 * cgroups so let's stop accounting here. 5159 */ 5160 memcg_stop_kmem_account(); 5161 5162 /* 5163 * For simplicity, we won't allow this to be disabled. It also can't 5164 * be changed if the cgroup has children already, or if tasks had 5165 * already joined. 5166 * 5167 * If tasks join before we set the limit, a person looking at 5168 * kmem.usage_in_bytes will have no way to determine when it took 5169 * place, which makes the value quite meaningless. 5170 * 5171 * After it first became limited, changes in the value of the limit are 5172 * of course permitted. 5173 */ 5174 mutex_lock(&memcg_create_mutex); 5175 if (cgroup_task_count(memcg->css.cgroup) || memcg_has_children(memcg)) 5176 err = -EBUSY; 5177 mutex_unlock(&memcg_create_mutex); 5178 if (err) 5179 goto out; 5180 5181 memcg_id = ida_simple_get(&kmem_limited_groups, 5182 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 5183 if (memcg_id < 0) { 5184 err = memcg_id; 5185 goto out; 5186 } 5187 5188 /* 5189 * Make sure we have enough space for this cgroup in each root cache's 5190 * memcg_params. 5191 */ 5192 err = memcg_update_all_caches(memcg_id + 1); 5193 if (err) 5194 goto out_rmid; 5195 5196 memcg->kmemcg_id = memcg_id; 5197 INIT_LIST_HEAD(&memcg->memcg_slab_caches); 5198 mutex_init(&memcg->slab_caches_mutex); 5199 5200 /* 5201 * We couldn't have accounted to this cgroup, because it hasn't got the 5202 * active bit set yet, so this should succeed. 5203 */ 5204 err = res_counter_set_limit(&memcg->kmem, limit); 5205 VM_BUG_ON(err); 5206 5207 static_key_slow_inc(&memcg_kmem_enabled_key); 5208 /* 5209 * Setting the active bit after enabling static branching will 5210 * guarantee no one starts accounting before all call sites are 5211 * patched. 
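 *
 * For context, kmem accounting is activated from userspace by the first
 * write to the kmem limit file, e.g. (an illustrative v1 path):
 *
 *	# echo 512M > memory.kmem.limit_in_bytes
 *
 * which reaches this function via memcg_update_kmem_limit() below.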
5212 */ 5213 memcg_kmem_set_active(memcg); 5214 out: 5215 memcg_resume_kmem_account(); 5216 return err; 5217 5218 out_rmid: 5219 ida_simple_remove(&kmem_limited_groups, memcg_id); 5220 goto out; 5221 } 5222 5223 static int memcg_activate_kmem(struct mem_cgroup *memcg, 5224 unsigned long long limit) 5225 { 5226 int ret; 5227 5228 mutex_lock(&activate_kmem_mutex); 5229 ret = __memcg_activate_kmem(memcg, limit); 5230 mutex_unlock(&activate_kmem_mutex); 5231 return ret; 5232 } 5233 5234 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 5235 unsigned long long val) 5236 { 5237 int ret; 5238 5239 if (!memcg_kmem_is_active(memcg)) 5240 ret = memcg_activate_kmem(memcg, val); 5241 else 5242 ret = res_counter_set_limit(&memcg->kmem, val); 5243 return ret; 5244 } 5245 5246 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 5247 { 5248 int ret = 0; 5249 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5250 5251 if (!parent) 5252 return 0; 5253 5254 mutex_lock(&activate_kmem_mutex); 5255 /* 5256 * If the parent cgroup is not kmem-active now, it cannot be activated 5257 * after this point, because it has at least one child already. 5258 */ 5259 if (memcg_kmem_is_active(parent)) 5260 ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX); 5261 mutex_unlock(&activate_kmem_mutex); 5262 return ret; 5263 } 5264 #else 5265 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 5266 unsigned long long val) 5267 { 5268 return -EINVAL; 5269 } 5270 #endif /* CONFIG_MEMCG_KMEM */ 5271 5272 /* 5273 * The user of this function is... 5274 * RES_LIMIT. 5275 */ 5276 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft, 5277 const char *buffer) 5278 { 5279 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5280 enum res_type type; 5281 int name; 5282 unsigned long long val; 5283 int ret; 5284 5285 type = MEMFILE_TYPE(cft->private); 5286 name = MEMFILE_ATTR(cft->private); 5287 5288 switch (name) { 5289 case RES_LIMIT: 5290 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 5291 ret = -EINVAL; 5292 break; 5293 } 5294 /* This function does all necessary parse...reuse it */ 5295 ret = res_counter_memparse_write_strategy(buffer, &val); 5296 if (ret) 5297 break; 5298 if (type == _MEM) 5299 ret = mem_cgroup_resize_limit(memcg, val); 5300 else if (type == _MEMSWAP) 5301 ret = mem_cgroup_resize_memsw_limit(memcg, val); 5302 else if (type == _KMEM) 5303 ret = memcg_update_kmem_limit(memcg, val); 5304 else 5305 return -EINVAL; 5306 break; 5307 case RES_SOFT_LIMIT: 5308 ret = res_counter_memparse_write_strategy(buffer, &val); 5309 if (ret) 5310 break; 5311 /* 5312 * For memsw, soft limits are hard to implement in terms 5313 * of semantics, for now, we support soft limits for 5314 * control without swap 5315 */ 5316 if (type == _MEM) 5317 ret = res_counter_set_soft_limit(&memcg->res, val); 5318 else 5319 ret = -EINVAL; 5320 break; 5321 default: 5322 ret = -EINVAL; /* should be BUG() ? 
*/ 5323 break; 5324 } 5325 return ret; 5326 } 5327 5328 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, 5329 unsigned long long *mem_limit, unsigned long long *memsw_limit) 5330 { 5331 unsigned long long min_limit, min_memsw_limit, tmp; 5332 5333 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 5334 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 5335 if (!memcg->use_hierarchy) 5336 goto out; 5337 5338 while (css_parent(&memcg->css)) { 5339 memcg = mem_cgroup_from_css(css_parent(&memcg->css)); 5340 if (!memcg->use_hierarchy) 5341 break; 5342 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); 5343 min_limit = min(min_limit, tmp); 5344 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 5345 min_memsw_limit = min(min_memsw_limit, tmp); 5346 } 5347 out: 5348 *mem_limit = min_limit; 5349 *memsw_limit = min_memsw_limit; 5350 } 5351 5352 static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event) 5353 { 5354 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5355 int name; 5356 enum res_type type; 5357 5358 type = MEMFILE_TYPE(event); 5359 name = MEMFILE_ATTR(event); 5360 5361 switch (name) { 5362 case RES_MAX_USAGE: 5363 if (type == _MEM) 5364 res_counter_reset_max(&memcg->res); 5365 else if (type == _MEMSWAP) 5366 res_counter_reset_max(&memcg->memsw); 5367 else if (type == _KMEM) 5368 res_counter_reset_max(&memcg->kmem); 5369 else 5370 return -EINVAL; 5371 break; 5372 case RES_FAILCNT: 5373 if (type == _MEM) 5374 res_counter_reset_failcnt(&memcg->res); 5375 else if (type == _MEMSWAP) 5376 res_counter_reset_failcnt(&memcg->memsw); 5377 else if (type == _KMEM) 5378 res_counter_reset_failcnt(&memcg->kmem); 5379 else 5380 return -EINVAL; 5381 break; 5382 } 5383 5384 return 0; 5385 } 5386 5387 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 5388 struct cftype *cft) 5389 { 5390 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 5391 } 5392 5393 #ifdef CONFIG_MMU 5394 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 5395 struct cftype *cft, u64 val) 5396 { 5397 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5398 5399 if (val >= (1 << NR_MOVE_TYPE)) 5400 return -EINVAL; 5401 5402 /* 5403 * No kind of locking is needed in here, because ->can_attach() will 5404 * check this value once in the beginning of the process, and then carry 5405 * on with stale data. This means that changes to this value will only 5406 * affect task migrations starting after the change. 
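 *
 * The value is a bitmask; per Documentation/cgroups/memory.txt, bit 0
 * moves charges of anonymous pages and bit 1 moves charges of file
 * pages, so e.g.:
 *
 *	# echo 3 > memory.move_charge_at_immigrate
 *
 * enables both kinds of charge moving when a task enters the cgroup.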
5407 */ 5408 memcg->move_charge_at_immigrate = val; 5409 return 0; 5410 } 5411 #else 5412 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 5413 struct cftype *cft, u64 val) 5414 { 5415 return -ENOSYS; 5416 } 5417 #endif 5418 5419 #ifdef CONFIG_NUMA 5420 static int memcg_numa_stat_show(struct seq_file *m, void *v) 5421 { 5422 struct numa_stat { 5423 const char *name; 5424 unsigned int lru_mask; 5425 }; 5426 5427 static const struct numa_stat stats[] = { 5428 { "total", LRU_ALL }, 5429 { "file", LRU_ALL_FILE }, 5430 { "anon", LRU_ALL_ANON }, 5431 { "unevictable", BIT(LRU_UNEVICTABLE) }, 5432 }; 5433 const struct numa_stat *stat; 5434 int nid; 5435 unsigned long nr; 5436 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5437 5438 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 5439 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 5440 seq_printf(m, "%s=%lu", stat->name, nr); 5441 for_each_node_state(nid, N_MEMORY) { 5442 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5443 stat->lru_mask); 5444 seq_printf(m, " N%d=%lu", nid, nr); 5445 } 5446 seq_putc(m, '\n'); 5447 } 5448 5449 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 5450 struct mem_cgroup *iter; 5451 5452 nr = 0; 5453 for_each_mem_cgroup_tree(iter, memcg) 5454 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 5455 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 5456 for_each_node_state(nid, N_MEMORY) { 5457 nr = 0; 5458 for_each_mem_cgroup_tree(iter, memcg) 5459 nr += mem_cgroup_node_nr_lru_pages( 5460 iter, nid, stat->lru_mask); 5461 seq_printf(m, " N%d=%lu", nid, nr); 5462 } 5463 seq_putc(m, '\n'); 5464 } 5465 5466 return 0; 5467 } 5468 #endif /* CONFIG_NUMA */ 5469 5470 static inline void mem_cgroup_lru_names_not_uptodate(void) 5471 { 5472 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 5473 } 5474 5475 static int memcg_stat_show(struct seq_file *m, void *v) 5476 { 5477 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5478 struct mem_cgroup *mi; 5479 unsigned int i; 5480 5481 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 5482 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 5483 continue; 5484 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], 5485 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 5486 } 5487 5488 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 5489 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 5490 mem_cgroup_read_events(memcg, i)); 5491 5492 for (i = 0; i < NR_LRU_LISTS; i++) 5493 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 5494 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 5495 5496 /* Hierarchical information */ 5497 { 5498 unsigned long long limit, memsw_limit; 5499 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit); 5500 seq_printf(m, "hierarchical_memory_limit %llu\n", limit); 5501 if (do_swap_account) 5502 seq_printf(m, "hierarchical_memsw_limit %llu\n", 5503 memsw_limit); 5504 } 5505 5506 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 5507 long long val = 0; 5508 5509 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 5510 continue; 5511 for_each_mem_cgroup_tree(mi, memcg) 5512 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 5513 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); 5514 } 5515 5516 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 5517 unsigned long long val = 0; 5518 5519 for_each_mem_cgroup_tree(mi, memcg) 5520 val += mem_cgroup_read_events(mi, i); 5521 seq_printf(m, "total_%s %llu\n", 5522 mem_cgroup_events_names[i], val); 5523 } 5524 5525 for (i = 
0; i < NR_LRU_LISTS; i++) { 5526 unsigned long long val = 0; 5527 5528 for_each_mem_cgroup_tree(mi, memcg) 5529 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 5530 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 5531 } 5532 5533 #ifdef CONFIG_DEBUG_VM 5534 { 5535 int nid, zid; 5536 struct mem_cgroup_per_zone *mz; 5537 struct zone_reclaim_stat *rstat; 5538 unsigned long recent_rotated[2] = {0, 0}; 5539 unsigned long recent_scanned[2] = {0, 0}; 5540 5541 for_each_online_node(nid) 5542 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 5543 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 5544 rstat = &mz->lruvec.reclaim_stat; 5545 5546 recent_rotated[0] += rstat->recent_rotated[0]; 5547 recent_rotated[1] += rstat->recent_rotated[1]; 5548 recent_scanned[0] += rstat->recent_scanned[0]; 5549 recent_scanned[1] += rstat->recent_scanned[1]; 5550 } 5551 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 5552 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 5553 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 5554 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 5555 } 5556 #endif 5557 5558 return 0; 5559 } 5560 5561 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 5562 struct cftype *cft) 5563 { 5564 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5565 5566 return mem_cgroup_swappiness(memcg); 5567 } 5568 5569 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 5570 struct cftype *cft, u64 val) 5571 { 5572 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5573 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css)); 5574 5575 if (val > 100 || !parent) 5576 return -EINVAL; 5577 5578 mutex_lock(&memcg_create_mutex); 5579 5580 /* If under hierarchy, only empty-root can set this value */ 5581 if ((parent->use_hierarchy) || memcg_has_children(memcg)) { 5582 mutex_unlock(&memcg_create_mutex); 5583 return -EINVAL; 5584 } 5585 5586 memcg->swappiness = val; 5587 5588 mutex_unlock(&memcg_create_mutex); 5589 5590 return 0; 5591 } 5592 5593 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 5594 { 5595 struct mem_cgroup_threshold_ary *t; 5596 u64 usage; 5597 int i; 5598 5599 rcu_read_lock(); 5600 if (!swap) 5601 t = rcu_dereference(memcg->thresholds.primary); 5602 else 5603 t = rcu_dereference(memcg->memsw_thresholds.primary); 5604 5605 if (!t) 5606 goto unlock; 5607 5608 usage = mem_cgroup_usage(memcg, swap); 5609 5610 /* 5611 * current_threshold points to threshold just below or equal to usage. 5612 * If it's not true, a threshold was crossed after last 5613 * call of __mem_cgroup_threshold(). 5614 */ 5615 i = t->current_threshold; 5616 5617 /* 5618 * Iterate backward over array of thresholds starting from 5619 * current_threshold and check if a threshold is crossed. 5620 * If none of thresholds below usage is crossed, we read 5621 * only one element of the array here. 5622 */ 5623 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 5624 eventfd_signal(t->entries[i].eventfd, 1); 5625 5626 /* i = current_threshold + 1 */ 5627 i++; 5628 5629 /* 5630 * Iterate forward over array of thresholds starting from 5631 * current_threshold+1 and check if a threshold is crossed. 5632 * If none of thresholds above usage is crossed, we read 5633 * only one element of the array here. 
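 *
 * A worked example: with thresholds {4M, 8M, 16M} and usage rising from
 * 7M to 17M, current_threshold starts at the 4M entry; the forward walk
 * below signals the 8M and 16M eventfds and leaves current_threshold at
 * the 16M entry. A later drop back to 7M makes the backward walk above
 * signal 16M and 8M again on the way down.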
5634 */ 5635 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 5636 eventfd_signal(t->entries[i].eventfd, 1); 5637 5638 /* Update current_threshold */ 5639 t->current_threshold = i - 1; 5640 unlock: 5641 rcu_read_unlock(); 5642 } 5643 5644 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 5645 { 5646 while (memcg) { 5647 __mem_cgroup_threshold(memcg, false); 5648 if (do_swap_account) 5649 __mem_cgroup_threshold(memcg, true); 5650 5651 memcg = parent_mem_cgroup(memcg); 5652 } 5653 } 5654 5655 static int compare_thresholds(const void *a, const void *b) 5656 { 5657 const struct mem_cgroup_threshold *_a = a; 5658 const struct mem_cgroup_threshold *_b = b; 5659 5660 if (_a->threshold > _b->threshold) 5661 return 1; 5662 5663 if (_a->threshold < _b->threshold) 5664 return -1; 5665 5666 return 0; 5667 } 5668 5669 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 5670 { 5671 struct mem_cgroup_eventfd_list *ev; 5672 5673 list_for_each_entry(ev, &memcg->oom_notify, list) 5674 eventfd_signal(ev->eventfd, 1); 5675 return 0; 5676 } 5677 5678 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 5679 { 5680 struct mem_cgroup *iter; 5681 5682 for_each_mem_cgroup_tree(iter, memcg) 5683 mem_cgroup_oom_notify_cb(iter); 5684 } 5685 5686 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 5687 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 5688 { 5689 struct mem_cgroup_thresholds *thresholds; 5690 struct mem_cgroup_threshold_ary *new; 5691 u64 threshold, usage; 5692 int i, size, ret; 5693 5694 ret = res_counter_memparse_write_strategy(args, &threshold); 5695 if (ret) 5696 return ret; 5697 5698 mutex_lock(&memcg->thresholds_lock); 5699 5700 if (type == _MEM) 5701 thresholds = &memcg->thresholds; 5702 else if (type == _MEMSWAP) 5703 thresholds = &memcg->memsw_thresholds; 5704 else 5705 BUG(); 5706 5707 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 5708 5709 /* Check if a threshold crossed before adding a new one */ 5710 if (thresholds->primary) 5711 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 5712 5713 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 5714 5715 /* Allocate memory for new array of thresholds */ 5716 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 5717 GFP_KERNEL); 5718 if (!new) { 5719 ret = -ENOMEM; 5720 goto unlock; 5721 } 5722 new->size = size; 5723 5724 /* Copy thresholds (if any) to new array */ 5725 if (thresholds->primary) { 5726 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 5727 sizeof(struct mem_cgroup_threshold)); 5728 } 5729 5730 /* Add new threshold */ 5731 new->entries[size - 1].eventfd = eventfd; 5732 new->entries[size - 1].threshold = threshold; 5733 5734 /* Sort thresholds. Registering of new threshold isn't time-critical */ 5735 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 5736 compare_thresholds, NULL); 5737 5738 /* Find current threshold */ 5739 new->current_threshold = -1; 5740 for (i = 0; i < size; i++) { 5741 if (new->entries[i].threshold <= usage) { 5742 /* 5743 * new->current_threshold will not be used until 5744 * rcu_assign_pointer(), so it's safe to increment 5745 * it here. 
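 *
 * For context, an application registers such a threshold through the v1
 * cgroup.event_control file, roughly (a sketch, error handling omitted;
 * cfd is an open fd of cgroup.event_control):
 *
 *	efd = eventfd(0, 0);
 *	ufd = open("memory.usage_in_bytes", O_RDONLY);
 *	dprintf(cfd, "%d %d 8M", efd, ufd);
 *	read(efd, &cnt, 8);	/* blocks until 8M is crossed */
 *
 * which ends up here via mem_cgroup_usage_register_event() with
 * args == "8M".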
5746 */ 5747 ++new->current_threshold; 5748 } else 5749 break; 5750 } 5751 5752 /* Free the old spare buffer and save the old primary buffer as spare */ 5753 kfree(thresholds->spare); 5754 thresholds->spare = thresholds->primary; 5755 5756 rcu_assign_pointer(thresholds->primary, new); 5757 5758 /* Make sure that nobody still uses the old thresholds array */ 5759 synchronize_rcu(); 5760 5761 unlock: 5762 mutex_unlock(&memcg->thresholds_lock); 5763 5764 return ret; 5765 } 5766 5767 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 5768 struct eventfd_ctx *eventfd, const char *args) 5769 { 5770 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 5771 } 5772 5773 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 5774 struct eventfd_ctx *eventfd, const char *args) 5775 { 5776 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 5777 } 5778 5779 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 5780 struct eventfd_ctx *eventfd, enum res_type type) 5781 { 5782 struct mem_cgroup_thresholds *thresholds; 5783 struct mem_cgroup_threshold_ary *new; 5784 u64 usage; 5785 int i, j, size; 5786 5787 mutex_lock(&memcg->thresholds_lock); 5788 if (type == _MEM) 5789 thresholds = &memcg->thresholds; 5790 else if (type == _MEMSWAP) 5791 thresholds = &memcg->memsw_thresholds; 5792 else 5793 BUG(); 5794 5795 if (!thresholds->primary) 5796 goto unlock; 5797 5798 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 5799 5800 /* Check if a threshold was crossed before removing */ 5801 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 5802 5803 /* Calculate the new number of thresholds */ 5804 size = 0; 5805 for (i = 0; i < thresholds->primary->size; i++) { 5806 if (thresholds->primary->entries[i].eventfd != eventfd) 5807 size++; 5808 } 5809 5810 new = thresholds->spare; 5811 5812 /* Set the thresholds array to NULL if we don't have thresholds */ 5813 if (!size) { 5814 kfree(new); 5815 new = NULL; 5816 goto swap_buffers; 5817 } 5818 5819 new->size = size; 5820 5821 /* Copy thresholds and find the current threshold */ 5822 new->current_threshold = -1; 5823 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 5824 if (thresholds->primary->entries[i].eventfd == eventfd) 5825 continue; 5826 5827 new->entries[j] = thresholds->primary->entries[i]; 5828 if (new->entries[j].threshold <= usage) { 5829 /* 5830 * new->current_threshold will not be used 5831 * until rcu_assign_pointer(), so it's safe to increment 5832 * it here.
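 *
 * (The same RCU publication argument as in
 * __mem_cgroup_usage_register_event() above applies.)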
5833 */ 5834 ++new->current_threshold; 5835 } 5836 j++; 5837 } 5838 5839 swap_buffers: 5840 /* Swap the primary and spare arrays */ 5841 thresholds->spare = thresholds->primary; 5842 /* If all events are unregistered, free the spare array */ 5843 if (!new) { 5844 kfree(thresholds->spare); 5845 thresholds->spare = NULL; 5846 } 5847 5848 rcu_assign_pointer(thresholds->primary, new); 5849 5850 /* Make sure that nobody still uses the old thresholds array */ 5851 synchronize_rcu(); 5852 unlock: 5853 mutex_unlock(&memcg->thresholds_lock); 5854 } 5855 5856 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 5857 struct eventfd_ctx *eventfd) 5858 { 5859 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 5860 } 5861 5862 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 5863 struct eventfd_ctx *eventfd) 5864 { 5865 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 5866 } 5867 5868 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 5869 struct eventfd_ctx *eventfd, const char *args) 5870 { 5871 struct mem_cgroup_eventfd_list *event; 5872 5873 event = kmalloc(sizeof(*event), GFP_KERNEL); 5874 if (!event) 5875 return -ENOMEM; 5876 5877 spin_lock(&memcg_oom_lock); 5878 5879 event->eventfd = eventfd; 5880 list_add(&event->list, &memcg->oom_notify); 5881 5882 /* already in OOM? */ 5883 if (atomic_read(&memcg->under_oom)) 5884 eventfd_signal(eventfd, 1); 5885 spin_unlock(&memcg_oom_lock); 5886 5887 return 0; 5888 } 5889 5890 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 5891 struct eventfd_ctx *eventfd) 5892 { 5893 struct mem_cgroup_eventfd_list *ev, *tmp; 5894 5895 spin_lock(&memcg_oom_lock); 5896 5897 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 5898 if (ev->eventfd == eventfd) { 5899 list_del(&ev->list); 5900 kfree(ev); 5901 } 5902 } 5903 5904 spin_unlock(&memcg_oom_lock); 5905 } 5906 5907 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 5908 { 5909 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 5910 5911 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 5912 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom)); 5913 return 0; 5914 } 5915 5916 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 5917 struct cftype *cft, u64 val) 5918 { 5919 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5920 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css)); 5921 5922 /* cannot be set on the root cgroup, and only 0 and 1 are allowed */ 5923 if (!parent || !((val == 0) || (val == 1))) 5924 return -EINVAL; 5925 5926 mutex_lock(&memcg_create_mutex); 5927 /* oom-kill-disable is a flag for the subhierarchy.
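 * Like swappiness above, it may only be changed on a memcg whose
 * parent does not use hierarchy and which has no children yet;
 * otherwise parts of the subtree could disagree on whether the OOM
 * killer is disabled.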
*/ 5928 if ((parent->use_hierarchy) || memcg_has_children(memcg)) { 5929 mutex_unlock(&memcg_create_mutex); 5930 return -EINVAL; 5931 } 5932 memcg->oom_kill_disable = val; 5933 if (!val) 5934 memcg_oom_recover(memcg); 5935 mutex_unlock(&memcg_create_mutex); 5936 return 0; 5937 } 5938 5939 #ifdef CONFIG_MEMCG_KMEM 5940 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 5941 { 5942 int ret; 5943 5944 memcg->kmemcg_id = -1; 5945 ret = memcg_propagate_kmem(memcg); 5946 if (ret) 5947 return ret; 5948 5949 return mem_cgroup_sockets_init(memcg, ss); 5950 } 5951 5952 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 5953 { 5954 mem_cgroup_sockets_destroy(memcg); 5955 } 5956 5957 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) 5958 { 5959 if (!memcg_kmem_is_active(memcg)) 5960 return; 5961 5962 /* 5963 * kmem charges can outlive the cgroup. In the case of slab 5964 * pages, for instance, a page may contain objects from various 5965 * processes. Since we do not take a reference for every such 5966 * allocation, we have to be careful when uncharging 5967 * (see memcg_uncharge_kmem) and here during offlining. 5968 * 5969 * The idea is that only the _last_ uncharge which sees 5970 * the dead memcg will drop the last reference. An additional 5971 * reference is taken here before the group is marked dead, 5972 * and is then paired with the css_put in the uncharge path or here. 5973 * 5974 * Although this might sound strange, as this path is called from 5975 * css_offline() when the reference might have dropped down to 0 5976 * and shouldn't be incremented anymore (css_tryget() would fail), 5977 * we have no other option because of the lifetime of the kmem 5978 * allocations. 5979 */ 5980 css_get(&memcg->css); 5981 5982 memcg_kmem_mark_dead(memcg); 5983 5984 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0) 5985 return; 5986 5987 if (memcg_kmem_test_and_clear_dead(memcg)) 5988 css_put(&memcg->css); 5989 } 5990 #else 5991 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 5992 { 5993 return 0; 5994 } 5995 5996 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 5997 { 5998 } 5999 6000 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) 6001 { 6002 } 6003 #endif 6004 6005 /* 6006 * DO NOT USE IN NEW FILES. 6007 * 6008 * "cgroup.event_control" implementation. 6009 * 6010 * This is way over-engineered. It tries to support fully configurable 6011 * events for each user. Such a level of flexibility is completely 6012 * unnecessary, especially in light of the planned unified hierarchy. 6013 * 6014 * Please deprecate this and replace it with something simpler if at all 6015 * possible. 6016 */ 6017 6018 /* 6019 * Unregister the event and free resources. 6020 * 6021 * Gets called from a workqueue. 6022 */ 6023 static void memcg_event_remove(struct work_struct *work) 6024 { 6025 struct mem_cgroup_event *event = 6026 container_of(work, struct mem_cgroup_event, remove); 6027 struct mem_cgroup *memcg = event->memcg; 6028 6029 remove_wait_queue(event->wqh, &event->wait); 6030 6031 event->unregister_event(memcg, event->eventfd); 6032 6033 /* Notify userspace the event is going away. */ 6034 eventfd_signal(event->eventfd, 1); 6035 6036 eventfd_ctx_put(event->eventfd); 6037 kfree(event); 6038 css_put(&memcg->css); 6039 } 6040 6041 /* 6042 * Gets called on POLLHUP on the eventfd when the user closes it. 6043 * 6044 * Called with wqh->lock held and interrupts disabled.
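 *
 * (Assumed background, not spelled out in the original: closing the
 * last file reference to an eventfd delivers a POLLHUP wakeup to its
 * wait queue, which is what brings us here. Since this context cannot
 * sleep, the actual teardown is punted to a workqueue below.)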
*/ 6046 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 6047 int sync, void *key) 6048 { 6049 struct mem_cgroup_event *event = 6050 container_of(wait, struct mem_cgroup_event, wait); 6051 struct mem_cgroup *memcg = event->memcg; 6052 unsigned long flags = (unsigned long)key; 6053 6054 if (flags & POLLHUP) { 6055 /* 6056 * If the event has been detached at cgroup removal, we 6057 * can simply return knowing the other side will clean up 6058 * for us. 6059 * 6060 * We can't race against event freeing since the other 6061 * side will require wqh->lock via remove_wait_queue(), 6062 * which we hold. 6063 */ 6064 spin_lock(&memcg->event_list_lock); 6065 if (!list_empty(&event->list)) { 6066 list_del_init(&event->list); 6067 /* 6068 * We are in atomic context, but memcg_event_remove() 6069 * may sleep, so we have to call it via a workqueue. 6070 */ 6071 schedule_work(&event->remove); 6072 } 6073 spin_unlock(&memcg->event_list_lock); 6074 } 6075 6076 return 0; 6077 } 6078 6079 static void memcg_event_ptable_queue_proc(struct file *file, 6080 wait_queue_head_t *wqh, poll_table *pt) 6081 { 6082 struct mem_cgroup_event *event = 6083 container_of(pt, struct mem_cgroup_event, pt); 6084 6085 event->wqh = wqh; 6086 add_wait_queue(wqh, &event->wait); 6087 } 6088 6089 /* 6090 * DO NOT USE IN NEW FILES. 6091 * 6092 * Parse input and register a new cgroup event handler. 6093 * 6094 * Input must be in the format '<event_fd> <control_fd> <args>'. 6095 * Interpretation of args is defined by the control file implementation. 6096 */ 6097 static int memcg_write_event_control(struct cgroup_subsys_state *css, 6098 struct cftype *cft, const char *buffer) 6099 { 6100 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6101 struct mem_cgroup_event *event; 6102 struct cgroup_subsys_state *cfile_css; 6103 unsigned int efd, cfd; 6104 struct fd efile; 6105 struct fd cfile; 6106 const char *name; 6107 char *endp; 6108 int ret; 6109 6110 efd = simple_strtoul(buffer, &endp, 10); 6111 if (*endp != ' ') 6112 return -EINVAL; 6113 buffer = endp + 1; 6114 6115 cfd = simple_strtoul(buffer, &endp, 10); 6116 if ((*endp != ' ') && (*endp != '\0')) 6117 return -EINVAL; 6118 buffer = endp + 1; 6119 6120 event = kzalloc(sizeof(*event), GFP_KERNEL); 6121 if (!event) 6122 return -ENOMEM; 6123 6124 event->memcg = memcg; 6125 INIT_LIST_HEAD(&event->list); 6126 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 6127 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 6128 INIT_WORK(&event->remove, memcg_event_remove); 6129 6130 efile = fdget(efd); 6131 if (!efile.file) { 6132 ret = -EBADF; 6133 goto out_kfree; 6134 } 6135 6136 event->eventfd = eventfd_ctx_fileget(efile.file); 6137 if (IS_ERR(event->eventfd)) { 6138 ret = PTR_ERR(event->eventfd); 6139 goto out_put_efile; 6140 } 6141 6142 cfile = fdget(cfd); 6143 if (!cfile.file) { 6144 ret = -EBADF; 6145 goto out_put_eventfd; 6146 } 6147 6148 /* the process needs read permission on the control file */ 6149 /* AV: shouldn't we check that it's been opened for read instead? */ 6150 ret = inode_permission(file_inode(cfile.file), MAY_READ); 6151 if (ret < 0) 6152 goto out_put_cfile; 6153 6154 /* 6155 * Determine the event callbacks and set them in @event. This used 6156 * to be done via struct cftype but cgroup core no longer knows 6157 * about these events. The following is crude but the whole thing 6158 * is for compatibility anyway. 6159 * 6160 * DO NOT ADD NEW FILES.
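 *
 * Illustrative userspace usage (paths and values assumed; see
 * Documentation/cgroups/memory.txt for the authoritative interface).
 * The write below arms an 8M usage threshold in the
 * "<event_fd> <control_fd> <args>" format parsed above:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/g1/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/g1/cgroup.event_control",
 *		       O_WRONLY);
 *	dprintf(ctl, "%d %d %llu", efd, cfd, 8ULL << 20);
 *
 * after which a read() on efd blocks until the usage counter crosses
 * the 8M threshold.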
6161 */ 6162 name = cfile.file->f_dentry->d_name.name; 6163 6164 if (!strcmp(name, "memory.usage_in_bytes")) { 6165 event->register_event = mem_cgroup_usage_register_event; 6166 event->unregister_event = mem_cgroup_usage_unregister_event; 6167 } else if (!strcmp(name, "memory.oom_control")) { 6168 event->register_event = mem_cgroup_oom_register_event; 6169 event->unregister_event = mem_cgroup_oom_unregister_event; 6170 } else if (!strcmp(name, "memory.pressure_level")) { 6171 event->register_event = vmpressure_register_event; 6172 event->unregister_event = vmpressure_unregister_event; 6173 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 6174 event->register_event = memsw_cgroup_usage_register_event; 6175 event->unregister_event = memsw_cgroup_usage_unregister_event; 6176 } else { 6177 ret = -EINVAL; 6178 goto out_put_cfile; 6179 } 6180 6181 /* 6182 * Verify @cfile should belong to @css. Also, remaining events are 6183 * automatically removed on cgroup destruction but the removal is 6184 * asynchronous, so take an extra ref on @css. 6185 */ 6186 rcu_read_lock(); 6187 6188 ret = -EINVAL; 6189 cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, 6190 &mem_cgroup_subsys); 6191 if (cfile_css == css && css_tryget(css)) 6192 ret = 0; 6193 6194 rcu_read_unlock(); 6195 if (ret) 6196 goto out_put_cfile; 6197 6198 ret = event->register_event(memcg, event->eventfd, buffer); 6199 if (ret) 6200 goto out_put_css; 6201 6202 efile.file->f_op->poll(efile.file, &event->pt); 6203 6204 spin_lock(&memcg->event_list_lock); 6205 list_add(&event->list, &memcg->event_list); 6206 spin_unlock(&memcg->event_list_lock); 6207 6208 fdput(cfile); 6209 fdput(efile); 6210 6211 return 0; 6212 6213 out_put_css: 6214 css_put(css); 6215 out_put_cfile: 6216 fdput(cfile); 6217 out_put_eventfd: 6218 eventfd_ctx_put(event->eventfd); 6219 out_put_efile: 6220 fdput(efile); 6221 out_kfree: 6222 kfree(event); 6223 6224 return ret; 6225 } 6226 6227 static struct cftype mem_cgroup_files[] = { 6228 { 6229 .name = "usage_in_bytes", 6230 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 6231 .read_u64 = mem_cgroup_read_u64, 6232 }, 6233 { 6234 .name = "max_usage_in_bytes", 6235 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 6236 .trigger = mem_cgroup_reset, 6237 .read_u64 = mem_cgroup_read_u64, 6238 }, 6239 { 6240 .name = "limit_in_bytes", 6241 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 6242 .write_string = mem_cgroup_write, 6243 .read_u64 = mem_cgroup_read_u64, 6244 }, 6245 { 6246 .name = "soft_limit_in_bytes", 6247 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 6248 .write_string = mem_cgroup_write, 6249 .read_u64 = mem_cgroup_read_u64, 6250 }, 6251 { 6252 .name = "failcnt", 6253 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 6254 .trigger = mem_cgroup_reset, 6255 .read_u64 = mem_cgroup_read_u64, 6256 }, 6257 { 6258 .name = "stat", 6259 .seq_show = memcg_stat_show, 6260 }, 6261 { 6262 .name = "force_empty", 6263 .trigger = mem_cgroup_force_empty_write, 6264 }, 6265 { 6266 .name = "use_hierarchy", 6267 .flags = CFTYPE_INSANE, 6268 .write_u64 = mem_cgroup_hierarchy_write, 6269 .read_u64 = mem_cgroup_hierarchy_read, 6270 }, 6271 { 6272 .name = "cgroup.event_control", /* XXX: for compat */ 6273 .write_string = memcg_write_event_control, 6274 .flags = CFTYPE_NO_PREFIX, 6275 .mode = S_IWUGO, 6276 }, 6277 { 6278 .name = "swappiness", 6279 .read_u64 = mem_cgroup_swappiness_read, 6280 .write_u64 = mem_cgroup_swappiness_write, 6281 }, 6282 { 6283 .name = "move_charge_at_immigrate", 6284 .read_u64 = mem_cgroup_move_charge_read, 6285 
.write_u64 = mem_cgroup_move_charge_write, 6286 }, 6287 { 6288 .name = "oom_control", 6289 .seq_show = mem_cgroup_oom_control_read, 6290 .write_u64 = mem_cgroup_oom_control_write, 6291 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 6292 }, 6293 { 6294 .name = "pressure_level", 6295 }, 6296 #ifdef CONFIG_NUMA 6297 { 6298 .name = "numa_stat", 6299 .seq_show = memcg_numa_stat_show, 6300 }, 6301 #endif 6302 #ifdef CONFIG_MEMCG_KMEM 6303 { 6304 .name = "kmem.limit_in_bytes", 6305 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 6306 .write_string = mem_cgroup_write, 6307 .read_u64 = mem_cgroup_read_u64, 6308 }, 6309 { 6310 .name = "kmem.usage_in_bytes", 6311 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 6312 .read_u64 = mem_cgroup_read_u64, 6313 }, 6314 { 6315 .name = "kmem.failcnt", 6316 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 6317 .trigger = mem_cgroup_reset, 6318 .read_u64 = mem_cgroup_read_u64, 6319 }, 6320 { 6321 .name = "kmem.max_usage_in_bytes", 6322 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 6323 .trigger = mem_cgroup_reset, 6324 .read_u64 = mem_cgroup_read_u64, 6325 }, 6326 #ifdef CONFIG_SLABINFO 6327 { 6328 .name = "kmem.slabinfo", 6329 .seq_show = mem_cgroup_slabinfo_read, 6330 }, 6331 #endif 6332 #endif 6333 { }, /* terminate */ 6334 }; 6335 6336 #ifdef CONFIG_MEMCG_SWAP 6337 static struct cftype memsw_cgroup_files[] = { 6338 { 6339 .name = "memsw.usage_in_bytes", 6340 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6341 .read_u64 = mem_cgroup_read_u64, 6342 }, 6343 { 6344 .name = "memsw.max_usage_in_bytes", 6345 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6346 .trigger = mem_cgroup_reset, 6347 .read_u64 = mem_cgroup_read_u64, 6348 }, 6349 { 6350 .name = "memsw.limit_in_bytes", 6351 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6352 .write_string = mem_cgroup_write, 6353 .read_u64 = mem_cgroup_read_u64, 6354 }, 6355 { 6356 .name = "memsw.failcnt", 6357 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6358 .trigger = mem_cgroup_reset, 6359 .read_u64 = mem_cgroup_read_u64, 6360 }, 6361 { }, /* terminate */ 6362 }; 6363 #endif 6364 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 6365 { 6366 struct mem_cgroup_per_node *pn; 6367 struct mem_cgroup_per_zone *mz; 6368 int zone, tmp = node; 6369 /* 6370 * This routine is called against possible nodes. 6371 * But it's a BUG to call kmalloc() against an offline node. 6372 * 6373 * TODO: this routine can waste a lot of memory for nodes which will 6374 * never be onlined. It would be better to use a memory hotplug 6375 * callback function.
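 *
 * For nodes without normal memory we pass -1 (NUMA_NO_NODE) to
 * kzalloc_node() below, which lets the allocator fall back to any
 * node instead of touching an offline one.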
6376 */ 6377 if (!node_state(node, N_NORMAL_MEMORY)) 6378 tmp = -1; 6379 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 6380 if (!pn) 6381 return 1; 6382 6383 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6384 mz = &pn->zoneinfo[zone]; 6385 lruvec_init(&mz->lruvec); 6386 mz->usage_in_excess = 0; 6387 mz->on_tree = false; 6388 mz->memcg = memcg; 6389 } 6390 memcg->nodeinfo[node] = pn; 6391 return 0; 6392 } 6393 6394 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 6395 { 6396 kfree(memcg->nodeinfo[node]); 6397 } 6398 6399 static struct mem_cgroup *mem_cgroup_alloc(void) 6400 { 6401 struct mem_cgroup *memcg; 6402 size_t size; 6403 6404 size = sizeof(struct mem_cgroup); 6405 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 6406 6407 memcg = kzalloc(size, GFP_KERNEL); 6408 if (!memcg) 6409 return NULL; 6410 6411 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 6412 if (!memcg->stat) 6413 goto out_free; 6414 spin_lock_init(&memcg->pcp_counter_lock); 6415 return memcg; 6416 6417 out_free: 6418 kfree(memcg); 6419 return NULL; 6420 } 6421 6422 /* 6423 * At destroying mem_cgroup, references from swap_cgroup can remain. 6424 * (scanning all at force_empty is too costly...) 6425 * 6426 * Instead of clearing all references at force_empty, we remember 6427 * the number of reference from swap_cgroup and free mem_cgroup when 6428 * it goes down to 0. 6429 * 6430 * Removal of cgroup itself succeeds regardless of refs from swap. 6431 */ 6432 6433 static void __mem_cgroup_free(struct mem_cgroup *memcg) 6434 { 6435 int node; 6436 6437 mem_cgroup_remove_from_trees(memcg); 6438 6439 for_each_node(node) 6440 free_mem_cgroup_per_zone_info(memcg, node); 6441 6442 free_percpu(memcg->stat); 6443 6444 /* 6445 * We need to make sure that (at least for now), the jump label 6446 * destruction code runs outside of the cgroup lock. This is because 6447 * get_online_cpus(), which is called from the static_branch update, 6448 * can't be called inside the cgroup_lock. cpusets are the ones 6449 * enforcing this dependency, so if they ever change, we might as well. 6450 * 6451 * schedule_work() will guarantee this happens. Be careful if you need 6452 * to move this code around, and make sure it is outside 6453 * the cgroup_lock. 6454 */ 6455 disarm_static_keys(memcg); 6456 kfree(memcg); 6457 } 6458 6459 /* 6460 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
6461 */ 6462 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 6463 { 6464 if (!memcg->res.parent) 6465 return NULL; 6466 return mem_cgroup_from_res_counter(memcg->res.parent, res); 6467 } 6468 EXPORT_SYMBOL(parent_mem_cgroup); 6469 6470 static void __init mem_cgroup_soft_limit_tree_init(void) 6471 { 6472 struct mem_cgroup_tree_per_node *rtpn; 6473 struct mem_cgroup_tree_per_zone *rtpz; 6474 int tmp, node, zone; 6475 6476 for_each_node(node) { 6477 tmp = node; 6478 if (!node_state(node, N_NORMAL_MEMORY)) 6479 tmp = -1; 6480 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 6481 BUG_ON(!rtpn); 6482 6483 soft_limit_tree.rb_tree_per_node[node] = rtpn; 6484 6485 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6486 rtpz = &rtpn->rb_tree_per_zone[zone]; 6487 rtpz->rb_root = RB_ROOT; 6488 spin_lock_init(&rtpz->lock); 6489 } 6490 } 6491 } 6492 6493 static struct cgroup_subsys_state * __ref 6494 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6495 { 6496 struct mem_cgroup *memcg; 6497 long error = -ENOMEM; 6498 int node; 6499 6500 memcg = mem_cgroup_alloc(); 6501 if (!memcg) 6502 return ERR_PTR(error); 6503 6504 for_each_node(node) 6505 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 6506 goto free_out; 6507 6508 /* root ? */ 6509 if (parent_css == NULL) { 6510 root_mem_cgroup = memcg; 6511 res_counter_init(&memcg->res, NULL); 6512 res_counter_init(&memcg->memsw, NULL); 6513 res_counter_init(&memcg->kmem, NULL); 6514 } 6515 6516 memcg->last_scanned_node = MAX_NUMNODES; 6517 INIT_LIST_HEAD(&memcg->oom_notify); 6518 memcg->move_charge_at_immigrate = 0; 6519 mutex_init(&memcg->thresholds_lock); 6520 spin_lock_init(&memcg->move_lock); 6521 vmpressure_init(&memcg->vmpressure); 6522 INIT_LIST_HEAD(&memcg->event_list); 6523 spin_lock_init(&memcg->event_list_lock); 6524 6525 return &memcg->css; 6526 6527 free_out: 6528 __mem_cgroup_free(memcg); 6529 return ERR_PTR(error); 6530 } 6531 6532 static int 6533 mem_cgroup_css_online(struct cgroup_subsys_state *css) 6534 { 6535 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6536 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css)); 6537 6538 if (css->cgroup->id > MEM_CGROUP_ID_MAX) 6539 return -ENOSPC; 6540 6541 if (!parent) 6542 return 0; 6543 6544 mutex_lock(&memcg_create_mutex); 6545 6546 memcg->use_hierarchy = parent->use_hierarchy; 6547 memcg->oom_kill_disable = parent->oom_kill_disable; 6548 memcg->swappiness = mem_cgroup_swappiness(parent); 6549 6550 if (parent->use_hierarchy) { 6551 res_counter_init(&memcg->res, &parent->res); 6552 res_counter_init(&memcg->memsw, &parent->memsw); 6553 res_counter_init(&memcg->kmem, &parent->kmem); 6554 6555 /* 6556 * No need to take a reference to the parent because cgroup 6557 * core guarantees its existence. 6558 */ 6559 } else { 6560 res_counter_init(&memcg->res, NULL); 6561 res_counter_init(&memcg->memsw, NULL); 6562 res_counter_init(&memcg->kmem, NULL); 6563 /* 6564 * Deeper hierachy with use_hierarchy == false doesn't make 6565 * much sense so let cgroup subsystem know about this 6566 * unfortunate state in our controller. 6567 */ 6568 if (parent != root_mem_cgroup) 6569 mem_cgroup_subsys.broken_hierarchy = true; 6570 } 6571 mutex_unlock(&memcg_create_mutex); 6572 6573 return memcg_init_kmem(memcg, &mem_cgroup_subsys); 6574 } 6575 6576 /* 6577 * Announce all parents that a group from their hierarchy is gone. 
6578 */ 6579 static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) 6580 { 6581 struct mem_cgroup *parent = memcg; 6582 6583 while ((parent = parent_mem_cgroup(parent))) 6584 mem_cgroup_iter_invalidate(parent); 6585 6586 /* 6587 * if the root memcg is not hierarchical we have to check it 6588 * explicitely. 6589 */ 6590 if (!root_mem_cgroup->use_hierarchy) 6591 mem_cgroup_iter_invalidate(root_mem_cgroup); 6592 } 6593 6594 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 6595 { 6596 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6597 struct mem_cgroup_event *event, *tmp; 6598 6599 /* 6600 * Unregister events and notify userspace. 6601 * Notify userspace about cgroup removing only after rmdir of cgroup 6602 * directory to avoid race between userspace and kernelspace. 6603 */ 6604 spin_lock(&memcg->event_list_lock); 6605 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 6606 list_del_init(&event->list); 6607 schedule_work(&event->remove); 6608 } 6609 spin_unlock(&memcg->event_list_lock); 6610 6611 kmem_cgroup_css_offline(memcg); 6612 6613 mem_cgroup_invalidate_reclaim_iterators(memcg); 6614 mem_cgroup_reparent_charges(memcg); 6615 mem_cgroup_destroy_all_caches(memcg); 6616 vmpressure_cleanup(&memcg->vmpressure); 6617 } 6618 6619 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 6620 { 6621 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6622 /* 6623 * XXX: css_offline() would be where we should reparent all 6624 * memory to prepare the cgroup for destruction. However, 6625 * memcg does not do css_tryget() and res_counter charging 6626 * under the same RCU lock region, which means that charging 6627 * could race with offlining. Offlining only happens to 6628 * cgroups with no tasks in them but charges can show up 6629 * without any tasks from the swapin path when the target 6630 * memcg is looked up from the swapout record and not from the 6631 * current task as it usually is. A race like this can leak 6632 * charges and put pages with stale cgroup pointers into 6633 * circulation: 6634 * 6635 * #0 #1 6636 * lookup_swap_cgroup_id() 6637 * rcu_read_lock() 6638 * mem_cgroup_lookup() 6639 * css_tryget() 6640 * rcu_read_unlock() 6641 * disable css_tryget() 6642 * call_rcu() 6643 * offline_css() 6644 * reparent_charges() 6645 * res_counter_charge() 6646 * css_put() 6647 * css_free() 6648 * pc->mem_cgroup = dead memcg 6649 * add page to lru 6650 * 6651 * The bulk of the charges are still moved in offline_css() to 6652 * avoid pinning a lot of pages in case a long-term reference 6653 * like a swapout record is deferring the css_free() to long 6654 * after offlining. But this makes sure we catch any charges 6655 * made after offlining: 6656 */ 6657 mem_cgroup_reparent_charges(memcg); 6658 6659 memcg_destroy_kmem(memcg); 6660 __mem_cgroup_free(memcg); 6661 } 6662 6663 #ifdef CONFIG_MMU 6664 /* Handlers for move charge at task migration. 
*/ 6665 #define PRECHARGE_COUNT_AT_ONCE 256 6666 static int mem_cgroup_do_precharge(unsigned long count) 6667 { 6668 int ret = 0; 6669 int batch_count = PRECHARGE_COUNT_AT_ONCE; 6670 struct mem_cgroup *memcg = mc.to; 6671 6672 if (mem_cgroup_is_root(memcg)) { 6673 mc.precharge += count; 6674 /* we don't need css_get for the root */ 6675 return ret; 6676 } 6677 /* try to charge all at once */ 6678 if (count > 1) { 6679 struct res_counter *dummy; 6680 /* 6681 * "memcg" cannot be under rmdir() because we've already checked 6682 * via cgroup_lock_live_cgroup() that it has not been removed 6683 * and we are still under the same cgroup_mutex. So we can 6684 * postpone css_get(). 6685 */ 6686 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy)) 6687 goto one_by_one; 6688 if (do_swap_account && res_counter_charge(&memcg->memsw, 6689 PAGE_SIZE * count, &dummy)) { 6690 res_counter_uncharge(&memcg->res, PAGE_SIZE * count); 6691 goto one_by_one; 6692 } 6693 mc.precharge += count; 6694 return ret; 6695 } 6696 one_by_one: 6697 /* fall back to charging one by one */ 6698 while (count--) { 6699 if (signal_pending(current)) { 6700 ret = -EINTR; 6701 break; 6702 } 6703 if (!batch_count--) { 6704 batch_count = PRECHARGE_COUNT_AT_ONCE; 6705 cond_resched(); 6706 } 6707 ret = __mem_cgroup_try_charge(NULL, 6708 GFP_KERNEL, 1, &memcg, false); 6709 if (ret) 6710 /* mem_cgroup_clear_mc() will do the uncharge later */ 6711 return ret; 6712 mc.precharge++; 6713 } 6714 return ret; 6715 } 6716 6717 /** 6718 * get_mctgt_type - get target type of moving charge 6719 * @vma: the vma to which the pte to be checked belongs 6720 * @addr: the address corresponding to the pte to be checked 6721 * @ptent: the pte to be checked 6722 * @target: the pointer where the target page or swap entry will be stored (can be NULL) 6723 * 6724 * Returns 6725 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 6726 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 6727 * move charge. If @target is not NULL, the page is stored in target->page 6728 * with an extra refcount taken (callers should handle it). 6729 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 6730 * target for charge migration. If @target is not NULL, the entry is stored 6731 * in target->ent. 6732 * 6733 * Called with the pte lock held. 6734 */ 6735 union mc_target { 6736 struct page *page; 6737 swp_entry_t ent; 6738 }; 6739 6740 enum mc_target_type { 6741 MC_TARGET_NONE = 0, 6742 MC_TARGET_PAGE, 6743 MC_TARGET_SWAP, 6744 }; 6745 6746 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 6747 unsigned long addr, pte_t ptent) 6748 { 6749 struct page *page = vm_normal_page(vma, addr, ptent); 6750 6751 if (!page || !page_mapped(page)) 6752 return NULL; 6753 if (PageAnon(page)) { 6754 /* we don't move shared anon pages */ 6755 if (!move_anon()) 6756 return NULL; 6757 } else if (!move_file()) 6758 /* we ignore the mapcount for file pages */ 6759 return NULL; 6760 if (!get_page_unless_zero(page)) 6761 return NULL; 6762 6763 return page; 6764 } 6765 6766 #ifdef CONFIG_SWAP 6767 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 6768 unsigned long addr, pte_t ptent, swp_entry_t *entry) 6769 { 6770 struct page *page = NULL; 6771 swp_entry_t ent = pte_to_swp_entry(ptent); 6772 6773 if (!move_anon() || non_swap_entry(ent)) 6774 return NULL; 6775 /* 6776 * Because lookup_swap_cache() updates some statistics counters, 6777 * we call find_get_page() with swapper_space directly.
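 * The lookup may legitimately return NULL when the entry is not in
 * the swap cache; get_mctgt_type() then falls back to reporting just
 * the swap entry (MC_TARGET_SWAP).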
6778 */ 6779 page = find_get_page(swap_address_space(ent), ent.val); 6780 if (do_swap_account) 6781 entry->val = ent.val; 6782 6783 return page; 6784 } 6785 #else 6786 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 6787 unsigned long addr, pte_t ptent, swp_entry_t *entry) 6788 { 6789 return NULL; 6790 } 6791 #endif 6792 6793 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 6794 unsigned long addr, pte_t ptent, swp_entry_t *entry) 6795 { 6796 struct page *page = NULL; 6797 struct address_space *mapping; 6798 pgoff_t pgoff; 6799 6800 if (!vma->vm_file) /* anonymous vma */ 6801 return NULL; 6802 if (!move_file()) 6803 return NULL; 6804 6805 mapping = vma->vm_file->f_mapping; 6806 if (pte_none(ptent)) 6807 pgoff = linear_page_index(vma, addr); 6808 else /* pte_file(ptent) is true */ 6809 pgoff = pte_to_pgoff(ptent); 6810 6811 /* page is moved even if it's not RSS of this task(page-faulted). */ 6812 page = find_get_page(mapping, pgoff); 6813 6814 #ifdef CONFIG_SWAP 6815 /* shmem/tmpfs may report page out on swap: account for that too. */ 6816 if (radix_tree_exceptional_entry(page)) { 6817 swp_entry_t swap = radix_to_swp_entry(page); 6818 if (do_swap_account) 6819 *entry = swap; 6820 page = find_get_page(swap_address_space(swap), swap.val); 6821 } 6822 #endif 6823 return page; 6824 } 6825 6826 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 6827 unsigned long addr, pte_t ptent, union mc_target *target) 6828 { 6829 struct page *page = NULL; 6830 struct page_cgroup *pc; 6831 enum mc_target_type ret = MC_TARGET_NONE; 6832 swp_entry_t ent = { .val = 0 }; 6833 6834 if (pte_present(ptent)) 6835 page = mc_handle_present_pte(vma, addr, ptent); 6836 else if (is_swap_pte(ptent)) 6837 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 6838 else if (pte_none(ptent) || pte_file(ptent)) 6839 page = mc_handle_file_pte(vma, addr, ptent, &ent); 6840 6841 if (!page && !ent.val) 6842 return ret; 6843 if (page) { 6844 pc = lookup_page_cgroup(page); 6845 /* 6846 * Do only loose check w/o page_cgroup lock. 6847 * mem_cgroup_move_account() checks the pc is valid or not under 6848 * the lock. 6849 */ 6850 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 6851 ret = MC_TARGET_PAGE; 6852 if (target) 6853 target->page = page; 6854 } 6855 if (!ret || !target) 6856 put_page(page); 6857 } 6858 /* There is a swap entry and a page doesn't exist or isn't charged */ 6859 if (ent.val && !ret && 6860 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 6861 ret = MC_TARGET_SWAP; 6862 if (target) 6863 target->ent = ent; 6864 } 6865 return ret; 6866 } 6867 6868 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6869 /* 6870 * We don't consider swapping or file mapped pages because THP does not 6871 * support them for now. 6872 * Caller should make sure that pmd_trans_huge(pmd) is true. 
6873 */ 6874 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 6875 unsigned long addr, pmd_t pmd, union mc_target *target) 6876 { 6877 struct page *page = NULL; 6878 struct page_cgroup *pc; 6879 enum mc_target_type ret = MC_TARGET_NONE; 6880 6881 page = pmd_page(pmd); 6882 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 6883 if (!move_anon()) 6884 return ret; 6885 pc = lookup_page_cgroup(page); 6886 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 6887 ret = MC_TARGET_PAGE; 6888 if (target) { 6889 get_page(page); 6890 target->page = page; 6891 } 6892 } 6893 return ret; 6894 } 6895 #else 6896 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 6897 unsigned long addr, pmd_t pmd, union mc_target *target) 6898 { 6899 return MC_TARGET_NONE; 6900 } 6901 #endif 6902 6903 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 6904 unsigned long addr, unsigned long end, 6905 struct mm_walk *walk) 6906 { 6907 struct vm_area_struct *vma = walk->private; 6908 pte_t *pte; 6909 spinlock_t *ptl; 6910 6911 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 6912 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 6913 mc.precharge += HPAGE_PMD_NR; 6914 spin_unlock(ptl); 6915 return 0; 6916 } 6917 6918 if (pmd_trans_unstable(pmd)) 6919 return 0; 6920 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6921 for (; addr != end; pte++, addr += PAGE_SIZE) 6922 if (get_mctgt_type(vma, addr, *pte, NULL)) 6923 mc.precharge++; /* increment precharge temporarily */ 6924 pte_unmap_unlock(pte - 1, ptl); 6925 cond_resched(); 6926 6927 return 0; 6928 } 6929 6930 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 6931 { 6932 unsigned long precharge; 6933 struct vm_area_struct *vma; 6934 6935 down_read(&mm->mmap_sem); 6936 for (vma = mm->mmap; vma; vma = vma->vm_next) { 6937 struct mm_walk mem_cgroup_count_precharge_walk = { 6938 .pmd_entry = mem_cgroup_count_precharge_pte_range, 6939 .mm = mm, 6940 .private = vma, 6941 }; 6942 if (is_vm_hugetlb_page(vma)) 6943 continue; 6944 walk_page_range(vma->vm_start, vma->vm_end, 6945 &mem_cgroup_count_precharge_walk); 6946 } 6947 up_read(&mm->mmap_sem); 6948 6949 precharge = mc.precharge; 6950 mc.precharge = 0; 6951 6952 return precharge; 6953 } 6954 6955 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 6956 { 6957 unsigned long precharge = mem_cgroup_count_precharge(mm); 6958 6959 VM_BUG_ON(mc.moving_task); 6960 mc.moving_task = current; 6961 return mem_cgroup_do_precharge(precharge); 6962 } 6963 6964 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 6965 static void __mem_cgroup_clear_mc(void) 6966 { 6967 struct mem_cgroup *from = mc.from; 6968 struct mem_cgroup *to = mc.to; 6969 int i; 6970 6971 /* we must uncharge all the leftover precharges from mc.to */ 6972 if (mc.precharge) { 6973 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 6974 mc.precharge = 0; 6975 } 6976 /* 6977 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 6978 * we must uncharge here. 
6979 */ 6980 if (mc.moved_charge) { 6981 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 6982 mc.moved_charge = 0; 6983 } 6984 /* we must fixup refcnts and charges */ 6985 if (mc.moved_swap) { 6986 /* uncharge swap account from the old cgroup */ 6987 if (!mem_cgroup_is_root(mc.from)) 6988 res_counter_uncharge(&mc.from->memsw, 6989 PAGE_SIZE * mc.moved_swap); 6990 6991 for (i = 0; i < mc.moved_swap; i++) 6992 css_put(&mc.from->css); 6993 6994 if (!mem_cgroup_is_root(mc.to)) { 6995 /* 6996 * we charged both to->res and to->memsw, so we should 6997 * uncharge to->res. 6998 */ 6999 res_counter_uncharge(&mc.to->res, 7000 PAGE_SIZE * mc.moved_swap); 7001 } 7002 /* we've already done css_get(mc.to) */ 7003 mc.moved_swap = 0; 7004 } 7005 memcg_oom_recover(from); 7006 memcg_oom_recover(to); 7007 wake_up_all(&mc.waitq); 7008 } 7009 7010 static void mem_cgroup_clear_mc(void) 7011 { 7012 struct mem_cgroup *from = mc.from; 7013 7014 /* 7015 * we must clear moving_task before waking up waiters at the end of 7016 * task migration. 7017 */ 7018 mc.moving_task = NULL; 7019 __mem_cgroup_clear_mc(); 7020 spin_lock(&mc.lock); 7021 mc.from = NULL; 7022 mc.to = NULL; 7023 spin_unlock(&mc.lock); 7024 mem_cgroup_end_move(from); 7025 } 7026 7027 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 7028 struct cgroup_taskset *tset) 7029 { 7030 struct task_struct *p = cgroup_taskset_first(tset); 7031 int ret = 0; 7032 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7033 unsigned long move_charge_at_immigrate; 7034 7035 /* 7036 * We are now commited to this value whatever it is. Changes in this 7037 * tunable will only affect upcoming migrations, not the current one. 7038 * So we need to save it, and keep it going. 7039 */ 7040 move_charge_at_immigrate = memcg->move_charge_at_immigrate; 7041 if (move_charge_at_immigrate) { 7042 struct mm_struct *mm; 7043 struct mem_cgroup *from = mem_cgroup_from_task(p); 7044 7045 VM_BUG_ON(from == memcg); 7046 7047 mm = get_task_mm(p); 7048 if (!mm) 7049 return 0; 7050 /* We move charges only when we move a owner of the mm */ 7051 if (mm->owner == p) { 7052 VM_BUG_ON(mc.from); 7053 VM_BUG_ON(mc.to); 7054 VM_BUG_ON(mc.precharge); 7055 VM_BUG_ON(mc.moved_charge); 7056 VM_BUG_ON(mc.moved_swap); 7057 mem_cgroup_start_move(from); 7058 spin_lock(&mc.lock); 7059 mc.from = from; 7060 mc.to = memcg; 7061 mc.immigrate_flags = move_charge_at_immigrate; 7062 spin_unlock(&mc.lock); 7063 /* We set mc.moving_task later */ 7064 7065 ret = mem_cgroup_precharge_mc(mm); 7066 if (ret) 7067 mem_cgroup_clear_mc(); 7068 } 7069 mmput(mm); 7070 } 7071 return ret; 7072 } 7073 7074 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 7075 struct cgroup_taskset *tset) 7076 { 7077 mem_cgroup_clear_mc(); 7078 } 7079 7080 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 7081 unsigned long addr, unsigned long end, 7082 struct mm_walk *walk) 7083 { 7084 int ret = 0; 7085 struct vm_area_struct *vma = walk->private; 7086 pte_t *pte; 7087 spinlock_t *ptl; 7088 enum mc_target_type target_type; 7089 union mc_target target; 7090 struct page *page; 7091 struct page_cgroup *pc; 7092 7093 /* 7094 * We don't take compound_lock() here but no race with splitting thp 7095 * happens because: 7096 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not 7097 * under splitting, which means there's no concurrent thp split, 7098 * - if another thread runs into split_huge_page() just after we 7099 * entered this if-block, the thread must wait for page table lock 7100 * to be 
unlocked in __split_huge_page_splitting(), where the main 7101 * part of the thp split is not executed yet. 7102 */ 7103 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 7104 if (mc.precharge < HPAGE_PMD_NR) { 7105 spin_unlock(ptl); 7106 return 0; 7107 } 7108 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 7109 if (target_type == MC_TARGET_PAGE) { 7110 page = target.page; 7111 if (!isolate_lru_page(page)) { 7112 pc = lookup_page_cgroup(page); 7113 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, 7114 pc, mc.from, mc.to)) { 7115 mc.precharge -= HPAGE_PMD_NR; 7116 mc.moved_charge += HPAGE_PMD_NR; 7117 } 7118 putback_lru_page(page); 7119 } 7120 put_page(page); 7121 } 7122 spin_unlock(ptl); 7123 return 0; 7124 } 7125 7126 if (pmd_trans_unstable(pmd)) 7127 return 0; 7128 retry: 7129 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 7130 for (; addr != end; addr += PAGE_SIZE) { 7131 pte_t ptent = *(pte++); 7132 swp_entry_t ent; 7133 7134 if (!mc.precharge) 7135 break; 7136 7137 switch (get_mctgt_type(vma, addr, ptent, &target)) { 7138 case MC_TARGET_PAGE: 7139 page = target.page; 7140 if (isolate_lru_page(page)) 7141 goto put; 7142 pc = lookup_page_cgroup(page); 7143 if (!mem_cgroup_move_account(page, 1, pc, 7144 mc.from, mc.to)) { 7145 mc.precharge--; 7146 /* we uncharge from mc.from later. */ 7147 mc.moved_charge++; 7148 } 7149 putback_lru_page(page); 7150 put: /* get_mctgt_type() gets the page */ 7151 put_page(page); 7152 break; 7153 case MC_TARGET_SWAP: 7154 ent = target.ent; 7155 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 7156 mc.precharge--; 7157 /* we fix up refcnts and charges later. */ 7158 mc.moved_swap++; 7159 } 7160 break; 7161 default: 7162 break; 7163 } 7164 } 7165 pte_unmap_unlock(pte - 1, ptl); 7166 cond_resched(); 7167 7168 if (addr != end) { 7169 /* 7170 * We have consumed all the precharges we got in can_attach(). 7171 * We try charging one by one, but don't do any additional 7172 * charges to mc.to if we have already failed to charge once 7173 * during the attach() phase. 7174 */ 7175 ret = mem_cgroup_do_precharge(1); 7176 if (!ret) 7177 goto retry; 7178 } 7179 7180 return ret; 7181 } 7182 7183 static void mem_cgroup_move_charge(struct mm_struct *mm) 7184 { 7185 struct vm_area_struct *vma; 7186 7187 lru_add_drain_all(); 7188 retry: 7189 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 7190 /* 7191 * Someone holding the mmap_sem may be waiting on the waitq. 7192 * So we cancel all extra charges, wake up all waiters, 7193 * and retry. Because we cancel precharges, we might not be able 7194 * to move enough charges, but moving charge is a best-effort 7195 * feature anyway, so it wouldn't be a big problem. 7196 */ 7197 __mem_cgroup_clear_mc(); 7198 cond_resched(); 7199 goto retry; 7200 } 7201 for (vma = mm->mmap; vma; vma = vma->vm_next) { 7202 int ret; 7203 struct mm_walk mem_cgroup_move_charge_walk = { 7204 .pmd_entry = mem_cgroup_move_charge_pte_range, 7205 .mm = mm, 7206 .private = vma, 7207 }; 7208 if (is_vm_hugetlb_page(vma)) 7209 continue; 7210 ret = walk_page_range(vma->vm_start, vma->vm_end, 7211 &mem_cgroup_move_charge_walk); 7212 if (ret) 7213 /* 7214 * This means we have consumed all precharges and failed 7215 * to make an additional charge. Just abandon here.
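 * (walk_page_range() propagates the nonzero return value of the
 * pmd_entry callback, mem_cgroup_move_charge_pte_range(), which is
 * how the failure surfaces here.)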
7216 */ 7217 break; 7218 } 7219 up_read(&mm->mmap_sem); 7220 } 7221 7222 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 7223 struct cgroup_taskset *tset) 7224 { 7225 struct task_struct *p = cgroup_taskset_first(tset); 7226 struct mm_struct *mm = get_task_mm(p); 7227 7228 if (mm) { 7229 if (mc.to) 7230 mem_cgroup_move_charge(mm); 7231 mmput(mm); 7232 } 7233 if (mc.to) 7234 mem_cgroup_clear_mc(); 7235 } 7236 #else /* !CONFIG_MMU */ 7237 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 7238 struct cgroup_taskset *tset) 7239 { 7240 return 0; 7241 } 7242 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 7243 struct cgroup_taskset *tset) 7244 { 7245 } 7246 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 7247 struct cgroup_taskset *tset) 7248 { 7249 } 7250 #endif 7251 7252 /* 7253 * Cgroup retains root cgroups across [un]mount cycles making it necessary 7254 * to verify sane_behavior flag on each mount attempt. 7255 */ 7256 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 7257 { 7258 /* 7259 * use_hierarchy is forced with sane_behavior. cgroup core 7260 * guarantees that @root doesn't have any children, so turning it 7261 * on for the root memcg is enough. 7262 */ 7263 if (cgroup_sane_behavior(root_css->cgroup)) 7264 mem_cgroup_from_css(root_css)->use_hierarchy = true; 7265 } 7266 7267 struct cgroup_subsys mem_cgroup_subsys = { 7268 .name = "memory", 7269 .subsys_id = mem_cgroup_subsys_id, 7270 .css_alloc = mem_cgroup_css_alloc, 7271 .css_online = mem_cgroup_css_online, 7272 .css_offline = mem_cgroup_css_offline, 7273 .css_free = mem_cgroup_css_free, 7274 .can_attach = mem_cgroup_can_attach, 7275 .cancel_attach = mem_cgroup_cancel_attach, 7276 .attach = mem_cgroup_move_task, 7277 .bind = mem_cgroup_bind, 7278 .base_cftypes = mem_cgroup_files, 7279 .early_init = 0, 7280 }; 7281 7282 #ifdef CONFIG_MEMCG_SWAP 7283 static int __init enable_swap_account(char *s) 7284 { 7285 if (!strcmp(s, "1")) 7286 really_do_swap_account = 1; 7287 else if (!strcmp(s, "0")) 7288 really_do_swap_account = 0; 7289 return 1; 7290 } 7291 __setup("swapaccount=", enable_swap_account); 7292 7293 static void __init memsw_file_init(void) 7294 { 7295 WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files)); 7296 } 7297 7298 static void __init enable_swap_cgroup(void) 7299 { 7300 if (!mem_cgroup_disabled() && really_do_swap_account) { 7301 do_swap_account = 1; 7302 memsw_file_init(); 7303 } 7304 } 7305 7306 #else 7307 static void __init enable_swap_cgroup(void) 7308 { 7309 } 7310 #endif 7311 7312 /* 7313 * subsys_initcall() for memory controller. 7314 * 7315 * Some parts like hotcpu_notifier() have to be initialized from this context 7316 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 7317 * everything that doesn't depend on a specific mem_cgroup structure should 7318 * be initialized from here. 7319 */ 7320 static int __init mem_cgroup_init(void) 7321 { 7322 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 7323 enable_swap_cgroup(); 7324 mem_cgroup_soft_limit_tree_init(); 7325 memcg_stock_init(); 7326 return 0; 7327 } 7328 subsys_initcall(mem_cgroup_init); 7329