/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};
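/*
 * Illustrative sketch (not part of this header's API): the state_prev[]
 * fields above exist so that per-CPU counts can be folded into the
 * aggregated lruvec_stats without locks, by propagating only the delta
 * since the last flush. Roughly, a flusher does something like:
 *
 *	long new = READ_ONCE(percpu->state[i]);
 *	long delta = new - percpu->state_prev[i];
 *
 *	if (delta) {
 *		percpu->state_prev[i] = new;
 *		parent_stats->state_pending[i] += delta;
 *	}
 *
 * The real flushing code lives in mm/memcontrol.c; "percpu" and
 * "parent_stats" here are placeholders for the structures above.
 */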
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember the four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};
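/*
 * Illustrative sketch (assumptions noted inline): a typical byte-sized
 * charge against the current task's obj_cgroup might look roughly like
 * the following, using the obj_cgroup helpers declared later in this
 * header. Error handling and the !CONFIG_MEMCG_KMEM case are omitted.
 *
 *	struct obj_cgroup *objcg;
 *
 *	objcg = get_obj_cgroup_from_current();	// takes a reference
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *			// 'size' bytes are now accounted to objcg's memcg
 *			...
 *			obj_cgroup_uncharge(objcg, size);
 *		}
 *		obj_cgroup_put(objcg);
 *	}
 *
 * Because live objects pin only the obj_cgroup (not the memcg directly),
 * the whole bucket can be reparented when the cgroup is destroyed, as the
 * comment above describes.
 */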
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int	oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats	*vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};
/*
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger values on big systems, or to size
 * this dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
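/*
 * Illustrative sketch of the memcg_data encoding used by the helpers
 * above (not an additional API): the low MEMCG_DATA_FLAGS_MASK bits of
 * folio->memcg_data carry flags, and the remaining bits are a pointer to
 * either a struct mem_cgroup, a struct obj_cgroup, or an objcgs vector,
 * depending on which flags are set. Decoding is just:
 *
 *	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
 *	void *ptr = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 *
 *	if (memcg_data & MEMCG_DATA_OBJCGS)
 *		// ptr is a vector of obj_cgroup pointers (slab folio)
 *	else if (memcg_data & MEMCG_DATA_KMEM)
 *		// ptr is a struct obj_cgroup (kernel, non-slab folio)
 *	else
 *		// ptr is a struct mem_cgroup (LRU folio)
 */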
/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer, an object cgroup vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
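/*
 * Illustrative sketch (not an additional API): per the comments above,
 * a caller that only holds the RCU read lock should use folio_memcg_rcu()
 * and must not dereference the memcg after rcu_read_unlock() unless it
 * has taken its own reference, e.g.:
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);
 *	if (memcg && css_tryget(&memcg->css)) {
 *		rcu_read_unlock();
 *		// memcg is now pinned and can be used outside RCU
 *		css_put(&memcg->css);
 *	} else {
 *		rcu_read_unlock();
 *	}
 *
 * Callers that hold the folio lock or have isolated the folio from the
 * LRU can use folio_memcg() directly, as listed above.
 */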
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * the mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 *  A.elow = A.low
	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *  C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 *  A.elow = 0
	 *  B.elow = B.low
	 *  C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 *  B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);
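/*
 * Illustrative sketch (not an additional API): a reclaimer walking the
 * hierarchy is expected to refresh the effective protection values and
 * then consult them via the helpers that follow (mem_cgroup_unprotected(),
 * mem_cgroup_below_min() and mem_cgroup_below_low()), roughly:
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg))
 *		// hard protection: skip this memcg entirely
 *	else if (mem_cgroup_below_low(target_memcg, memcg))
 *		// soft protection: skip unless protection must be overridden
 *
 * The actual policy (including memory.low overcommit handling) lives in
 * mm/vmscan.c; "target_memcg" here stands for the root of the reclaim walk.
 */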
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
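/*
 * Illustrative sketch (not an additional API): the usual pattern for a
 * newly allocated pagecache or anon folio is to charge it right after
 * allocation, and to drop the reference (which uncharges) on failure,
 * roughly:
 *
 *	folio = filemap_alloc_folio(gfp, order);	// hypothetical caller
 *	if (!folio)
 *		return -ENOMEM;
 *
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	// folio is now accounted to mm's memcg (or the active memcg if
 *	// mm == NULL); swapped-in folios must use
 *	// mem_cgroup_swapin_charge_folio() instead, as noted above.
 */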
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}
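/*
 * Illustrative sketch (not an additional API): mem_cgroup_iter() above is
 * normally consumed as a pre-order walk over a root's subtree, with
 * mem_cgroup_iter_break() used when bailing out early so that the css
 * reference held on the current position is dropped:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		// work on memcg, e.g. reclaim from its lruvecs
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * Passing a mem_cgroup_reclaim_cookie instead of NULL makes the walk
 * resumable and shared between concurrent reclaimers.
 */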
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}
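/*
 * Illustrative sketch (not an additional API): mem_cgroup_trylock_pages()
 * pairs with mem_cgroup_unlock_pages() and, per the comment above, keeps
 * folio_memcg() stable against charge moving for folios charged to @memcg
 * while it is held. On failure it has already dropped the RCU read lock:
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;		// charges are being moved, try again later
 *
 *	// folio_memcg(folio) is stable for folios charged to memcg here
 *	...
 *	mem_cgroup_unlock_pages();
 */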
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_delayed(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
					    enum vm_event_item idx,
					    unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}
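/*
 * Illustrative sketch (not an additional API): the __mod_*()/__count_*()
 * variants above are intended for contexts that are already serialized
 * against interrupts, while the plain variants wrap them with
 * local_irq_save()/local_irq_restore(). For example, accounting a kernel
 * allocation against its lruvec could look roughly like:
 *
 *	mod_lruvec_kmem_state(ptr, NR_SLAB_UNRECLAIMABLE_B, size);
 *
 *	local_irq_disable();
 *	__mod_lruvec_kmem_state(ptr, NR_SLAB_UNRECLAIMABLE_B, size);
 *	local_irq_enable();
 *
 * NR_SLAB_UNRECLAIMABLE_B is just one example of an enum node_stat_item.
 */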
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
						       unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_delayed(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}
static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
					    enum vm_event_item idx,
					    unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
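/*
 * Illustrative sketch (not an additional API): the relock helpers above
 * let a caller batch LRU work across folios that may live on different
 * lruvecs while only dropping and retaking the lru_lock when the lruvec
 * actually changes:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	struct folio *folio;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		// folio's lruvec is locked here; move/rotate/delete it
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 *
 * The "folios" list here is a placeholder for whatever batch the caller
 * has assembled (compare the putback/release paths in mm/swap.c).
 */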
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}
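/*
 * Illustrative sketch (not an additional API): memcg_kmem_charge_page()
 * and memcg_kmem_uncharge_page() bracket the lifetime of a kernel page
 * that should be accounted to the allocating task's memcg (the page
 * allocator does this internally for __GFP_ACCOUNT allocations):
 *
 *	page = alloc_pages(gfp, order);		// hypothetical caller
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);	// before freeing the page
 */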
/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_enabled())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */