/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger periodic per-memcg events; it is simpler and cheaper than using
 * jiffies or similar for that purpose.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_NR_STAT];
	unsigned long		events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long		nr_page_events;
	unsigned long		targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_NR_STAT];
	unsigned long		events_pending[NR_VM_EVENT_ITEMS];
};
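/*
 * Illustrative sketch only (not the actual flushing code, which lives in
 * mm/memcontrol.c and is driven by the cgroup rstat machinery): the "prev"
 * snapshots above let a CPU's contribution be folded upward without locks,
 * roughly like this, where fold_one() is a made-up helper and "vmstats" is
 * the aggregated structure embedded in struct mem_cgroup further down:
 *
 *	static void fold_one(struct memcg_vmstats *vmstats,
 *			     struct memcg_vmstats_percpu *pcpu, int i)
 *	{
 *		long delta = pcpu->state[i] - pcpu->state_prev[i];
 *
 *		if (delta) {
 *			pcpu->state_prev[i] = pcpu->state[i];
 *			vmstats->state[i] += delta;
 *		}
 *	}
 *
 * A real flush would also accumulate the delta into the parent's
 * state_pending[] so it propagates up the hierarchy.
 */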
struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which
						 * the soft limit is exceeded */
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot
						 * use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* Index of the threshold just below or equal to current usage */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};
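/*
 * Usage sketch (hypothetical caller, for illustration only): byte-sized
 * charges against an obj_cgroup go through obj_cgroup_charge() and
 * obj_cgroup_uncharge(), declared under CONFIG_MEMCG_KMEM further down
 * in this header:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *			... use the charged memory ...
 *			obj_cgroup_uncharge(objcg, size);
 *		}
 *		obj_cgroup_put(objcg);
 *	}
 */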
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it kills one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats	vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * Set > 0 if pages under this cgroup are being moved to another cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};
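/*
 * Illustrative only: current usage and limits are tracked in the
 * page_counter members above, in units of pages. A reader holding a
 * reference on the memcg can sample them like this ("max" is a field of
 * struct page_counter; READ_ONCE() matches how this header reads counter
 * fields elsewhere):
 *
 *	unsigned long usage = page_counter_read(&memcg->memory);
 *	unsigned long max = READ_ONCE(memcg->memory.max);
 *
 *	if (usage >= max)
 *		... the cgroup is at its hard limit ...
 */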
/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger batches may be needed on big-iron machines.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
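/*
 * Example of the locking rule stated above obj_cgroup_memcg() (sketch,
 * hypothetical caller): hold the RCU read lock for as long as the returned
 * memcg is used, since objcg->memcg may be swapped to the parent at any time:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... use memcg, e.g. mod_memcg_state(memcg, idx, val) ...
 *	rcu_read_unlock();
 */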
/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page as an
 * argument. It is meant for cases where it is not known whether a page
 * has an associated memory cgroup pointer, an object cgroups vector, or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
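/*
 * Usage sketch (illustrative): folio_memcg_rcu() must be called under
 * rcu_read_lock(), while folio_memcg() relies on one of the stabilizing
 * conditions listed above (folio lock, LRU isolation, lock_page_memcg(),
 * or an exclusive reference):
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);
 *	if (memcg)
 *		... inspect memcg without sleeping ...
 *	rcu_read_unlock();
 */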
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because the
	 * effective protection calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 *  A.elow = A.low
	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *  C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 *  A.elow = 0
	 *  B.elow = B.low
	 *  C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 *  B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
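/*
 * Sketch of how a reclaimer might consult the protection helpers above
 * (loosely modeled on mm/vmscan.c; not a verbatim copy):
 *
 *	if (mem_cgroup_below_min(memcg)) {
 *		... hard protection: skip this memcg entirely ...
 *	} else if (mem_cgroup_below_low(memcg)) {
 *		... best-effort protection: normally skip, and record the
 *		    breach with memcg_memory_event(memcg, MEMCG_LOW) if
 *		    reclaim is forced to dip below memory.low anyway ...
 *	}
 */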
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
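/*
 * Typical charge/uncharge pairing (illustrative sketch of a caller such as
 * a page-cache insertion path; error handling elided):
 *
 *	int err = mem_cgroup_charge(folio, mm, GFP_KERNEL);
 *
 *	if (err)
 *		return err;
 *	... install and use the folio ...
 *	... much later, when the folio is freed or truncated: ...
 *	mem_cgroup_uncharge(folio);
 */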
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on the folio's memcg binding (folio->memcg_data)
 * being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}
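/*
 * Hierarchy walk sketch using mem_cgroup_iter() declared above (this is the
 * usual pattern; mm/memcontrol.c wraps it in a similar loop):
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		... visit iter (the root itself is included) ...
 *	} while ((iter = mem_cgroup_iter(root, iter, NULL)));
 *
 * Breaking out of the loop early requires mem_cgroup_iter_break(root, iter)
 * to drop the reference held on the current position.
 */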
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}
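/*
 * Illustrative use of the stat helpers above (hypothetical caller): @idx
 * may be either an enum memcg_stat_item or an enum node_stat_item value,
 * and the non-__ variant handles irq save/restore itself:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);	(charge)
 *	...
 *	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);	(undo)
 */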
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return READ_ONCE(memcg->vmstats.state[idx]);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
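/*
 * Example (illustrative): memory.events entries are emitted by pointing at
 * the memcg directly, or via the owning mm when that is more convenient.
 * A charge path hitting the hard limit might do:
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 *
 * while a fault path that only has the mm at hand can use
 * memcg_memory_event_mm(), defined just below:
 *
 *	memcg_memory_event_mm(mm, MEMCG_OOM);
 */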
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
							unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}
static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
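/*
 * Batching sketch for the relock helpers above (illustrative, similar in
 * spirit to what mm/vmscan.c and mm/mlock.c do when walking a list of
 * folios that may belong to different lruvecs):
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	struct folio *folio;
 *
 *	list_for_each_entry(folio, &folio_list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... operate on folio under its lruvec lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */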
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
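/*
 * Usage sketch (illustrative, loosely following how the networking code
 * consults memcg pressure): a socket charged to a memcg checks both the
 * static key and the per-socket memcg before backing off:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		... treat the socket as under memory pressure ...
 */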
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmemcg_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */