/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * page_is_file_lru - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
 * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
 * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
 * functions that manipulate the LRU lists, to sort a page onto the right LRU
 * list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_lru(struct page *page)
{
	return !PageSwapBacked(page);
}

/*
 * __update_lru_size - adjust LRU size accounting for @lruvec.
 * @lruvec:   the lruvec whose counters are updated
 * @lru:      which LRU list the pages are (being put) on
 * @zid:      zone index the pages belong to
 * @nr_pages: signed delta; negative when pages are being removed
 *
 * Updates the per-lruvec NR_LRU_BASE counter and the owning zone's
 * NR_ZONE_LRU_BASE counter.  Does not touch the memcg LRU size; use
 * update_lru_size() for that.
 */
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

/*
 * update_lru_size - as __update_lru_size(), but when memory cgroups are
 * configured it also keeps the memcg per-zone LRU size in sync via
 * mem_cgroup_update_lru_size().
 */
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/*
 * add_page_to_lru_list - link @page at the head of @lruvec's @lru list,
 * accounting thp_nr_pages(@page) pages in the LRU size counters.
 *
 * NOTE(review): the list manipulation here is unlocked, so callers
 * presumably hold the appropriate LRU lock -- confirm against callers.
 */
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}

/*
 * add_page_to_lru_list_tail - as add_page_to_lru_list(), but link @page
 * at the tail of the list instead of the head.
 */
static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

/*
 * del_page_from_lru_list - unlink @page from @lruvec's @lru list and
 * subtract thp_nr_pages(@page) from the LRU size counters.
 */
static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
}

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_lru(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

/**
 * page_off_lru - which LRU list was page on? clearing its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			/* active list index is base type + LRU_ACTIVE offset */
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
#endif /* LINUX_MM_INLINE_H */