xref: /openbmc/linux/mm/page_idle.c (revision 82e6fdd6)
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

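/*
 * The bitmap exposed via sysfs is accessed in BITMAP_CHUNK_SIZE units:
 * bit i of chunk k corresponds to pfn k * BITMAP_CHUNK_BITS + i, and
 * reads or writes whose offset or length is not a multiple of the chunk
 * size are rejected with -EINVAL.
 */
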
/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct page *page_idle_get_page(unsigned long pfn)
{
	struct page *page;
	struct zone *zone;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!page || !PageLRU(page) ||
	    !get_page_unless_zero(page))
		return NULL;

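	/*
	 * The PageLRU check above was done without any locking and is racy:
	 * recheck under the zone lru_lock and drop the reference if the
	 * page has since left the LRU.
	 */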
	zone = page_zone(page);
	spin_lock_irq(zone_lru_lock(zone));
	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	spin_unlock_irq(zone_lru_lock(zone));
	return page;
}

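/*
 * rmap_walk() callback: test and clear the accessed bit in every pte (or
 * pmd, for THP) mapping the page in this vma.  If any mapping had been
 * referenced, move the page from idle to young so the access is not lost
 * to page reclaim.
 */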
static bool page_idle_clear_pte_refs_one(struct page *page,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			referenced = ptep_clear_young_notify(vma, addr,
					pvmw.pte);
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			referenced = pmdp_clear_young_notify(vma, addr,
					pvmw.pmd);
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		clear_page_idle(page);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * page_referenced() will return > 0.
		 */
		set_page_young(page);
	}
	return true;
}

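/*
 * Walk all mappings of the page and clear their accessed bits.  The page
 * lock is only taken when rmap_walk() needs it (file-backed and KSM
 * pages); if it cannot be acquired without blocking, the page is simply
 * skipped.
 */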
static void page_idle_clear_pte_refs(struct page *page)
{
	/*
	 * Since rwc.arg is unused, rwc is effectively immutable, so we
	 * can make it static const to save some cycles and stack.
	 */
	static const struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page_mapped(page) ||
	    !page_rmapping(page))
		return;

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		return;

	rmap_walk(page, (struct rmap_walk_control *)&rwc);

	if (need_lock)
		unlock_page(page);
}

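/*
 * Read handler for /sys/kernel/mm/page_idle/bitmap.  For every pfn in the
 * requested range, report 1 if the corresponding page is a user page that
 * has not been accessed since it was last marked idle, 0 otherwise.
 */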
static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		page = page_idle_get_page(pfn);
		if (page) {
			if (page_is_idle(page)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(page);
				if (page_is_idle(page))
					*out |= 1ULL << bit;
			}
			put_page(page);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}

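/*
 * Write handler for /sys/kernel/mm/page_idle/bitmap.  For every set bit in
 * the input, clear the accessed bits of the corresponding page's mappings
 * and mark the page idle, so that a later read can tell whether the page
 * has been referenced since.
 */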
static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			page = page_idle_get_page(pfn);
			if (page) {
				page_idle_clear_pte_refs(page);
				set_page_idle(page);
				put_page(page);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, S_IRUSR | S_IWUSR,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};
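
/*
 * Illustrative userspace sketch (not part of the kernel code in this
 * file): the bitmap is exposed as /sys/kernel/mm/page_idle/bitmap and
 * must be read and written at 8-byte granularity, e.g.:
 *
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *	unsigned long long chunk = ~0ULL;
 *	pwrite(fd, &chunk, sizeof(chunk), 0);	marks pfns 0..63 idle
 *	... let the workload run for a while ...
 *	pread(fd, &chunk, sizeof(chunk), 0);	bits still set were not accessed
 */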

#ifndef CONFIG_64BIT
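/*
 * On 64-bit kernels the young and idle bits are kept directly in
 * page->flags.  32-bit kernels have no spare page flags, so the bits are
 * stored in page_ext instead, which is why this page_ext client is only
 * needed when CONFIG_64BIT is not set.
 */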
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);