xref: /openbmc/linux/mm/damon/paddr.c (revision 54a611b6)
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

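/*
 * rmap_walk() callback for damon_pa_mkold(): clear the accessed bit of
 * every PTE or PMD that maps @folio within @vma, so that a later check can
 * tell whether the mapping was used in between.  Always returns true so
 * that the walk continues over the remaining VMAs.
 */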
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

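/*
 * Mark the page at physical address @paddr as "old".  An unmapped folio
 * only gets its idle flag set; for a mapped one, the accessed bits of all
 * of its mappings are cleared through a reverse-mapping walk.  rmap_walk()
 * requires non-anonymous and KSM folios to be locked, so the walk is
 * skipped when such a folio's lock cannot be taken without blocking.
 */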
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

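/*
 * Pick a random sampling address within @r and age the page backing it,
 * so that the next check can tell whether the page was accessed.
 */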
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
					    struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

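/* Prepare access checks for every region of every monitoring target. */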
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}

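/*
 * Result of a single access check: whether an access was found, and the
 * size of the mapping (PAGE_SIZE, or HPAGE_PMD_SIZE for a PMD mapping)
 * that the result is valid for.
 */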
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

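/*
 * rmap_walk() callback for damon_pa_young(): report @folio as accessed if
 * any mapping in @vma has a young PTE/PMD, if the folio's idle flag was
 * cleared, or if a secondary MMU (e.g. a KVM guest, via the MMU notifier)
 * saw an access.  Returning false terminates the walk early once an access
 * has been found.
 */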
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

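/*
 * Check whether the page at physical address @paddr was accessed since it
 * was last marked old.  An unmapped folio is judged by its idle flag
 * alone.  When the check completes, *@page_sz is set to the size of the
 * area the result is valid for, so the caller can reuse the result for
 * nearby addresses.
 */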
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		result.accessed = !folio_test_idle(folio);
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

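/*
 * Check access to @r's sampling address and bump its access count if the
 * page was used.  The previous result is cached in static variables and
 * reused when the current sampling address falls in the same (possibly
 * huge) page, avoiding a redundant rmap walk.
 */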
static void __damon_pa_check_access(struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

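/*
 * Check accesses to all regions of all targets and return the highest
 * access count that was observed.
 */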
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

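/*
 * DAMOS_PAGEOUT action: reclaim the pages of @r.  The reference bits are
 * cleared first so that the pages look cold to reclaim, evictable pages
 * are isolated from their LRU lists, and the whole batch is handed to
 * reclaim_pages().  Returns the number of bytes that were reclaimed.
 */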
static unsigned long damon_pa_pageout(struct damon_region *r)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

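/*
 * DAMOS_LRU_PRIO action: run mark_page_accessed() on every page of @r so
 * that hot pages move toward, or stay on, the active LRU list.  Returns
 * the number of bytes the action was applied to.
 */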
static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		mark_page_accessed(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

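/*
 * DAMOS_LRU_DEPRIO action: deactivate every page of @r, moving cold pages
 * to the inactive LRU list so that they become reclaim candidates sooner.
 * Returns the number of bytes the action was applied to.
 */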
static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		deactivate_page(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

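/*
 * Apply @scheme's action to region @r.  Actions that this operations set
 * does not support are silently ignored.  Returns the number of bytes the
 * action was applied to.
 */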
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r);
	default:
		break;
	}
	return 0;
}

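/*
 * Compute the apply-priority score of @r under @scheme.  Regions to be
 * paged out or deprioritized are scored by coldness, regions to be
 * prioritized by hotness.  Actions without a scoring function get
 * DAMOS_MAX_SCORE, i.e. all regions are treated equally.
 */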
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

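/*
 * Register the monitoring operations for the physical address space.
 * Callbacks this operations set does not need (address space init/update,
 * aggregation reset, target validation, and cleanup) are left NULL.
 */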
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);