// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
					    struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}

struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
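		/*
		 * The folio is not mapped via page tables, so the rmap walk
		 * below could not see it.  Fall back to the page-idle flag:
		 * damon_pa_mkold() set it for this case, and any access via
		 * folio_mark_accessed() since then would have cleared it.
		 */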
		if (folio_test_idle(folio))
			result.accessed = false;
		else
			result.accessed = true;
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

static void __damon_pa_check_access(struct damon_ctx *ctx,
				    struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

static unsigned long damon_pa_pageout(struct damon_region *r)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		mark_page_accessed(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		deactivate_page(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r);
	default:
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
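	/*
	 * Score colder regions higher for the reclamation-style actions
	 * (DAMOS_PAGEOUT, DAMOS_LRU_DEPRIO) and hotter regions higher for
	 * DAMOS_LRU_PRIO, so quota-limited schemes apply their action to
	 * the most relevant regions first.
	 */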
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);
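/*
 * A minimal usage sketch, assuming CONFIG_DAMON_SYSFS is enabled: once this
 * operations set is registered, user space can select it for a monitoring
 * context through the DAMON sysfs interface, e.g.:
 *
 *	# echo paddr > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/operations
 */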