// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}
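
/*
 * For example, evenly splitting a region spanning 10 * DAMON_MIN_REGION bytes
 * into nr_pieces == 3 gives sz_piece == 3 * DAMON_MIN_REGION, so the region
 * becomes three regions of 3, 3, and 4 times DAMON_MIN_REGION; the last
 * region absorbs the rounding remainder.
 */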

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find the three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it that
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comment of '__damon_va_init_regions()' below for why this is
 * necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				    struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}
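
/*
 * Note that __damon_va_three_regions() walks the vma list under the mmap
 * lock held by its caller.  The 'rb_subtree_gap' check lets it skip whole
 * rbtree subtrees in which no gap can be larger than the current
 * second-biggest one, keeping the scan cheap even for address spaces with
 * many mappings.
 */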

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				  struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a few small portions of the entire address space are actually
 * mapped to memory and accessed, monitoring the unmapped regions is wasteful.
 * On the other hand, because we can tolerate a small amount of noise,
 * tracking every mapping is not strictly required; it could even incur a high
 * overhead if the mappings change frequently or the number of mappings is
 * high.  The adaptive regions adjustment mechanism will further help to deal
 * with the noise by simply identifying the unmapped areas as regions that
 * receive no access.  Moreover, applying the real mappings, which would have
 * many unmapped areas inside, would make the adaptive mechanism quite
 * complex.  Nevertheless, excessively huge unmapped areas inside the
 * monitoring target should be removed so that the adaptive mechanism does not
 * spend its time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that together cover every mapped area of the address space.  The two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address
 * space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of a process looks like below, the gap between the
 * heap and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack are the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in a usual address space,
 * excluding only these two biggest unmapped regions is a sufficient
 * trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				    struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_set_regions(t, three_regions, 3);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_trans_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
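
/*
 * The mkold walk above (and its hugetlb counterpart below) prepares an
 * access check for a single sampling address: the Accessed bit of the
 * mapping PTE/PMD is cleared, together with the page idle flag and any MMU
 * notifier young state, so that the later "young" walk can tell whether the
 * address was accessed in between.
 */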

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}
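
/*
 * While the preparation above clears the access indications, the "young"
 * walk below only tests them.  It also reports the size of the page mapping
 * the sampling address, so that one check result can be reused for every
 * region whose sampling address falls into the same page (see
 * __damon_va_check_access()).
 */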

struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_trans_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
				mmu_notifier_test_young(walk->mm, addr)) {
			*priv->page_sz = HPAGE_PMD_SIZE;
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
				struct damon_region *r, bool same_target)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;
	bool same_target;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		same_target = false;
		damon_for_each_region(r, t) {
			__damon_va_check_access(mm, r, same_target);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
			same_target = true;
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}
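
/*
 * Functions for applying DAMOS action schemes
 *
 * Each DAMOS action below is translated into an madvise() behavior and
 * applied to the target region via do_madvise().  If CONFIG_ADVISE_SYSCALLS
 * is disabled, the stub variant applies nothing and reports zero applied
 * bytes.
 */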

#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
			struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
			struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
}

subsys_initcall(damon_va_initcall);

#include "vaddr-test.h"