// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
#define damon_get_task_struct(t) \
	(get_pid_task((struct pid *)t->id, PIDTYPE_PID))

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

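/*
 * Worked example of the split arithmetic above (editorial illustration; the
 * concrete numbers are hypothetical and assume DAMON_MIN_REGION == PAGE_SIZE):
 * splitting a ten-page region [A, A + 10 * PAGE_SIZE) with nr_pieces == 3
 * gives sz_piece = ALIGN_DOWN(10 * PAGE_SIZE / 3, PAGE_SIZE) = 3 * PAGE_SIZE,
 * so the result is
 *
 *	[A, A + 3 * PAGE_SIZE)
 *	[A + 3 * PAGE_SIZE, A + 6 * PAGE_SIZE)
 *	[A + 6 * PAGE_SIZE, A + 10 * PAGE_SIZE)
 *
 * where the last piece absorbs the rounding remainder, as handled by the
 * "complement last region" step.
 */
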
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

static void swap_ranges(struct damon_addr_range *r1,
			struct damon_addr_range *r2)
{
	struct damon_addr_range tmp;

	tmp = *r1;
	*r1 = *r2;
	*r2 = tmp;
}

/*
 * Find three regions separated by two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comment of '__damon_va_init_regions()' below to know why this
 * is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				    struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap_ranges(&gap, &second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap_ranges(&second_gap, &first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap_ranges(&first_gap, &second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

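/*
 * Worked example of the gap selection above (editorial illustration; the
 * addresses are hypothetical and DAMON_MIN_REGION alignment is ignored):
 * given three mappings [100, 200), [500, 600) and [620, 700), the gaps are
 * [200, 500) (size 300) and [600, 620) (size 20), so first_gap becomes
 * [200, 500) and second_gap becomes [600, 620).  The stored result is then
 *
 *	regions[0] = [100, 200)
 *	regions[1] = [500, 600)
 *	regions[2] = [620, 700)
 *
 * i.e. the mapped area minus its two biggest holes.
 */
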
/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  On the other hand, because we can tolerate a small amount of
 * noise, tracking every mapping is not strictly required; it could even incur
 * high overhead if the mappings change frequently or the number of mappings
 * is high.  The adaptive regions adjustment mechanism will further help to
 * deal with the noise by simply identifying the unmapped areas as regions
 * that have no access.  Moreover, applying the real mappings, which would
 * have many unmapped areas inside, would make the adaptive mechanism quite
 * complex.  Nevertheless, excessively huge unmapped areas inside the
 * monitoring target should be removed so that the adaptive mechanism does not
 * waste time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that together cover every mapped area of the address space.  The two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address
 * space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * Because the usual memory map of a process looks like below, the gap between
 * the heap and the uppermost mmap()-ed region, and the gap between the
 * lowermost mmap()-ed region and the stack, will be the two biggest unmapped
 * regions.  Because these gaps are exceptionally huge in a usual address
 * space, excluding only these two biggest unmapped regions is a sufficient
 * trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i;

	if (damon_va_three_regions(t, regions)) {
		pr_err("Failed to get three regions of target %lu\n", t->id);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

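/*
 * Worked example of the sizing arithmetic above (editorial illustration; the
 * concrete numbers are hypothetical): if the three regions total 300 MiB and
 * ctx->min_nr_regions is 10, 'sz' becomes 30 MiB (clamped to at least
 * DAMON_MIN_REGION).  A 120 MiB region then gets nr_pieces == 4 and is evenly
 * split into four ~30 MiB pieces, so the total number of initial regions over
 * the three big regions ends up close to ctx->min_nr_regions.
 */
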
/* Initialize '->regions_list' of every target (task) */
void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for current memory mappings
 */
void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

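/*
 * Rough per-sampling-interval flow (editorial summary of the functions below;
 * 'page_sz' here is only a placeholder for the out-parameter):
 *
 *	damon_va_prepare_access_check():
 *		r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
 *		damon_va_mkold(mm, r->sampling_addr);	// clear young/idle state
 *	... one sampling interval passes ...
 *	damon_va_check_access():
 *		if (damon_va_young(mm, r->sampling_addr, &page_sz))
 *			r->nr_accesses++;
 *
 * Only one page per region is sampled per interval, which keeps the overhead
 * bounded by the number of regions rather than the size of the address space.
 */
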
static void damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void damon_va_check_access(struct damon_ctx *ctx,
				struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

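/*
 * Example of the result caching above (editorial illustration): if two
 * regions of the same target happen to pick sampling addresses that fall in
 * the same PMD-mapped huge page, the first check walks the page table and
 * records last_page_sz as (1UL << HPAGE_PMD_SHIFT), and the second check
 * simply reuses last_accessed without another walk.  This works because
 * damon_va_check_accesses() visits the regions of one target consecutively.
 */
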
unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
static int damos_madvise(struct damon_target *target, struct damon_region *r,
			int behavior)
{
	return -EINVAL;
}
#else
static int damos_madvise(struct damon_target *target, struct damon_region *r,
			int behavior)
{
	struct mm_struct *mm;
	int ret = -ENOMEM;

	mm = damon_get_mm(target);
	if (!mm)
		goto out;

	ret = do_madvise(mm, PAGE_ALIGN(r->ar.start),
			PAGE_ALIGN(r->ar.end - r->ar.start), behavior);
	mmput(mm);
out:
	return ret;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		pr_warn("Wrong action %d\n", scheme->action);
		return -EINVAL;
	}

	return damos_madvise(t, r, madv_action);
}

int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t,
		struct damon_region *r, struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}

#include "vaddr-test.h"