// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
#define damon_get_task_struct(t) \
	(get_pid_task((struct pid *)t->id, PIDTYPE_PID))

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
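 *
 * For example (illustrative numbers, assuming DAMON_MIN_REGION is 1024):
 * splitting a [0, 10240) region into three pieces gives a piece size of
 * ALIGN_DOWN(10240 / 3, 1024) == 3072, so the region becomes [0, 3072),
 * [3072, 6144), and [6144, 10240), with the last piece absorbing the
 * rounding remainder.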
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

static void swap_ranges(struct damon_addr_range *r1,
			struct damon_addr_range *r2)
{
	struct damon_addr_range tmp;

	tmp = *r1;
	*r1 = *r2;
	*r2 = tmp;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it that
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comment of '__damon_va_init_regions()' below to know why this
 * is necessary.
 *
 * Returns 0 on success, or a negative error code otherwise.
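 *
 * For example (illustrative), if the mapped areas of the space are [a, b),
 * [c, d), and [e, f) with a < b < c < d < e < f, the gaps are [b, c) and
 * [d, e).  Those become the two separators, so the returned ranges are
 * [a, b), [c, d), and [e, f), with each boundary rounded up to a
 * DAMON_MIN_REGION multiple.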
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap_ranges(&gap, &second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap_ranges(&second_gap, &first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap_ranges(&first_gap, &second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only small portions of the entire address space are actually mapped
 * to memory and accessed, monitoring the unmapped regions is wasteful.  On the
 * other hand, because we can tolerate small noise, tracking every mapping
 * precisely is not strictly required, and could even incur high overhead if
 * the mappings change frequently or the number of mappings is high.  The
 * adaptive regions adjustment mechanism will further help to deal with the
 * noise by simply identifying the unmapped areas as regions that receive no
 * access.  Moreover, applying the real mappings, which would have many
 * unmapped areas inside, would make the adaptive mechanism quite complex.
 * Still, excessively large unmapped areas inside the monitoring target should
 * be removed so that the adaptive mechanism does not waste time on them.
 *
 * For these reasons, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  The two gaps between the
 * three regions are the two biggest unmapped areas in the given address space.
 * In detail, this function first identifies the start and the end of the
 * mappings and the two biggest unmapped areas of the address space.  Then, it
 * constructs the three regions as below:
 *
 * [mappings[0]->start, big_two_unmapped_areas[0]->start)
 * [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 * [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * Because the usual memory map of a process is as below, the gap between the
 * heap and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in a usual address space,
 * excluding only these two biggest unmapped regions is already a sufficient
 * trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i;

	if (damon_va_three_regions(t, regions)) {
		pr_err("Failed to get three regions of target %lu\n", t->id);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
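 *
 * For example, a region [10, 20) intersects the range [15, 30), but it does
 * not intersect [20, 30), because the end addresses are exclusive.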
 */
static bool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i = 0;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
						     DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for current memory mappings
 */
void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

static void damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
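/*
 * For example, when two consecutively checked regions of the same target have
 * sampling addresses within the same (huge) page, the page table walk is done
 * only once; the result cached in the 'last_*' static variables below is
 * reused for the second region.
 */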
static void damon_va_check_access(struct damon_ctx *ctx,
				struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
static int damos_madvise(struct damon_target *target, struct damon_region *r,
			int behavior)
{
	return -EINVAL;
}
#else
static int damos_madvise(struct damon_target *target, struct damon_region *r,
			int behavior)
{
	struct mm_struct *mm;
	int ret = -ENOMEM;

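	/* the target may already have exited; then there is no mm to madvise() */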
	mm = damon_get_mm(target);
	if (!mm)
		goto out;

	ret = do_madvise(mm, PAGE_ALIGN(r->ar.start),
			PAGE_ALIGN(r->ar.end - r->ar.start), behavior);
	mmput(mm);
out:
	return ret;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		pr_warn("Wrong action %d\n", scheme->action);
		return -EINVAL;
	}

	return damos_madvise(t, r, madv_action);
}

void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
}

#include "vaddr-test.h"