// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be a pointer to the relevant 'struct pid', on which a
 * reference is held.  The caller must put the returned task, unless it is
 * NULL.
 */
#define damon_get_task_struct(t) \
	(get_pid_task((struct pid *)(t)->id, PIDTYPE_PID))

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}
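
/*
 * Example usage (illustrative sketch, hypothetical caller): the returned
 * mm_struct holds a reference that must be dropped with mmput() after use:
 *
 *	struct mm_struct *mm = damon_get_mm(t);
 *
 *	if (mm) {
 *		... use mm, e.g., under mmap_read_lock(mm) ...
 *		mmput(mm);
 *	}
 */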

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}
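
/*
 * Worked example (illustrative numbers only): splitting [100, 145) into
 * nr_pieces == 4 with DAMON_MIN_REGION == 10 gives
 * sz_piece == ALIGN_DOWN(45 / 4, 10) == 10, so the region becomes
 * [100, 110), [110, 120), [120, 130), and [130, 145); the last piece is
 * extended to cover the rounding remainder.
 */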

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

static void swap_ranges(struct damon_addr_range *r1,
			struct damon_addr_range *r2)
{
	struct damon_addr_range tmp;

	tmp = *r1;
	*r1 = *r2;
	*r2 = tmp;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the result is saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of the '__damon_va_init_regions()' function below to
 * see why this is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find the two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap_ranges(&gap, &second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap_ranges(&second_gap, &first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap_ranges(&first_gap, &second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}
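
/*
 * Illustrative example (hypothetical address layout): for VMAs covering
 * [10, 20), [20, 25), [200, 210), [210, 220), [300, 305), and [307, 330),
 * the two biggest gaps are [25, 200) and [220, 300), so the resulting three
 * regions are [10, 25), [200, 220), and [300, 330), with each boundary
 * aligned to DAMON_MIN_REGION.
 */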

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  On the other hand, because we can tolerate small noises,
 * tracking every mapping is not strictly required, and could even incur a
 * high overhead if the mappings frequently change or the number of mappings
 * is high.  The adaptive regions adjustment mechanism will further help to
 * deal with the noise by simply identifying the unmapped areas as regions
 * that have no access.  Moreover, applying the real mappings, which would
 * have many unmapped areas inside, would make the adaptive mechanism quite
 * complex.  Nevertheless, excessively huge unmapped areas inside the
 * monitoring target should be removed so that the adaptive mechanism does
 * not spend time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  The two gaps between
 * the three regions are the two biggest unmapped areas in the given address
 * space.  In detail, this function first identifies the start and the end of
 * the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * Because the usual memory map of a process is as below, the gap between the
 * heap and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in a usual address space,
 * excluding these two biggest unmapped regions is sufficient as a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i;

	if (damon_va_three_regions(t, regions)) {
		pr_err("Failed to get three regions of target %lu\n", t->id);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}
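
/*
 * Sizing sketch (illustrative numbers only): if the three regions sum to
 * 120 MiB and ctx->min_nr_regions is 10, sz becomes 12 MiB, so each initial
 * region is evenly split into pieces of about 12 MiB, keeping the total
 * number of initial regions close to the user-specified minimum.
 */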

/* Initialize '->regions_list' of every target (task) */
void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region intersects an address range
 *
 * Returns true if it does.
 */
static bool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}
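
/*
 * For example, a region [10, 20) intersects [15, 25) but not [20, 30): the
 * ranges are half-open, so a region ending exactly where a range starts does
 * not count as intersecting.
 */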

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}
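
/*
 * Illustrative example (hypothetical ranges): if the target currently has
 * regions [10, 25), [200, 220), and [300, 330), and the new big regions are
 * [5, 27), [200, 220), and [296, 330), the first and last regions are simply
 * stretched to [5, 27) and [296, 330), while any region that intersects none
 * of the new big regions would be destroyed.
 */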

/*
 * Update regions for current memory mappings
 */
void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}
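
/*
 * Note (descriptive, added for clarity): the walk covers only the single
 * address range [addr, addr + 1), so each invocation ages exactly one page
 * (or huge page), namely the one containing the sampling address.
 */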

/*
 * Functions for the access checking of the regions
 */

static void damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}
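
/*
 * Note (descriptive, added for clarity): *page_sz is an out-parameter that
 * reports the size of the page backing 'addr'; for a mapped transparent
 * huge page it is the huge page size, and otherwise PAGE_SIZE.
 * damon_va_check_access() below uses it as the granularity of its
 * last-result cache.
 */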

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}
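
/*
 * Caching sketch (illustrative): if two adjacent regions of the same target
 * pick sampling addresses inside the same 2 MiB huge page, the second
 * damon_va_check_access() call reuses 'last_accessed' instead of walking the
 * page table again, because both addresses align down to the same
 * 'last_page_sz'-sized page.
 */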

unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
static int damos_madvise(struct damon_target *target, struct damon_region *r,
			int behavior)
{
	return -EINVAL;
}
#else
static int damos_madvise(struct damon_target *target, struct damon_region *r,
			int behavior)
{
	struct mm_struct *mm;
	int ret = -ENOMEM;

	mm = damon_get_mm(target);
	if (!mm)
		goto out;

	ret = do_madvise(mm, PAGE_ALIGN(r->ar.start),
			PAGE_ALIGN(r->ar.end - r->ar.start), behavior);
	mmput(mm);
out:
	return ret;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return -EINVAL;
	}

	return damos_madvise(t, r, madv_action);
}

int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t,
		struct damon_region *r, struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}
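
/*
 * Usage sketch (illustrative, hypothetical caller such as a DAMON API user
 * like the debugfs interface): after allocating a monitoring context, a
 * single call configures it to monitor virtual address spaces:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx)
 *		damon_va_set_primitives(ctx);
 */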

#include "vaddr-test.h"