xref: /openbmc/linux/mm/damon/vaddr.c (revision 81f0895f)
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be a pointer to the relevant 'struct pid', which holds a
 * reference count.  The caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task((struct pid *)t->id, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * The caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, or NULL on failure.
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}
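
/*
 * Typical caller pattern (an illustrative sketch only, mirroring
 * damon_va_three_regions() below):
 *
 *	mm = damon_get_mm(t);
 *	if (!mm)
 *		return -EINVAL;
 *	mmap_read_lock(mm);
 *	... inspect the address space ...
 *	mmap_read_unlock(mm);
 *	mmput(mm);
 */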

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}
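
/*
 * Worked example (illustrative only, assuming DAMON_MIN_REGION is 4096):
 * splitting [0, 40960) into three pieces gives sz_piece ==
 * ALIGN_DOWN(40960 / 3, 4096) == 12288, so the resulting regions are
 * [0, 12288), [12288, 24576), and [24576, 40960); the last region absorbs
 * the rounding remainder.
 */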

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of the '__damon_va_init_regions()' function below to
 * know why this is necessary.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find the two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			/*
			 * Skip this vma's subtree; no gap in it can be
			 * bigger than the current second biggest gap.
			 */
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  On the other hand, because the monitoring can tolerate small
 * noise, tracking every mapping is not strictly required, and doing so could
 * even incur a high overhead if the mappings change frequently or the number
 * of mappings is high.  The adaptive regions adjustment mechanism will
 * further help to deal with the noise by simply identifying the unmapped
 * areas as regions that have no access.  Moreover, applying the real
 * mappings, which would have many unmapped areas inside, would make the
 * adaptive mechanism quite complex.  Nonetheless, excessively huge unmapped
 * areas inside the monitoring target should be removed so that the adaptive
 * mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  The two gaps between
 * the three regions are the two biggest unmapped areas in the given address
 * space.  In detail, this function first identifies the start and the end of
 * the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of processes is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in usual address spaces,
 * excluding these two biggest unmapped regions is a sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i;

	if (damon_va_three_regions(t, regions)) {
		pr_err("Failed to get three regions of target %lu\n", t->id);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region intersects an address range
 *
 * Note that both the region and the range are half-open intervals; e.g.,
 * [10, 20) intersects [15, 30) but not [20, 30).
 *
 * Returns true if it does.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions to fit the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions anymore */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for the current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

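/*
 * Clear the accessed ("young") state of the page table entry or hugepage
 * mapping at @addr, so that a later check can tell whether the page has been
 * accessed in between.  Called back from walk_page_range().
 */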
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		/* recheck under the lock, as the pmd could have been split */
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
};

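/* Clear the accessed state of the page that contains @addr in @mm */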
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

/*
 * Prepare an access check on a region: pick a random sampling address in the
 * region and clear its accessed state.
 */
static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

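/*
 * Result buffer for an access ("young") check page table walk.  'page_sz' is
 * set to the size of the page that was checked, and 'young' to whether an
 * access was found.
 */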
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

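/*
 * Check whether the page mapped at @addr has been accessed since it was last
 * marked old (see damon_mkold_pmd_entry() above), consulting the young bit,
 * the idle page tracking state, and the mmu notifiers.  Handles both
 * hugepage and regular page mappings.  Called back from walk_page_range().
 */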
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		/* recheck under the lock, as the pmd could have been split */
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
};

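/*
 * Check whether the page that contains @addr in @mm has been accessed since
 * the last damon_va_mkold() call for it.  '*page_sz' is set to the size of
 * the checked page.
 */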
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

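/*
 * Check the access status of every region of every target, and return the
 * highest 'nr_accesses' value that was found.
 */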
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

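/*
 * Apply the given madvise() behavior to the region of the target.  Returns
 * the number of bytes that the advice was applied to, or zero when the
 * advice failed or madvise() is not supported (CONFIG_ADVISE_SYSCALLS
 * unset).
 */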
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

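/*
 * Apply the given scheme's action to the region by translating the DAMOS
 * action into the corresponding madvise() behavior.  DAMOS_STAT and unknown
 * actions are no-ops.  Returns the number of bytes the action was applied
 * to.
 */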
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}
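
/*
 * Usage sketch (hypothetical caller, assuming a context allocated with
 * damon_new_ctx() from the DAMON core):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx)
 *		damon_va_set_primitives(ctx);
 */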

#include "vaddr-test.h"