xref: /openbmc/linux/mm/damon/core.c (revision a9da6dda)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sjpark@amazon.de>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16 
17 #define CREATE_TRACE_POINTS
18 #include <trace/events/damon.h>
19 
20 #ifdef CONFIG_DAMON_KUNIT_TEST
21 #undef DAMON_MIN_REGION
22 #define DAMON_MIN_REGION 1
23 #endif
24 
25 static DEFINE_MUTEX(damon_lock);
26 static int nr_running_ctxs;
27 static bool running_exclusive_ctxs;
28 
29 static DEFINE_MUTEX(damon_ops_lock);
30 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
31 
32 static struct kmem_cache *damon_region_cache __ro_after_init;
33 
34 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
35 static bool __damon_is_registered_ops(enum damon_ops_id id)
36 {
37 	struct damon_operations empty_ops = {};
38 
39 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
40 		return false;
41 	return true;
42 }
43 
44 /**
45  * damon_is_registered_ops() - Check if a given damon_operations is registered.
46  * @id:	Id of the damon_operations to check if registered.
47  *
48  * Return: true if the ops is set, false otherwise.
49  */
50 bool damon_is_registered_ops(enum damon_ops_id id)
51 {
52 	bool registered;
53 
54 	if (id >= NR_DAMON_OPS)
55 		return false;
56 	mutex_lock(&damon_ops_lock);
57 	registered = __damon_is_registered_ops(id);
58 	mutex_unlock(&damon_ops_lock);
59 	return registered;
60 }
61 
62 /**
63  * damon_register_ops() - Register a monitoring operations set to DAMON.
64  * @ops:	monitoring operations set to register.
65  *
66  * This function registers a monitoring operations set having a valid &struct
67  * damon_operations->id so that others can find and use it later.
68  *
69  * Return: 0 on success, negative error code otherwise.
70  */
71 int damon_register_ops(struct damon_operations *ops)
72 {
73 	int err = 0;
74 
75 	if (ops->id >= NR_DAMON_OPS)
76 		return -EINVAL;
77 	mutex_lock(&damon_ops_lock);
78 	/* Fail for already registered ops */
79 	if (__damon_is_registered_ops(ops->id)) {
80 		err = -EINVAL;
81 		goto out;
82 	}
83 	damon_registered_ops[ops->id] = *ops;
84 out:
85 	mutex_unlock(&damon_ops_lock);
86 	return err;
87 }
88 
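/*
 * Example: a minimal sketch of registering an operations set from the init
 * code of a monitoring operations implementation.  The ops id, the callback
 * names, and the init function below are illustrative placeholders rather
 * than definitions from this file.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		.init = my_init,
 *		.update = my_update,
 *		.prepare_access_checks = my_prepare_access_checks,
 *		.check_accesses = my_check_accesses,
 *		.target_valid = my_target_valid,
 *		.cleanup = my_cleanup,
 *	};
 *
 *	static int __init my_ops_init(void)
 *	{
 *		return damon_register_ops(&my_ops);
 *	}
 */
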
89 /**
90  * damon_select_ops() - Select a monitoring operations to use with the context.
91  * @ctx:	monitoring context to use the operations.
92  * @id:		id of the registered monitoring operations to select.
93  *
94  * This function finds the registered monitoring operations set of @id and
95  * makes @ctx use it.
96  *
97  * Return: 0 on success, negative error code otherwise.
98  */
99 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
100 {
101 	int err = 0;
102 
103 	if (id >= NR_DAMON_OPS)
104 		return -EINVAL;
105 
106 	mutex_lock(&damon_ops_lock);
107 	if (!__damon_is_registered_ops(id))
108 		err = -EINVAL;
109 	else
110 		ctx->ops = damon_registered_ops[id];
111 	mutex_unlock(&damon_ops_lock);
112 	return err;
113 }
114 
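/*
 * Example: a hedged sketch of pairing a fresh context with a registered
 * operations set.  DAMON_OPS_PADDR is used for illustration only; any
 * registered id works the same way.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
 *		damon_destroy_ctx(ctx);
 *		return -EINVAL;
 *	}
 */
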
115 /*
116  * Construct a damon_region struct
117  *
118  * Returns the pointer to the new struct if success, or NULL otherwise
119  */
120 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
121 {
122 	struct damon_region *region;
123 
124 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
125 	if (!region)
126 		return NULL;
127 
128 	region->ar.start = start;
129 	region->ar.end = end;
130 	region->nr_accesses = 0;
131 	INIT_LIST_HEAD(&region->list);
132 
133 	region->age = 0;
134 	region->last_nr_accesses = 0;
135 
136 	return region;
137 }
138 
139 void damon_add_region(struct damon_region *r, struct damon_target *t)
140 {
141 	list_add_tail(&r->list, &t->regions_list);
142 	t->nr_regions++;
143 }
144 
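/*
 * Example: constructing a target with one initial region, assuming a
 * previously created context 'ctx'.  The addresses are arbitrary
 * placeholders; real callers derive them from the thing being monitored.
 *
 *	struct damon_target *t = damon_new_target();
 *	struct damon_region *r;
 *
 *	if (!t)
 *		return -ENOMEM;
 *	r = damon_new_region(0x100000000UL, 0x140000000UL);
 *	if (!r) {
 *		damon_free_target(t);
 *		return -ENOMEM;
 *	}
 *	damon_add_region(r, t);
 *	damon_add_target(ctx, t);
 */
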
145 static void damon_del_region(struct damon_region *r, struct damon_target *t)
146 {
147 	list_del(&r->list);
148 	t->nr_regions--;
149 }
150 
151 static void damon_free_region(struct damon_region *r)
152 {
153 	kmem_cache_free(damon_region_cache, r);
154 }
155 
156 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
157 {
158 	damon_del_region(r, t);
159 	damon_free_region(r);
160 }
161 
162 /*
163  * Check whether a region is intersecting an address range
164  *
165  * Returns true if it is.
166  */
167 static bool damon_intersect(struct damon_region *r,
168 		struct damon_addr_range *re)
169 {
170 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
171 }
172 
173 /*
174  * Fill holes in regions with new regions.
175  */
176 static int damon_fill_regions_holes(struct damon_region *first,
177 		struct damon_region *last, struct damon_target *t)
178 {
179 	struct damon_region *r = first;
180 
181 	damon_for_each_region_from(r, t) {
182 		struct damon_region *next, *newr;
183 
184 		if (r == last)
185 			break;
186 		next = damon_next_region(r);
187 		if (r->ar.end != next->ar.start) {
188 			newr = damon_new_region(r->ar.end, next->ar.start);
189 			if (!newr)
190 				return -ENOMEM;
191 			damon_insert_region(newr, r, next, t);
192 		}
193 	}
194 	return 0;
195 }
196 
197 /*
198  * damon_set_regions() - Set regions of a target for given address ranges.
199  * @t:		the given target.
200  * @ranges:	array of new monitoring target ranges.
201  * @nr_ranges:	length of @ranges.
202  *
203  * This function adds new regions to, or modifies existing regions of, the
204  * monitoring target so that they fit in the given ranges.
205  *
206  * Return: 0 if success, or negative error code otherwise.
207  */
208 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
209 		unsigned int nr_ranges)
210 {
211 	struct damon_region *r, *next;
212 	unsigned int i;
213 	int err;
214 
215 	/* Remove regions which are not in the new ranges */
216 	damon_for_each_region_safe(r, next, t) {
217 		for (i = 0; i < nr_ranges; i++) {
218 			if (damon_intersect(r, &ranges[i]))
219 				break;
220 		}
221 		if (i == nr_ranges)
222 			damon_destroy_region(r, t);
223 	}
224 
225 	r = damon_first_region(t);
226 	/* Add new regions or resize existing regions to fit in the ranges */
227 	for (i = 0; i < nr_ranges; i++) {
228 		struct damon_region *first = NULL, *last, *newr;
229 		struct damon_addr_range *range;
230 
231 		range = &ranges[i];
232 		/* Get the first/last regions intersecting with the range */
233 		damon_for_each_region_from(r, t) {
234 			if (damon_intersect(r, range)) {
235 				if (!first)
236 					first = r;
237 				last = r;
238 			}
239 			if (r->ar.start >= range->end)
240 				break;
241 		}
242 		if (!first) {
243 			/* no region intersects with this range */
244 			newr = damon_new_region(
245 					ALIGN_DOWN(range->start,
246 						DAMON_MIN_REGION),
247 					ALIGN(range->end, DAMON_MIN_REGION));
248 			if (!newr)
249 				return -ENOMEM;
250 			damon_insert_region(newr, damon_prev_region(r), r, t);
251 		} else {
252 			/* resize intersecting regions to fit in this range */
253 			first->ar.start = ALIGN_DOWN(range->start,
254 					DAMON_MIN_REGION);
255 			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
256 
257 			/* fill possible holes in the range */
258 			err = damon_fill_regions_holes(first, last, t);
259 			if (err)
260 				return err;
261 		}
262 	}
263 	return 0;
264 }
265 
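/*
 * Example: restricting an existing target 't' to two address ranges.  The
 * ranges below are arbitrary; unaligned boundaries are aligned to
 * DAMON_MIN_REGION as described above.
 *
 *	struct damon_addr_range ranges[2] = {
 *		{ .start = 0x100000000UL, .end = 0x140000000UL },
 *		{ .start = 0x180000000UL, .end = 0x1c0000000UL },
 *	};
 *	int err = damon_set_regions(t, ranges, 2);
 */
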
266 struct damos_filter *damos_new_filter(enum damos_filter_type type,
267 		bool matching)
268 {
269 	struct damos_filter *filter;
270 
271 	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
272 	if (!filter)
273 		return NULL;
274 	filter->type = type;
275 	filter->matching = matching;
276 	INIT_LIST_HEAD(&filter->list);
277 	return filter;
278 }
279 
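/*
 * Example: a hedged sketch of a filter that limits a scheme to one address
 * range.  Passing 'matching' as false filters out regions that do not match
 * the range, so the scheme's action is applied only inside it.  The range and
 * the 'scheme' variable are illustrative.
 *
 *	struct damos_filter *f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, false);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range.start = 0x100000000UL;
 *	f->addr_range.end = 0x140000000UL;
 *	damos_add_filter(scheme, f);
 */
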
280 void damos_add_filter(struct damos *s, struct damos_filter *f)
281 {
282 	list_add_tail(&f->list, &s->filters);
283 }
284 
285 static void damos_del_filter(struct damos_filter *f)
286 {
287 	list_del(&f->list);
288 }
289 
290 static void damos_free_filter(struct damos_filter *f)
291 {
292 	kfree(f);
293 }
294 
295 void damos_destroy_filter(struct damos_filter *f)
296 {
297 	damos_del_filter(f);
298 	damos_free_filter(f);
299 }
300 
301 /* initialize private fields of damos_quota and return the pointer */
302 static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
303 {
304 	quota->total_charged_sz = 0;
305 	quota->total_charged_ns = 0;
306 	quota->esz = 0;
307 	quota->charged_sz = 0;
308 	quota->charged_from = 0;
309 	quota->charge_target_from = NULL;
310 	quota->charge_addr_from = 0;
311 	return quota;
312 }
313 
314 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
315 			enum damos_action action, struct damos_quota *quota,
316 			struct damos_watermarks *wmarks)
317 {
318 	struct damos *scheme;
319 
320 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
321 	if (!scheme)
322 		return NULL;
323 	scheme->pattern = *pattern;
324 	scheme->action = action;
325 	INIT_LIST_HEAD(&scheme->filters);
326 	scheme->stat = (struct damos_stat){};
327 	INIT_LIST_HEAD(&scheme->list);
328 
329 	scheme->quota = *(damos_quota_init_priv(quota));
330 
331 	scheme->wmarks = *wmarks;
332 	scheme->wmarks.activated = true;
333 
334 	return scheme;
335 }
336 
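/*
 * Example: a hedged sketch of a scheme that pages out regions which stayed
 * completely unaccessed for at least ten aggregation intervals.  The pattern
 * values, the DAMOS_PAGEOUT action, and the empty quota/watermarks are
 * illustrative defaults, not requirements; 'ctx' is an assumed context.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = DAMON_MIN_REGION,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 10,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, &quota,
 *			&wmarks);
 *
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */
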
337 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
338 {
339 	list_add_tail(&s->list, &ctx->schemes);
340 }
341 
342 static void damon_del_scheme(struct damos *s)
343 {
344 	list_del(&s->list);
345 }
346 
347 static void damon_free_scheme(struct damos *s)
348 {
349 	kfree(s);
350 }
351 
352 void damon_destroy_scheme(struct damos *s)
353 {
354 	struct damos_filter *f, *next;
355 
356 	damos_for_each_filter_safe(f, next, s)
357 		damos_destroy_filter(f);
358 	damon_del_scheme(s);
359 	damon_free_scheme(s);
360 }
361 
362 /*
363  * Construct a damon_target struct
364  *
365  * Returns the pointer to the new struct if success, or NULL otherwise
366  */
367 struct damon_target *damon_new_target(void)
368 {
369 	struct damon_target *t;
370 
371 	t = kmalloc(sizeof(*t), GFP_KERNEL);
372 	if (!t)
373 		return NULL;
374 
375 	t->pid = NULL;
376 	t->nr_regions = 0;
377 	INIT_LIST_HEAD(&t->regions_list);
378 	INIT_LIST_HEAD(&t->list);
379 
380 	return t;
381 }
382 
383 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
384 {
385 	list_add_tail(&t->list, &ctx->adaptive_targets);
386 }
387 
388 bool damon_targets_empty(struct damon_ctx *ctx)
389 {
390 	return list_empty(&ctx->adaptive_targets);
391 }
392 
393 static void damon_del_target(struct damon_target *t)
394 {
395 	list_del(&t->list);
396 }
397 
398 void damon_free_target(struct damon_target *t)
399 {
400 	struct damon_region *r, *next;
401 
402 	damon_for_each_region_safe(r, next, t)
403 		damon_free_region(r);
404 	kfree(t);
405 }
406 
407 void damon_destroy_target(struct damon_target *t)
408 {
409 	damon_del_target(t);
410 	damon_free_target(t);
411 }
412 
413 unsigned int damon_nr_regions(struct damon_target *t)
414 {
415 	return t->nr_regions;
416 }
417 
418 struct damon_ctx *damon_new_ctx(void)
419 {
420 	struct damon_ctx *ctx;
421 
422 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
423 	if (!ctx)
424 		return NULL;
425 
426 	init_completion(&ctx->kdamond_started);
427 
428 	ctx->attrs.sample_interval = 5 * 1000;
429 	ctx->attrs.aggr_interval = 100 * 1000;
430 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
431 
432 	ctx->passed_sample_intervals = 0;
433 	/* These will be set from kdamond_init_intervals_sis() */
434 	ctx->next_aggregation_sis = 0;
435 	ctx->next_ops_update_sis = 0;
436 
437 	mutex_init(&ctx->kdamond_lock);
438 
439 	ctx->attrs.min_nr_regions = 10;
440 	ctx->attrs.max_nr_regions = 1000;
441 
442 	INIT_LIST_HEAD(&ctx->adaptive_targets);
443 	INIT_LIST_HEAD(&ctx->schemes);
444 
445 	return ctx;
446 }
447 
448 static void damon_destroy_targets(struct damon_ctx *ctx)
449 {
450 	struct damon_target *t, *next_t;
451 
452 	if (ctx->ops.cleanup) {
453 		ctx->ops.cleanup(ctx);
454 		return;
455 	}
456 
457 	damon_for_each_target_safe(t, next_t, ctx)
458 		damon_destroy_target(t);
459 }
460 
461 void damon_destroy_ctx(struct damon_ctx *ctx)
462 {
463 	struct damos *s, *next_s;
464 
465 	damon_destroy_targets(ctx);
466 
467 	damon_for_each_scheme_safe(s, next_s, ctx)
468 		damon_destroy_scheme(s);
469 
470 	kfree(ctx);
471 }
472 
473 static unsigned int damon_age_for_new_attrs(unsigned int age,
474 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
475 {
476 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
477 }
478 
479 /* convert access ratio in bp (per 10,000) to nr_accesses */
480 static unsigned int damon_accesses_bp_to_nr_accesses(
481 		unsigned int accesses_bp, struct damon_attrs *attrs)
482 {
483 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
484 }
485 
486 /* convert nr_accesses to access ratio in bp (per 10,000) */
487 static unsigned int damon_nr_accesses_to_accesses_bp(
488 		unsigned int nr_accesses, struct damon_attrs *attrs)
489 {
490 	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
491 }
492 
493 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
494 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
495 {
496 	return damon_accesses_bp_to_nr_accesses(
497 			damon_nr_accesses_to_accesses_bp(
498 				nr_accesses, old_attrs),
499 			new_attrs);
500 }
501 
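/*
 * Worked example of the conversion above, with illustrative intervals: under
 * old attrs of 5 ms sampling and 100 ms aggregation, the maximum nr_accesses
 * (aggregation interval divided by sampling interval) is 20, so
 * nr_accesses == 10 corresponds to 5000 bp (50%).  If the new attrs use 10 ms
 * sampling and 200 ms aggregation the maximum stays 20 and the value converts
 * back to 10, while with 10 ms sampling and 100 ms aggregation the maximum
 * becomes 10 and the same 5000 bp converts to nr_accesses == 5.
 */
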
502 static void damon_update_monitoring_result(struct damon_region *r,
503 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
504 {
505 	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
506 			old_attrs, new_attrs);
507 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
508 }
509 
510 /*
511  * region->nr_accesses is the number of sampling intervals in the last
512  * aggregation interval in which access to the region was found, and region->age
513  * is the number of aggregation intervals for which its access pattern has been
514  * maintained.  For that reason, the real meaning of the two fields depends on
515  * the current sampling interval and aggregation interval.  This function updates
516  * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
517  */
518 static void damon_update_monitoring_results(struct damon_ctx *ctx,
519 		struct damon_attrs *new_attrs)
520 {
521 	struct damon_attrs *old_attrs = &ctx->attrs;
522 	struct damon_target *t;
523 	struct damon_region *r;
524 
525 	/* if any interval is zero, simply skip the conversion */
526 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
527 			!new_attrs->sample_interval ||
528 			!new_attrs->aggr_interval)
529 		return;
530 
531 	damon_for_each_target(t, ctx)
532 		damon_for_each_region(r, t)
533 			damon_update_monitoring_result(
534 					r, old_attrs, new_attrs);
535 }
536 
537 /**
538  * damon_set_attrs() - Set attributes for the monitoring.
539  * @ctx:		monitoring context
540  * @attrs:		monitoring attributes
541  *
542  * This function should not be called while the kdamond is running.
543  * Every time interval is in micro-seconds.
544  *
545  * Return: 0 on success, negative error code otherwise.
546  */
547 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
548 {
549 	unsigned long sample_interval = attrs->sample_interval ?
550 		attrs->sample_interval : 1;
551 
552 	if (attrs->min_nr_regions < 3)
553 		return -EINVAL;
554 	if (attrs->min_nr_regions > attrs->max_nr_regions)
555 		return -EINVAL;
556 	if (attrs->sample_interval > attrs->aggr_interval)
557 		return -EINVAL;
558 
559 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
560 		attrs->aggr_interval / sample_interval;
561 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
562 		attrs->ops_update_interval / sample_interval;
563 
564 	damon_update_monitoring_results(ctx, attrs);
565 	ctx->attrs = *attrs;
566 	return 0;
567 }
568 
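/*
 * Example: overriding the default intervals before starting the kdamond,
 * assuming a previously created context 'ctx'.  The concrete numbers are
 * illustrative; every interval is in micro-seconds.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 1000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	if (damon_set_attrs(ctx, &attrs))
 *		return -EINVAL;
 */
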
569 /**
570  * damon_set_schemes() - Set data access monitoring based operation schemes.
571  * @ctx:	monitoring context
572  * @schemes:	array of the schemes
573  * @nr_schemes:	number of entries in @schemes
574  *
575  * This function should not be called while the kdamond of the context is
576  * running.
577  */
578 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
579 			ssize_t nr_schemes)
580 {
581 	struct damos *s, *next;
582 	ssize_t i;
583 
584 	damon_for_each_scheme_safe(s, next, ctx)
585 		damon_destroy_scheme(s);
586 	for (i = 0; i < nr_schemes; i++)
587 		damon_add_scheme(ctx, schemes[i]);
588 }
589 
590 /**
591  * damon_nr_running_ctxs() - Return number of currently running contexts.
592  */
593 int damon_nr_running_ctxs(void)
594 {
595 	int nr_ctxs;
596 
597 	mutex_lock(&damon_lock);
598 	nr_ctxs = nr_running_ctxs;
599 	mutex_unlock(&damon_lock);
600 
601 	return nr_ctxs;
602 }
603 
604 /* Returns the size upper limit for each monitoring region */
605 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
606 {
607 	struct damon_target *t;
608 	struct damon_region *r;
609 	unsigned long sz = 0;
610 
611 	damon_for_each_target(t, ctx) {
612 		damon_for_each_region(r, t)
613 			sz += damon_sz_region(r);
614 	}
615 
616 	if (ctx->attrs.min_nr_regions)
617 		sz /= ctx->attrs.min_nr_regions;
618 	if (sz < DAMON_MIN_REGION)
619 		sz = DAMON_MIN_REGION;
620 
621 	return sz;
622 }
623 
624 static int kdamond_fn(void *data);
625 
626 /*
627  * __damon_start() - Starts monitoring with given context.
628  * @ctx:	monitoring context
629  *
630  * This function should be called while damon_lock is held.
631  *
632  * Return: 0 on success, negative error code otherwise.
633  */
634 static int __damon_start(struct damon_ctx *ctx)
635 {
636 	int err = -EBUSY;
637 
638 	mutex_lock(&ctx->kdamond_lock);
639 	if (!ctx->kdamond) {
640 		err = 0;
641 		reinit_completion(&ctx->kdamond_started);
642 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
643 				nr_running_ctxs);
644 		if (IS_ERR(ctx->kdamond)) {
645 			err = PTR_ERR(ctx->kdamond);
646 			ctx->kdamond = NULL;
647 		} else {
648 			wait_for_completion(&ctx->kdamond_started);
649 		}
650 	}
651 	mutex_unlock(&ctx->kdamond_lock);
652 
653 	return err;
654 }
655 
656 /**
657  * damon_start() - Starts monitoring for a given group of contexts.
658  * @ctxs:	an array of the pointers for contexts to start monitoring
659  * @nr_ctxs:	size of @ctxs
660  * @exclusive:	exclusiveness of this contexts group
661  *
662  * This function starts a group of monitoring threads for a group of monitoring
663  * contexts.  One thread per context is created and run in parallel.  The
664  * caller should handle synchronization between the threads by itself.  If
665  * @exclusive is true and a group of threads that was created by another
666  * 'damon_start()' call is currently running, this function does nothing but
667  * returns -EBUSY.
668  *
669  * Return: 0 on success, negative error code otherwise.
670  */
671 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
672 {
673 	int i;
674 	int err = 0;
675 
676 	mutex_lock(&damon_lock);
677 	if ((exclusive && nr_running_ctxs) ||
678 			(!exclusive && running_exclusive_ctxs)) {
679 		mutex_unlock(&damon_lock);
680 		return -EBUSY;
681 	}
682 
683 	for (i = 0; i < nr_ctxs; i++) {
684 		err = __damon_start(ctxs[i]);
685 		if (err)
686 			break;
687 		nr_running_ctxs++;
688 	}
689 	if (exclusive && nr_running_ctxs)
690 		running_exclusive_ctxs = true;
691 	mutex_unlock(&damon_lock);
692 
693 	return err;
694 }
695 
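/*
 * Example: starting and later stopping a single, exclusively running context.
 * This sketch assumes 'ctx' already has an operations set, a target, and
 * initial regions.
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *	int err = damon_start(ctxs, 1, true);
 *
 *	if (err)
 *		return err;
 *	... monitoring now runs in a "kdamond.<N>" kthread ...
 *	err = damon_stop(ctxs, 1);
 */
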
696 /*
697  * __damon_stop() - Stops monitoring of a given context.
698  * @ctx:	monitoring context
699  *
700  * Return: 0 on success, negative error code otherwise.
701  */
702 static int __damon_stop(struct damon_ctx *ctx)
703 {
704 	struct task_struct *tsk;
705 
706 	mutex_lock(&ctx->kdamond_lock);
707 	tsk = ctx->kdamond;
708 	if (tsk) {
709 		get_task_struct(tsk);
710 		mutex_unlock(&ctx->kdamond_lock);
711 		kthread_stop_put(tsk);
712 		return 0;
713 	}
714 	mutex_unlock(&ctx->kdamond_lock);
715 
716 	return -EPERM;
717 }
718 
719 /**
720  * damon_stop() - Stops monitoring for a given group of contexts.
721  * @ctxs:	an array of the pointers for contexts to stop monitoring
722  * @nr_ctxs:	size of @ctxs
723  *
724  * Return: 0 on success, negative error code otherwise.
725  */
726 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
727 {
728 	int i, err = 0;
729 
730 	for (i = 0; i < nr_ctxs; i++) {
731 		/* nr_running_ctxs is decremented in kdamond_fn */
732 		err = __damon_stop(ctxs[i]);
733 		if (err)
734 			break;
735 	}
736 	return err;
737 }
738 
739 /*
740  * Reset the aggregated monitoring results ('nr_accesses' of each region).
741  */
742 static void kdamond_reset_aggregated(struct damon_ctx *c)
743 {
744 	struct damon_target *t;
745 	unsigned int ti = 0;	/* target's index */
746 
747 	damon_for_each_target(t, c) {
748 		struct damon_region *r;
749 
750 		damon_for_each_region(r, t) {
751 			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
752 			r->last_nr_accesses = r->nr_accesses;
753 			r->nr_accesses = 0;
754 		}
755 		ti++;
756 	}
757 }
758 
759 static void damon_split_region_at(struct damon_target *t,
760 				  struct damon_region *r, unsigned long sz_r);
761 
762 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
763 {
764 	unsigned long sz;
765 
766 	sz = damon_sz_region(r);
767 	return s->pattern.min_sz_region <= sz &&
768 		sz <= s->pattern.max_sz_region &&
769 		s->pattern.min_nr_accesses <= r->nr_accesses &&
770 		r->nr_accesses <= s->pattern.max_nr_accesses &&
771 		s->pattern.min_age_region <= r->age &&
772 		r->age <= s->pattern.max_age_region;
773 }
774 
775 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
776 		struct damon_region *r, struct damos *s)
777 {
778 	bool ret = __damos_valid_target(r, s);
779 
780 	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
781 		return ret;
782 
783 	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
784 }
785 
786 /*
787  * damos_skip_charged_region() - Check if the given region or starting part of
788  * it is already charged for the DAMOS quota.
789  * @t:	The target of the region.
790  * @rp:	The pointer to the region.
791  * @s:	The scheme to be applied.
792  *
793  * If the quota of a scheme has been exceeded in a quota charge window, the
794  * scheme's action would be applied to only a part of the regions fulfilling
795  * the target access pattern.  To avoid applying the scheme action only to
796  * regions that were already applied, DAMON skips applying the scheme action
797  * to the regions that were charged in the previous charge window.
798  *
799  * This function checks if a given region should be skipped or not for that
800  * reason.  If only the starting part of the region was previously charged,
801  * this function splits the region into two so that the second one covers the
802  * area that was not charged in the previous charge window, saves the second
803  * region in *rp, and returns false, so that the caller can apply the DAMON
804  * action to the second one.
805  *
806  * Return: true if the region should be entirely skipped, false otherwise.
807  */
808 static bool damos_skip_charged_region(struct damon_target *t,
809 		struct damon_region **rp, struct damos *s)
810 {
811 	struct damon_region *r = *rp;
812 	struct damos_quota *quota = &s->quota;
813 	unsigned long sz_to_skip;
814 
815 	/* Skip previously charged regions */
816 	if (quota->charge_target_from) {
817 		if (t != quota->charge_target_from)
818 			return true;
819 		if (r == damon_last_region(t)) {
820 			quota->charge_target_from = NULL;
821 			quota->charge_addr_from = 0;
822 			return true;
823 		}
824 		if (quota->charge_addr_from &&
825 				r->ar.end <= quota->charge_addr_from)
826 			return true;
827 
828 		if (quota->charge_addr_from && r->ar.start <
829 				quota->charge_addr_from) {
830 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
831 					r->ar.start, DAMON_MIN_REGION);
832 			if (!sz_to_skip) {
833 				if (damon_sz_region(r) <= DAMON_MIN_REGION)
834 					return true;
835 				sz_to_skip = DAMON_MIN_REGION;
836 			}
837 			damon_split_region_at(t, r, sz_to_skip);
838 			r = damon_next_region(r);
839 			*rp = r;
840 		}
841 		quota->charge_target_from = NULL;
842 		quota->charge_addr_from = 0;
843 	}
844 	return false;
845 }
846 
847 static void damos_update_stat(struct damos *s,
848 		unsigned long sz_tried, unsigned long sz_applied)
849 {
850 	s->stat.nr_tried++;
851 	s->stat.sz_tried += sz_tried;
852 	if (sz_applied)
853 		s->stat.nr_applied++;
854 	s->stat.sz_applied += sz_applied;
855 }
856 
857 static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
858 		struct damon_region *r, struct damos_filter *filter)
859 {
860 	bool matched = false;
861 	struct damon_target *ti;
862 	int target_idx = 0;
863 	unsigned long start, end;
864 
865 	switch (filter->type) {
866 	case DAMOS_FILTER_TYPE_TARGET:
867 		damon_for_each_target(ti, ctx) {
868 			if (ti == t)
869 				break;
870 			target_idx++;
871 		}
872 		matched = target_idx == filter->target_idx;
873 		break;
874 	case DAMOS_FILTER_TYPE_ADDR:
875 		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
876 		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
877 
878 		/* inside the range */
879 		if (start <= r->ar.start && r->ar.end <= end) {
880 			matched = true;
881 			break;
882 		}
883 		/* outside of the range */
884 		if (r->ar.end <= start || end <= r->ar.start) {
885 			matched = false;
886 			break;
887 		}
888 		/* start before the range and overlap */
889 		if (r->ar.start < start) {
890 			damon_split_region_at(t, r, start - r->ar.start);
891 			matched = false;
892 			break;
893 		}
894 		/* start inside the range */
895 		damon_split_region_at(t, r, end - r->ar.start);
896 		matched = true;
897 		break;
898 	default:
899 		return false;
900 	}
901 
902 	return matched == filter->matching;
903 }
904 
905 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
906 		struct damon_region *r, struct damos *s)
907 {
908 	struct damos_filter *filter;
909 
910 	damos_for_each_filter(filter, s) {
911 		if (__damos_filter_out(ctx, t, r, filter))
912 			return true;
913 	}
914 	return false;
915 }
916 
917 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
918 		struct damon_region *r, struct damos *s)
919 {
920 	struct damos_quota *quota = &s->quota;
921 	unsigned long sz = damon_sz_region(r);
922 	struct timespec64 begin, end;
923 	unsigned long sz_applied = 0;
924 	int err = 0;
925 
926 	if (c->ops.apply_scheme) {
927 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
928 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
929 					DAMON_MIN_REGION);
930 			if (!sz)
931 				goto update_stat;
932 			damon_split_region_at(t, r, sz);
933 		}
934 		if (damos_filter_out(c, t, r, s))
935 			return;
936 		ktime_get_coarse_ts64(&begin);
937 		if (c->callback.before_damos_apply)
938 			err = c->callback.before_damos_apply(c, t, r, s);
939 		if (!err)
940 			sz_applied = c->ops.apply_scheme(c, t, r, s);
941 		ktime_get_coarse_ts64(&end);
942 		quota->total_charged_ns += timespec64_to_ns(&end) -
943 			timespec64_to_ns(&begin);
944 		quota->charged_sz += sz;
945 		if (quota->esz && quota->charged_sz >= quota->esz) {
946 			quota->charge_target_from = t;
947 			quota->charge_addr_from = r->ar.end + 1;
948 		}
949 	}
950 	if (s->action != DAMOS_STAT)
951 		r->age = 0;
952 
953 update_stat:
954 	damos_update_stat(s, sz, sz_applied);
955 }
956 
957 static void damon_do_apply_schemes(struct damon_ctx *c,
958 				   struct damon_target *t,
959 				   struct damon_region *r)
960 {
961 	struct damos *s;
962 
963 	damon_for_each_scheme(s, c) {
964 		struct damos_quota *quota = &s->quota;
965 
966 		if (!s->wmarks.activated)
967 			continue;
968 
969 		/* Check the quota */
970 		if (quota->esz && quota->charged_sz >= quota->esz)
971 			continue;
972 
973 		if (damos_skip_charged_region(t, &r, s))
974 			continue;
975 
976 		if (!damos_valid_target(c, t, r, s))
977 			continue;
978 
979 		damos_apply_scheme(c, t, r, s);
980 	}
981 }
982 
983 /* Shouldn't be called if quota->ms and quota->sz are zero */
984 static void damos_set_effective_quota(struct damos_quota *quota)
985 {
986 	unsigned long throughput;
987 	unsigned long esz;
988 
989 	if (!quota->ms) {
990 		quota->esz = quota->sz;
991 		return;
992 	}
993 
994 	if (quota->total_charged_ns)
995 		throughput = quota->total_charged_sz * 1000000 /
996 			quota->total_charged_ns;
997 	else
998 		throughput = PAGE_SIZE * 1024;
999 	esz = throughput * quota->ms;
1000 
1001 	if (quota->sz && quota->sz < esz)
1002 		esz = quota->sz;
1003 	quota->esz = esz;
1004 }
1005 
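/*
 * Worked example of the estimation above, with illustrative numbers: if
 * 400 MiB were charged over a total of 100 ms (10^8 ns) spent in applying
 * actions, the estimated throughput is 400 MiB * 1000000 / 10^8, i.e.
 * 4 MiB per millisecond.  With quota->ms == 10 that yields esz == 40 MiB, and
 * if quota->sz is also set and smaller (say 16 MiB), esz is capped to it.
 */
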
1006 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1007 {
1008 	struct damos_quota *quota = &s->quota;
1009 	struct damon_target *t;
1010 	struct damon_region *r;
1011 	unsigned long cumulated_sz;
1012 	unsigned int score, max_score = 0;
1013 
1014 	if (!quota->ms && !quota->sz)
1015 		return;
1016 
1017 	/* New charge window starts */
1018 	if (time_after_eq(jiffies, quota->charged_from +
1019 				msecs_to_jiffies(quota->reset_interval))) {
1020 		if (quota->esz && quota->charged_sz >= quota->esz)
1021 			s->stat.qt_exceeds++;
1022 		quota->total_charged_sz += quota->charged_sz;
1023 		quota->charged_from = jiffies;
1024 		quota->charged_sz = 0;
1025 		damos_set_effective_quota(quota);
1026 	}
1027 
1028 	if (!c->ops.get_scheme_score)
1029 		return;
1030 
1031 	/* Fill up the score histogram */
1032 	memset(quota->histogram, 0, sizeof(quota->histogram));
1033 	damon_for_each_target(t, c) {
1034 		damon_for_each_region(r, t) {
1035 			if (!__damos_valid_target(r, s))
1036 				continue;
1037 			score = c->ops.get_scheme_score(c, t, r, s);
1038 			quota->histogram[score] += damon_sz_region(r);
1039 			if (score > max_score)
1040 				max_score = score;
1041 		}
1042 	}
1043 
1044 	/* Set the min score limit */
1045 	for (cumulated_sz = 0, score = max_score; ; score--) {
1046 		cumulated_sz += quota->histogram[score];
1047 		if (cumulated_sz >= quota->esz || !score)
1048 			break;
1049 	}
1050 	quota->min_score = score;
1051 }
1052 
1053 static void kdamond_apply_schemes(struct damon_ctx *c)
1054 {
1055 	struct damon_target *t;
1056 	struct damon_region *r, *next_r;
1057 	struct damos *s;
1058 
1059 	damon_for_each_scheme(s, c) {
1060 		if (!s->wmarks.activated)
1061 			continue;
1062 
1063 		damos_adjust_quota(c, s);
1064 	}
1065 
1066 	damon_for_each_target(t, c) {
1067 		damon_for_each_region_safe(r, next_r, t)
1068 			damon_do_apply_schemes(c, t, r);
1069 	}
1070 }
1071 
1072 /*
1073  * Merge two adjacent regions into one region
1074  */
1075 static void damon_merge_two_regions(struct damon_target *t,
1076 		struct damon_region *l, struct damon_region *r)
1077 {
1078 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1079 
1080 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1081 			(sz_l + sz_r);
1082 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1083 	l->ar.end = r->ar.end;
1084 	damon_destroy_region(r, t);
1085 }
1086 
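/*
 * Worked example of the size-weighted merge above, with illustrative numbers:
 * merging a 12 KiB region with nr_accesses == 4 and an adjacent 4 KiB region
 * with nr_accesses == 8 yields a 16 KiB region with
 * nr_accesses == (4 * 12K + 8 * 4K) / 16K == 5; the age is averaged the same
 * way.
 */
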
1087 /*
1088  * Merge adjacent regions having similar access frequencies
1089  *
1090  * t		target affected by this merge operation
1091  * thres	'->nr_accesses' diff threshold for the merge
1092  * sz_limit	size upper limit of each region
1093  */
1094 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1095 				   unsigned long sz_limit)
1096 {
1097 	struct damon_region *r, *prev = NULL, *next;
1098 
1099 	damon_for_each_region_safe(r, next, t) {
1100 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1101 			r->age = 0;
1102 		else
1103 			r->age++;
1104 
1105 		if (prev && prev->ar.end == r->ar.start &&
1106 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1107 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1108 			damon_merge_two_regions(t, prev, r);
1109 		else
1110 			prev = r;
1111 	}
1112 }
1113 
1114 /*
1115  * Merge adjacent regions having similar access frequencies
1116  *
1117  * threshold	'->nr_accesses' diff threshold for the merge
1118  * sz_limit	size upper limit of each region
1119  *
1120  * This function merges monitoring target regions which are adjacent and their
1121  * access frequencies are similar.  This is for minimizing the monitoring
1122  * overhead under the dynamically changeable access pattern.  If a merge was
1123  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1124  */
1125 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1126 				  unsigned long sz_limit)
1127 {
1128 	struct damon_target *t;
1129 
1130 	damon_for_each_target(t, c)
1131 		damon_merge_regions_of(t, threshold, sz_limit);
1132 }
1133 
1134 /*
1135  * Split a region in two
1136  *
1137  * r		the region to be split
1138  * sz_r		size of the first sub-region that will be made
1139  */
1140 static void damon_split_region_at(struct damon_target *t,
1141 				  struct damon_region *r, unsigned long sz_r)
1142 {
1143 	struct damon_region *new;
1144 
1145 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1146 	if (!new)
1147 		return;
1148 
1149 	r->ar.end = new->ar.start;
1150 
1151 	new->age = r->age;
1152 	new->last_nr_accesses = r->last_nr_accesses;
1153 
1154 	damon_insert_region(new, r, damon_next_region(r), t);
1155 }
1156 
1157 /* Split every region in the given target into 'nr_subs' regions */
1158 static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1159 {
1160 	struct damon_region *r, *next;
1161 	unsigned long sz_region, sz_sub = 0;
1162 	int i;
1163 
1164 	damon_for_each_region_safe(r, next, t) {
1165 		sz_region = damon_sz_region(r);
1166 
1167 		for (i = 0; i < nr_subs - 1 &&
1168 				sz_region > 2 * DAMON_MIN_REGION; i++) {
1169 			/*
1170 			 * Randomly select the size of the left sub-region to
1171 			 * be at least 10% and at most 90% of the original region
1172 			 */
1173 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1174 					sz_region / 10, DAMON_MIN_REGION);
1175 			/* Do not allow blank region */
1176 			if (sz_sub == 0 || sz_sub >= sz_region)
1177 				continue;
1178 
1179 			damon_split_region_at(t, r, sz_sub);
1180 			sz_region = sz_sub;
1181 		}
1182 	}
1183 }
1184 
1185 /*
1186  * Split every target region into randomly-sized small regions
1187  *
1188  * This function splits every target region into random-sized small regions if
1189  * the current total number of regions is equal to or smaller than half of the
1190  * user-specified maximum number of regions.  This is for maximizing the
1191  * monitoring accuracy under the dynamically changeable access patterns.  If a
1192  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1193  * it.
1194  */
1195 static void kdamond_split_regions(struct damon_ctx *ctx)
1196 {
1197 	struct damon_target *t;
1198 	unsigned int nr_regions = 0;
1199 	static unsigned int last_nr_regions;
1200 	int nr_subregions = 2;
1201 
1202 	damon_for_each_target(t, ctx)
1203 		nr_regions += damon_nr_regions(t);
1204 
1205 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
1206 		return;
1207 
1208 	/* Maybe the middle of the region has different access frequency */
1209 	if (last_nr_regions == nr_regions &&
1210 			nr_regions < ctx->attrs.max_nr_regions / 3)
1211 		nr_subregions = 3;
1212 
1213 	damon_for_each_target(t, ctx)
1214 		damon_split_regions_of(t, nr_subregions);
1215 
1216 	last_nr_regions = nr_regions;
1217 }
1218 
1219 /*
1220  * Check whether current monitoring should be stopped
1221  *
1222  * The monitoring is stopped when either the user requested to stop, or all
1223  * monitoring targets are invalid.
1224  *
1225  * Returns true if need to stop current monitoring.
1226  */
1227 static bool kdamond_need_stop(struct damon_ctx *ctx)
1228 {
1229 	struct damon_target *t;
1230 
1231 	if (kthread_should_stop())
1232 		return true;
1233 
1234 	if (!ctx->ops.target_valid)
1235 		return false;
1236 
1237 	damon_for_each_target(t, ctx) {
1238 		if (ctx->ops.target_valid(t))
1239 			return false;
1240 	}
1241 
1242 	return true;
1243 }
1244 
1245 static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
1246 {
1247 	struct sysinfo i;
1248 
1249 	switch (metric) {
1250 	case DAMOS_WMARK_FREE_MEM_RATE:
1251 		si_meminfo(&i);
1252 		return i.freeram * 1000 / i.totalram;
1253 	default:
1254 		break;
1255 	}
1256 	return -EINVAL;
1257 }
1258 
1259 /*
1260  * Returns zero if the scheme is active.  Else, returns time to wait for next
1261  * watermark check in micro-seconds.
1262  */
1263 static unsigned long damos_wmark_wait_us(struct damos *scheme)
1264 {
1265 	unsigned long metric;
1266 
1267 	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
1268 		return 0;
1269 
1270 	metric = damos_wmark_metric_value(scheme->wmarks.metric);
1271 	/* higher than high watermark or lower than low watermark */
1272 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1273 		if (scheme->wmarks.activated)
1274 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
1275 					scheme->action,
1276 					metric > scheme->wmarks.high ?
1277 					"high" : "low");
1278 		scheme->wmarks.activated = false;
1279 		return scheme->wmarks.interval;
1280 	}
1281 
1282 	/* inactive and higher than middle watermark */
1283 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1284 			!scheme->wmarks.activated)
1285 		return scheme->wmarks.interval;
1286 
1287 	if (!scheme->wmarks.activated)
1288 		pr_debug("activate a scheme (%d)\n", scheme->action);
1289 	scheme->wmarks.activated = true;
1290 	return 0;
1291 }
1292 
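/*
 * Example: a hedged sketch of watermarks for a memory reclamation scheme.
 * With the numbers below (free memory rate in per-thousand, interval in
 * micro-seconds), the scheme gets activated once the free memory rate falls
 * under the mid watermark (15%), stays active while it is between low (5%)
 * and high (50%), and is deactivated outside that band, re-checking every
 * second.
 *
 *	struct damos_watermarks wmarks = {
 *		.metric = DAMOS_WMARK_FREE_MEM_RATE,
 *		.interval = 1000000,
 *		.high = 500,
 *		.mid = 150,
 *		.low = 50,
 *	};
 */
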
1293 static void kdamond_usleep(unsigned long usecs)
1294 {
1295 	/* See Documentation/timers/timers-howto.rst for the thresholds */
1296 	if (usecs > 20 * USEC_PER_MSEC)
1297 		schedule_timeout_idle(usecs_to_jiffies(usecs));
1298 	else
1299 		usleep_idle_range(usecs, usecs + 1);
1300 }
1301 
1302 /* Returns negative error code if it's not activated but should return */
1303 static int kdamond_wait_activation(struct damon_ctx *ctx)
1304 {
1305 	struct damos *s;
1306 	unsigned long wait_time;
1307 	unsigned long min_wait_time = 0;
1308 	bool init_wait_time = false;
1309 
1310 	while (!kdamond_need_stop(ctx)) {
1311 		damon_for_each_scheme(s, ctx) {
1312 			wait_time = damos_wmark_wait_us(s);
1313 			if (!init_wait_time || wait_time < min_wait_time) {
1314 				init_wait_time = true;
1315 				min_wait_time = wait_time;
1316 			}
1317 		}
1318 		if (!min_wait_time)
1319 			return 0;
1320 
1321 		kdamond_usleep(min_wait_time);
1322 
1323 		if (ctx->callback.after_wmarks_check &&
1324 				ctx->callback.after_wmarks_check(ctx))
1325 			break;
1326 	}
1327 	return -EBUSY;
1328 }
1329 
1330 static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
1331 {
1332 	unsigned long sample_interval = ctx->attrs.sample_interval ?
1333 		ctx->attrs.sample_interval : 1;
1334 
1335 	ctx->passed_sample_intervals = 0;
1336 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
1337 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
1338 		sample_interval;
1339 }
1340 
1341 /*
1342  * The monitoring daemon that runs as a kernel thread
1343  */
1344 static int kdamond_fn(void *data)
1345 {
1346 	struct damon_ctx *ctx = data;
1347 	struct damon_target *t;
1348 	struct damon_region *r, *next;
1349 	unsigned int max_nr_accesses = 0;
1350 	unsigned long sz_limit = 0;
1351 
1352 	pr_debug("kdamond (%d) starts\n", current->pid);
1353 
1354 	complete(&ctx->kdamond_started);
1355 	kdamond_init_intervals_sis(ctx);
1356 
1357 	if (ctx->ops.init)
1358 		ctx->ops.init(ctx);
1359 	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1360 		goto done;
1361 
1362 	sz_limit = damon_region_sz_limit(ctx);
1363 
1364 	while (!kdamond_need_stop(ctx)) {
1365 		/*
1366 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
1367 		 * be changed from after_wmarks_check() or after_aggregation()
1368 		 * callbacks.  Read the values here, and use those for this
1369 		 * iteration.  That is, new values updated by damon_set_attrs()
1370 		 * are respected from the next iteration.
1371 		 */
1372 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
1373 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
1374 		unsigned long sample_interval = ctx->attrs.sample_interval;
1375 
1376 		if (kdamond_wait_activation(ctx))
1377 			break;
1378 
1379 		if (ctx->ops.prepare_access_checks)
1380 			ctx->ops.prepare_access_checks(ctx);
1381 		if (ctx->callback.after_sampling &&
1382 				ctx->callback.after_sampling(ctx))
1383 			break;
1384 
1385 		kdamond_usleep(sample_interval);
1386 		ctx->passed_sample_intervals++;
1387 
1388 		if (ctx->ops.check_accesses)
1389 			max_nr_accesses = ctx->ops.check_accesses(ctx);
1390 
1391 		sample_interval = ctx->attrs.sample_interval ?
1392 			ctx->attrs.sample_interval : 1;
1393 		if (ctx->passed_sample_intervals == next_aggregation_sis) {
1394 			ctx->next_aggregation_sis = next_aggregation_sis +
1395 				ctx->attrs.aggr_interval / sample_interval;
1396 			kdamond_merge_regions(ctx,
1397 					max_nr_accesses / 10,
1398 					sz_limit);
1399 			if (ctx->callback.after_aggregation &&
1400 					ctx->callback.after_aggregation(ctx))
1401 				break;
1402 			if (!list_empty(&ctx->schemes))
1403 				kdamond_apply_schemes(ctx);
1404 			kdamond_reset_aggregated(ctx);
1405 			kdamond_split_regions(ctx);
1406 			if (ctx->ops.reset_aggregated)
1407 				ctx->ops.reset_aggregated(ctx);
1408 		}
1409 
1410 		if (ctx->passed_sample_intervals == next_ops_update_sis) {
1411 			ctx->next_ops_update_sis = next_ops_update_sis +
1412 				ctx->attrs.ops_update_interval /
1413 				sample_interval;
1414 			if (ctx->ops.update)
1415 				ctx->ops.update(ctx);
1416 			sz_limit = damon_region_sz_limit(ctx);
1417 		}
1418 	}
1419 done:
1420 	damon_for_each_target(t, ctx) {
1421 		damon_for_each_region_safe(r, next, t)
1422 			damon_destroy_region(r, t);
1423 	}
1424 
1425 	if (ctx->callback.before_terminate)
1426 		ctx->callback.before_terminate(ctx);
1427 	if (ctx->ops.cleanup)
1428 		ctx->ops.cleanup(ctx);
1429 
1430 	pr_debug("kdamond (%d) finishes\n", current->pid);
1431 	mutex_lock(&ctx->kdamond_lock);
1432 	ctx->kdamond = NULL;
1433 	mutex_unlock(&ctx->kdamond_lock);
1434 
1435 	mutex_lock(&damon_lock);
1436 	nr_running_ctxs--;
1437 	if (!nr_running_ctxs && running_exclusive_ctxs)
1438 		running_exclusive_ctxs = false;
1439 	mutex_unlock(&damon_lock);
1440 
1441 	return 0;
1442 }
1443 
1444 /*
1445  * struct damon_system_ram_region - System RAM resource address region of
1446  *				    [@start, @end).
1447  * @start:	Start address of the region (inclusive).
1448  * @end:	End address of the region (exclusive).
1449  */
1450 struct damon_system_ram_region {
1451 	unsigned long start;
1452 	unsigned long end;
1453 };
1454 
1455 static int walk_system_ram(struct resource *res, void *arg)
1456 {
1457 	struct damon_system_ram_region *a = arg;
1458 
1459 	if (a->end - a->start < resource_size(res)) {
1460 		a->start = res->start;
1461 		a->end = res->end;
1462 	}
1463 	return 0;
1464 }
1465 
1466 /*
1467  * Find biggest 'System RAM' resource and store its start and end address in
1468  * @start and @end, respectively.  If no System RAM is found, returns false.
1469  */
1470 static bool damon_find_biggest_system_ram(unsigned long *start,
1471 						unsigned long *end)
1472 
1473 {
1474 	struct damon_system_ram_region arg = {};
1475 
1476 	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
1477 	if (arg.end <= arg.start)
1478 		return false;
1479 
1480 	*start = arg.start;
1481 	*end = arg.end;
1482 	return true;
1483 }
1484 
1485 /**
1486  * damon_set_region_biggest_system_ram_default() - Set the region of the given
1487  * monitoring target as requested, or biggest 'System RAM'.
1488  * @t:		The monitoring target to set the region.
1489  * @start:	The pointer to the start address of the region.
1490  * @end:	The pointer to the end address of the region.
1491  *
1492  * This function sets the region of @t as requested by @start and @end.  If the
1493  * values of @start and @end are zero, however, this function finds the biggest
1494  * 'System RAM' resource and sets the region to cover the resource.  In the
1495  * latter case, this function saves the start and end addresses of the resource
1496  * in @start and @end, respectively.
1497  *
1498  * Return: 0 on success, negative error code otherwise.
1499  */
1500 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
1501 			unsigned long *start, unsigned long *end)
1502 {
1503 	struct damon_addr_range addr_range;
1504 
1505 	if (*start > *end)
1506 		return -EINVAL;
1507 
1508 	if (!*start && !*end &&
1509 		!damon_find_biggest_system_ram(start, end))
1510 		return -EINVAL;
1511 
1512 	addr_range.start = *start;
1513 	addr_range.end = *end;
1514 	return damon_set_regions(t, &addr_range, 1);
1515 }
1516 
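/*
 * Example: letting DAMON pick the biggest 'System RAM' block when no explicit
 * range is given.  Passing two zeroed addresses triggers the fallback
 * described above; 't' is an already constructed target.
 *
 *	unsigned long start = 0, end = 0;
 *	int err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *
 *	(on success, start and end hold the chosen physical address range)
 */
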
1517 static int __init damon_init(void)
1518 {
1519 	damon_region_cache = KMEM_CACHE(damon_region, 0);
1520 	if (unlikely(!damon_region_cache)) {
1521 		pr_err("creating damon_region_cache fails\n");
1522 		return -ENOMEM;
1523 	}
1524 
1525 	return 0;
1526 }
1527 
1528 subsys_initcall(damon_init);
1529 
1530 #include "core-test.h"
1531