xref: /openbmc/linux/mm/damon/core.c (revision 2fcb9362)
12224d848SSeongJae Park // SPDX-License-Identifier: GPL-2.0
22224d848SSeongJae Park /*
32224d848SSeongJae Park  * Data Access Monitor
42224d848SSeongJae Park  *
52224d848SSeongJae Park  * Author: SeongJae Park <sjpark@amazon.de>
62224d848SSeongJae Park  */
72224d848SSeongJae Park 
82224d848SSeongJae Park #define pr_fmt(fmt) "damon: " fmt
92224d848SSeongJae Park 
102224d848SSeongJae Park #include <linux/damon.h>
112224d848SSeongJae Park #include <linux/delay.h>
122224d848SSeongJae Park #include <linux/kthread.h>
13b9a6ac4eSSeongJae Park #include <linux/random.h>
142224d848SSeongJae Park #include <linux/slab.h>
152224d848SSeongJae Park 
16*2fcb9362SSeongJae Park #define CREATE_TRACE_POINTS
17*2fcb9362SSeongJae Park #include <trace/events/damon.h>
18*2fcb9362SSeongJae Park 
/*
 * Get a pseudo-random number in the half-open interval [l, r).
 *
 * Arguments and the whole expansion are parenthesized so the macro stays
 * correct when callers pass compound expressions such as 'a + b'.
 */
#define damon_rand(l, r) ((l) + prandom_u32_max((r) - (l)))
21b9a6ac4eSSeongJae Park 
222224d848SSeongJae Park static DEFINE_MUTEX(damon_lock);
232224d848SSeongJae Park static int nr_running_ctxs;
242224d848SSeongJae Park 
25f23b8eeeSSeongJae Park /*
26f23b8eeeSSeongJae Park  * Construct a damon_region struct
27f23b8eeeSSeongJae Park  *
28f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
29f23b8eeeSSeongJae Park  */
30f23b8eeeSSeongJae Park struct damon_region *damon_new_region(unsigned long start, unsigned long end)
31f23b8eeeSSeongJae Park {
32f23b8eeeSSeongJae Park 	struct damon_region *region;
33f23b8eeeSSeongJae Park 
34f23b8eeeSSeongJae Park 	region = kmalloc(sizeof(*region), GFP_KERNEL);
35f23b8eeeSSeongJae Park 	if (!region)
36f23b8eeeSSeongJae Park 		return NULL;
37f23b8eeeSSeongJae Park 
38f23b8eeeSSeongJae Park 	region->ar.start = start;
39f23b8eeeSSeongJae Park 	region->ar.end = end;
40f23b8eeeSSeongJae Park 	region->nr_accesses = 0;
41f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&region->list);
42f23b8eeeSSeongJae Park 
43f23b8eeeSSeongJae Park 	return region;
44f23b8eeeSSeongJae Park }
45f23b8eeeSSeongJae Park 
46f23b8eeeSSeongJae Park /*
47f23b8eeeSSeongJae Park  * Add a region between two other regions
48f23b8eeeSSeongJae Park  */
49f23b8eeeSSeongJae Park inline void damon_insert_region(struct damon_region *r,
50b9a6ac4eSSeongJae Park 		struct damon_region *prev, struct damon_region *next,
51b9a6ac4eSSeongJae Park 		struct damon_target *t)
52f23b8eeeSSeongJae Park {
53f23b8eeeSSeongJae Park 	__list_add(&r->list, &prev->list, &next->list);
54b9a6ac4eSSeongJae Park 	t->nr_regions++;
55f23b8eeeSSeongJae Park }
56f23b8eeeSSeongJae Park 
57f23b8eeeSSeongJae Park void damon_add_region(struct damon_region *r, struct damon_target *t)
58f23b8eeeSSeongJae Park {
59f23b8eeeSSeongJae Park 	list_add_tail(&r->list, &t->regions_list);
60b9a6ac4eSSeongJae Park 	t->nr_regions++;
61f23b8eeeSSeongJae Park }
62f23b8eeeSSeongJae Park 
63b9a6ac4eSSeongJae Park static void damon_del_region(struct damon_region *r, struct damon_target *t)
64f23b8eeeSSeongJae Park {
65f23b8eeeSSeongJae Park 	list_del(&r->list);
66b9a6ac4eSSeongJae Park 	t->nr_regions--;
67f23b8eeeSSeongJae Park }
68f23b8eeeSSeongJae Park 
/* Release the memory of a region that is already unlinked from its target */
static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}
73f23b8eeeSSeongJae Park 
/* Unlink a region from the target @t and free its memory */
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	/* Unlink first so the counter stays consistent before the free */
	damon_del_region(r, t);
	damon_free_region(r);
}
79f23b8eeeSSeongJae Park 
80f23b8eeeSSeongJae Park /*
81f23b8eeeSSeongJae Park  * Construct a damon_target struct
82f23b8eeeSSeongJae Park  *
83f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
84f23b8eeeSSeongJae Park  */
85f23b8eeeSSeongJae Park struct damon_target *damon_new_target(unsigned long id)
86f23b8eeeSSeongJae Park {
87f23b8eeeSSeongJae Park 	struct damon_target *t;
88f23b8eeeSSeongJae Park 
89f23b8eeeSSeongJae Park 	t = kmalloc(sizeof(*t), GFP_KERNEL);
90f23b8eeeSSeongJae Park 	if (!t)
91f23b8eeeSSeongJae Park 		return NULL;
92f23b8eeeSSeongJae Park 
93f23b8eeeSSeongJae Park 	t->id = id;
94b9a6ac4eSSeongJae Park 	t->nr_regions = 0;
95f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&t->regions_list);
96f23b8eeeSSeongJae Park 
97f23b8eeeSSeongJae Park 	return t;
98f23b8eeeSSeongJae Park }
99f23b8eeeSSeongJae Park 
/* Register a monitoring target to the context's target list */
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}
104f23b8eeeSSeongJae Park 
/* Unlink a target from its context's target list */
static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}
109f23b8eeeSSeongJae Park 
110f23b8eeeSSeongJae Park void damon_free_target(struct damon_target *t)
111f23b8eeeSSeongJae Park {
112f23b8eeeSSeongJae Park 	struct damon_region *r, *next;
113f23b8eeeSSeongJae Park 
114f23b8eeeSSeongJae Park 	damon_for_each_region_safe(r, next, t)
115f23b8eeeSSeongJae Park 		damon_free_region(r);
116f23b8eeeSSeongJae Park 	kfree(t);
117f23b8eeeSSeongJae Park }
118f23b8eeeSSeongJae Park 
/* Unlink a target from its context and free it along with its regions */
void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}
124f23b8eeeSSeongJae Park 
/* Number of monitoring regions currently set for the target */
unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
129b9a6ac4eSSeongJae Park 
1302224d848SSeongJae Park struct damon_ctx *damon_new_ctx(void)
1312224d848SSeongJae Park {
1322224d848SSeongJae Park 	struct damon_ctx *ctx;
1332224d848SSeongJae Park 
1342224d848SSeongJae Park 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1352224d848SSeongJae Park 	if (!ctx)
1362224d848SSeongJae Park 		return NULL;
1372224d848SSeongJae Park 
1382224d848SSeongJae Park 	ctx->sample_interval = 5 * 1000;
1392224d848SSeongJae Park 	ctx->aggr_interval = 100 * 1000;
1402224d848SSeongJae Park 	ctx->primitive_update_interval = 60 * 1000 * 1000;
1412224d848SSeongJae Park 
1422224d848SSeongJae Park 	ktime_get_coarse_ts64(&ctx->last_aggregation);
1432224d848SSeongJae Park 	ctx->last_primitive_update = ctx->last_aggregation;
1442224d848SSeongJae Park 
1452224d848SSeongJae Park 	mutex_init(&ctx->kdamond_lock);
1462224d848SSeongJae Park 
147b9a6ac4eSSeongJae Park 	ctx->min_nr_regions = 10;
148b9a6ac4eSSeongJae Park 	ctx->max_nr_regions = 1000;
149b9a6ac4eSSeongJae Park 
150b9a6ac4eSSeongJae Park 	INIT_LIST_HEAD(&ctx->adaptive_targets);
1512224d848SSeongJae Park 
1522224d848SSeongJae Park 	return ctx;
1532224d848SSeongJae Park }
1542224d848SSeongJae Park 
/*
 * Destroy every monitoring target of the context.
 *
 * If the monitoring primitive registered a 'cleanup' callback, the target
 * destruction is entirely delegated to it and nothing more is done here.
 */
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}
167f23b8eeeSSeongJae Park 
/* Destroy all targets of the context and free the context itself */
void damon_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_targets(ctx);
	kfree(ctx);
}
1732224d848SSeongJae Park 
/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		    unsigned long aggr_int, unsigned long primitive_upd_int,
		    unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	/*
	 * At least three regions are needed for the adaptive merge/split
	 * mechanism to have room to work with.
	 */
	if (min_nr_reg < 3) {
		pr_err("min_nr_regions (%lu) must be at least 3\n",
				min_nr_reg);
		return -EINVAL;
	}
	if (min_nr_reg > max_nr_reg) {
		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
				min_nr_reg, max_nr_reg);
		return -EINVAL;
	}

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}
2112224d848SSeongJae Park 
212b9a6ac4eSSeongJae Park /* Returns the size upper limit for each monitoring region */
213b9a6ac4eSSeongJae Park static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
214b9a6ac4eSSeongJae Park {
215b9a6ac4eSSeongJae Park 	struct damon_target *t;
216b9a6ac4eSSeongJae Park 	struct damon_region *r;
217b9a6ac4eSSeongJae Park 	unsigned long sz = 0;
218b9a6ac4eSSeongJae Park 
219b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx) {
220b9a6ac4eSSeongJae Park 		damon_for_each_region(r, t)
221b9a6ac4eSSeongJae Park 			sz += r->ar.end - r->ar.start;
222b9a6ac4eSSeongJae Park 	}
223b9a6ac4eSSeongJae Park 
224b9a6ac4eSSeongJae Park 	if (ctx->min_nr_regions)
225b9a6ac4eSSeongJae Park 		sz /= ctx->min_nr_regions;
226b9a6ac4eSSeongJae Park 	if (sz < DAMON_MIN_REGION)
227b9a6ac4eSSeongJae Park 		sz = DAMON_MIN_REGION;
228b9a6ac4eSSeongJae Park 
229b9a6ac4eSSeongJae Park 	return sz;
230b9a6ac4eSSeongJae Park }
231b9a6ac4eSSeongJae Park 
2322224d848SSeongJae Park static bool damon_kdamond_running(struct damon_ctx *ctx)
2332224d848SSeongJae Park {
2342224d848SSeongJae Park 	bool running;
2352224d848SSeongJae Park 
2362224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
2372224d848SSeongJae Park 	running = ctx->kdamond != NULL;
2382224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
2392224d848SSeongJae Park 
2402224d848SSeongJae Park 	return running;
2412224d848SSeongJae Park }
2422224d848SSeongJae Park 
2432224d848SSeongJae Park static int kdamond_fn(void *data);
2442224d848SSeongJae Park 
2452224d848SSeongJae Park /*
2462224d848SSeongJae Park  * __damon_start() - Starts monitoring with given context.
2472224d848SSeongJae Park  * @ctx:	monitoring context
2482224d848SSeongJae Park  *
2492224d848SSeongJae Park  * This function should be called while damon_lock is hold.
2502224d848SSeongJae Park  *
2512224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
2522224d848SSeongJae Park  */
2532224d848SSeongJae Park static int __damon_start(struct damon_ctx *ctx)
2542224d848SSeongJae Park {
2552224d848SSeongJae Park 	int err = -EBUSY;
2562224d848SSeongJae Park 
2572224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
2582224d848SSeongJae Park 	if (!ctx->kdamond) {
2592224d848SSeongJae Park 		err = 0;
2602224d848SSeongJae Park 		ctx->kdamond_stop = false;
2612224d848SSeongJae Park 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
2622224d848SSeongJae Park 				nr_running_ctxs);
2632224d848SSeongJae Park 		if (IS_ERR(ctx->kdamond)) {
2642224d848SSeongJae Park 			err = PTR_ERR(ctx->kdamond);
2652224d848SSeongJae Park 			ctx->kdamond = 0;
2662224d848SSeongJae Park 		}
2672224d848SSeongJae Park 	}
2682224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
2692224d848SSeongJae Park 
2702224d848SSeongJae Park 	return err;
2712224d848SSeongJae Park }
2722224d848SSeongJae Park 
2732224d848SSeongJae Park /**
2742224d848SSeongJae Park  * damon_start() - Starts the monitorings for a given group of contexts.
2752224d848SSeongJae Park  * @ctxs:	an array of the pointers for contexts to start monitoring
2762224d848SSeongJae Park  * @nr_ctxs:	size of @ctxs
2772224d848SSeongJae Park  *
2782224d848SSeongJae Park  * This function starts a group of monitoring threads for a group of monitoring
2792224d848SSeongJae Park  * contexts.  One thread per each context is created and run in parallel.  The
2802224d848SSeongJae Park  * caller should handle synchronization between the threads by itself.  If a
2812224d848SSeongJae Park  * group of threads that created by other 'damon_start()' call is currently
2822224d848SSeongJae Park  * running, this function does nothing but returns -EBUSY.
2832224d848SSeongJae Park  *
2842224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
2852224d848SSeongJae Park  */
2862224d848SSeongJae Park int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
2872224d848SSeongJae Park {
2882224d848SSeongJae Park 	int i;
2892224d848SSeongJae Park 	int err = 0;
2902224d848SSeongJae Park 
2912224d848SSeongJae Park 	mutex_lock(&damon_lock);
2922224d848SSeongJae Park 	if (nr_running_ctxs) {
2932224d848SSeongJae Park 		mutex_unlock(&damon_lock);
2942224d848SSeongJae Park 		return -EBUSY;
2952224d848SSeongJae Park 	}
2962224d848SSeongJae Park 
2972224d848SSeongJae Park 	for (i = 0; i < nr_ctxs; i++) {
2982224d848SSeongJae Park 		err = __damon_start(ctxs[i]);
2992224d848SSeongJae Park 		if (err)
3002224d848SSeongJae Park 			break;
3012224d848SSeongJae Park 		nr_running_ctxs++;
3022224d848SSeongJae Park 	}
3032224d848SSeongJae Park 	mutex_unlock(&damon_lock);
3042224d848SSeongJae Park 
3052224d848SSeongJae Park 	return err;
3062224d848SSeongJae Park }
3072224d848SSeongJae Park 
/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		/* Ask the kdamond to stop by itself */
		ctx->kdamond_stop = true;
		/*
		 * Drop the lock before waiting: kdamond_fn() needs to take
		 * kdamond_lock on its exit path to clear ctx->kdamond.
		 */
		mutex_unlock(&ctx->kdamond_lock);
		while (damon_kdamond_running(ctx))
			usleep_range(ctx->sample_interval,
					ctx->sample_interval * 2);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	/* The kdamond was not running */
	return -EPERM;
}
3292224d848SSeongJae Park 
/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
3502224d848SSeongJae Park 
/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to current time for next check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	/* @interval is microseconds; the timestamps are in nanoseconds */
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}
3732224d848SSeongJae Park 
/*
 * Check whether it is time to flush the aggregated information
 *
 * When it is, this also resets ctx->last_aggregation for the next check.
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
3822224d848SSeongJae Park 
3832224d848SSeongJae Park /*
384f23b8eeeSSeongJae Park  * Reset the aggregated monitoring results ('nr_accesses' of each region).
385f23b8eeeSSeongJae Park  */
386f23b8eeeSSeongJae Park static void kdamond_reset_aggregated(struct damon_ctx *c)
387f23b8eeeSSeongJae Park {
388f23b8eeeSSeongJae Park 	struct damon_target *t;
389f23b8eeeSSeongJae Park 
390f23b8eeeSSeongJae Park 	damon_for_each_target(t, c) {
391f23b8eeeSSeongJae Park 		struct damon_region *r;
392f23b8eeeSSeongJae Park 
393*2fcb9362SSeongJae Park 		damon_for_each_region(r, t) {
394*2fcb9362SSeongJae Park 			trace_damon_aggregated(t, r, damon_nr_regions(t));
395f23b8eeeSSeongJae Park 			r->nr_accesses = 0;
396f23b8eeeSSeongJae Park 		}
397f23b8eeeSSeongJae Park 	}
398*2fcb9362SSeongJae Park }
399f23b8eeeSSeongJae Park 
/* Size of the given monitoring region, in bytes */
#define sz_damon_region(r) ((r)->ar.end - (r)->ar.start)
401b9a6ac4eSSeongJae Park 
/*
 * Merge two adjacent regions into one region
 *
 * The merged region's access frequency is the size-weighted average of the
 * two inputs, so larger regions contribute proportionally more.
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	/* Extend the left region over the right one, then drop the right */
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
415b9a6ac4eSSeongJae Park 
/*
 * Absolute difference of two unsigned values.  Arguments and the expansion
 * are fully parenthesized so compound-expression arguments expand safely.
 */
#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
417b9a6ac4eSSeongJae Park 
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		/*
		 * Merge 'r' into 'prev' only if the two are contiguous,
		 * their access frequencies differ by at most 'thres', and
		 * the merged region would not exceed 'sz_limit'.  Otherwise
		 * 'r' becomes the merge candidate for the next iteration.
		 */
		if (prev && prev->ar.end == r->ar.start &&
		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
439b9a6ac4eSSeongJae Park 
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	/* Apply the merge to every monitoring target of the context */
	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
459b9a6ac4eSSeongJae Park 
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	/* The new region covers the tail part, [start + sz_r, end) */
	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	/* On allocation failure, silently leave the region unsplit */
	if (!new)
		return;

	/* Shrink the original region to the head part */
	r->ar.end = new->ar.start;

	damon_insert_region(new, r, damon_next_region(r), t);
}
480b9a6ac4eSSeongJae Park 
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				     struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		/* Regions smaller than two minimal regions cannot split */
		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			/* Further splits apply to the remaining head part */
			sz_region = sz_sub;
		}
	}
}
509b9a6ac4eSSeongJae Park 
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * current total number of the regions is equal or smaller than half of the
 * user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	/*
	 * NOTE(review): this static is shared by every monitoring context,
	 * so with multiple concurrently running kdamonds the last observed
	 * count of one context could influence another -- confirm intended.
	 */
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	/* Do not split if already at half of the maximum, or more */
	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
543b9a6ac4eSSeongJae Park 
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Returns true if it is.  When it is, this also resets
 * ctx->last_primitive_update for the next check.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}
5542224d848SSeongJae Park 
5552224d848SSeongJae Park /*
5562224d848SSeongJae Park  * Check whether current monitoring should be stopped
5572224d848SSeongJae Park  *
5582224d848SSeongJae Park  * The monitoring is stopped when either the user requested to stop, or all
5592224d848SSeongJae Park  * monitoring targets are invalid.
5602224d848SSeongJae Park  *
5612224d848SSeongJae Park  * Returns true if need to stop current monitoring.
5622224d848SSeongJae Park  */
5632224d848SSeongJae Park static bool kdamond_need_stop(struct damon_ctx *ctx)
5642224d848SSeongJae Park {
565f23b8eeeSSeongJae Park 	struct damon_target *t;
5662224d848SSeongJae Park 	bool stop;
5672224d848SSeongJae Park 
5682224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
5692224d848SSeongJae Park 	stop = ctx->kdamond_stop;
5702224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
5712224d848SSeongJae Park 	if (stop)
5722224d848SSeongJae Park 		return true;
5732224d848SSeongJae Park 
5742224d848SSeongJae Park 	if (!ctx->primitive.target_valid)
5752224d848SSeongJae Park 		return false;
5762224d848SSeongJae Park 
577f23b8eeeSSeongJae Park 	damon_for_each_target(t, ctx) {
578f23b8eeeSSeongJae Park 		if (ctx->primitive.target_valid(t))
579f23b8eeeSSeongJae Park 			return false;
580f23b8eeeSSeongJae Park 	}
581f23b8eeeSSeongJae Park 
582f23b8eeeSSeongJae Park 	return true;
5832224d848SSeongJae Park }
5842224d848SSeongJae Park 
/* Request the kdamond of the context to stop, under the kdamond_lock */
static void set_kdamond_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond_stop = true;
	mutex_unlock(&ctx->kdamond_lock);
}
5912224d848SSeongJae Park 
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	mutex_lock(&ctx->kdamond_lock);
	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
	mutex_unlock(&ctx->kdamond_lock);

	/* Let the primitive and the user prepare for the monitoring */
	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	/* Main monitoring loop; one iteration per sampling interval */
	while (!kdamond_need_stop(ctx)) {
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			/*
			 * Merge regions of similar (at most a tenth of the
			 * highest observed frequency apart) access rates,
			 * report and reset the results, then re-split for
			 * the next aggregation cycle.
			 */
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			/* Target regions may have changed; refresh the cap */
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	/* Monitoring is over; destroy all regions of every target */
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	/* Let the user and the primitive clean up after the monitoring */
	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", ctx->kdamond->pid);
	/* Announce the termination so starters/stoppers can observe it */
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	do_exit(0);
}
667