xref: /openbmc/linux/mm/damon/core.c (revision 4bc05954)
12224d848SSeongJae Park // SPDX-License-Identifier: GPL-2.0
22224d848SSeongJae Park /*
32224d848SSeongJae Park  * Data Access Monitor
42224d848SSeongJae Park  *
52224d848SSeongJae Park  * Author: SeongJae Park <sjpark@amazon.de>
62224d848SSeongJae Park  */
72224d848SSeongJae Park 
82224d848SSeongJae Park #define pr_fmt(fmt) "damon: " fmt
92224d848SSeongJae Park 
102224d848SSeongJae Park #include <linux/damon.h>
112224d848SSeongJae Park #include <linux/delay.h>
122224d848SSeongJae Park #include <linux/kthread.h>
13b9a6ac4eSSeongJae Park #include <linux/random.h>
142224d848SSeongJae Park #include <linux/slab.h>
152224d848SSeongJae Park 
162fcb9362SSeongJae Park #define CREATE_TRACE_POINTS
172fcb9362SSeongJae Park #include <trace/events/damon.h>
182fcb9362SSeongJae Park 
/*
 * Get a random number in [l, r)
 *
 * Arguments are parenthesized so that expression arguments (e.g.
 * 'damon_rand(start, start + len)') expand correctly.
 */
#define damon_rand(l, r) ((l) + prandom_u32_max((r) - (l)))
21b9a6ac4eSSeongJae Park 
222224d848SSeongJae Park static DEFINE_MUTEX(damon_lock);
232224d848SSeongJae Park static int nr_running_ctxs;
242224d848SSeongJae Park 
25f23b8eeeSSeongJae Park /*
26f23b8eeeSSeongJae Park  * Construct a damon_region struct
27f23b8eeeSSeongJae Park  *
28f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
29f23b8eeeSSeongJae Park  */
30f23b8eeeSSeongJae Park struct damon_region *damon_new_region(unsigned long start, unsigned long end)
31f23b8eeeSSeongJae Park {
32f23b8eeeSSeongJae Park 	struct damon_region *region;
33f23b8eeeSSeongJae Park 
34f23b8eeeSSeongJae Park 	region = kmalloc(sizeof(*region), GFP_KERNEL);
35f23b8eeeSSeongJae Park 	if (!region)
36f23b8eeeSSeongJae Park 		return NULL;
37f23b8eeeSSeongJae Park 
38f23b8eeeSSeongJae Park 	region->ar.start = start;
39f23b8eeeSSeongJae Park 	region->ar.end = end;
40f23b8eeeSSeongJae Park 	region->nr_accesses = 0;
41f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&region->list);
42f23b8eeeSSeongJae Park 
43f23b8eeeSSeongJae Park 	return region;
44f23b8eeeSSeongJae Park }
45f23b8eeeSSeongJae Park 
/*
 * Add a region between two other regions
 *
 * Links @r between @prev and @next on @t's region list and updates the
 * cached region count of the target.
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}
56f23b8eeeSSeongJae Park 
/* Append region @r to the end of target @t's region list */
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}
62f23b8eeeSSeongJae Park 
/* Unlink region @r from target @t's list; does not free the region */
static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}
68f23b8eeeSSeongJae Park 
/* Free the memory of @r; the region must already be unlinked */
static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}
73f23b8eeeSSeongJae Park 
/* Unlink region @r from target @t and free its memory */
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
79f23b8eeeSSeongJae Park 
80f23b8eeeSSeongJae Park /*
81f23b8eeeSSeongJae Park  * Construct a damon_target struct
82f23b8eeeSSeongJae Park  *
83f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
84f23b8eeeSSeongJae Park  */
85f23b8eeeSSeongJae Park struct damon_target *damon_new_target(unsigned long id)
86f23b8eeeSSeongJae Park {
87f23b8eeeSSeongJae Park 	struct damon_target *t;
88f23b8eeeSSeongJae Park 
89f23b8eeeSSeongJae Park 	t = kmalloc(sizeof(*t), GFP_KERNEL);
90f23b8eeeSSeongJae Park 	if (!t)
91f23b8eeeSSeongJae Park 		return NULL;
92f23b8eeeSSeongJae Park 
93f23b8eeeSSeongJae Park 	t->id = id;
94b9a6ac4eSSeongJae Park 	t->nr_regions = 0;
95f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&t->regions_list);
96f23b8eeeSSeongJae Park 
97f23b8eeeSSeongJae Park 	return t;
98f23b8eeeSSeongJae Park }
99f23b8eeeSSeongJae Park 
/* Add monitoring target @t to context @ctx's target list */
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}
104f23b8eeeSSeongJae Park 
/* Unlink target @t from its context's list; does not free the target */
static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}
109f23b8eeeSSeongJae Park 
110f23b8eeeSSeongJae Park void damon_free_target(struct damon_target *t)
111f23b8eeeSSeongJae Park {
112f23b8eeeSSeongJae Park 	struct damon_region *r, *next;
113f23b8eeeSSeongJae Park 
114f23b8eeeSSeongJae Park 	damon_for_each_region_safe(r, next, t)
115f23b8eeeSSeongJae Park 		damon_free_region(r);
116f23b8eeeSSeongJae Park 	kfree(t);
117f23b8eeeSSeongJae Park }
118f23b8eeeSSeongJae Park 
/* Unlink target @t from its context and free it with all its regions */
void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}
124f23b8eeeSSeongJae Park 
/* Return the cached number of monitoring regions of target @t */
unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
129b9a6ac4eSSeongJae Park 
/*
 * Construct a damon_ctx struct with default monitoring attributes
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* Default time intervals, in microseconds */
	ctx->sample_interval = 5 * 1000;	/* 5 ms */
	ctx->aggr_interval = 100 * 1000;	/* 100 ms */
	ctx->primitive_update_interval = 60 * 1000 * 1000;	/* 60 s */

	/* Both interval baselines start from now */
	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_primitive_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	/* Default bounds for the adaptive regions adjustment */
	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);

	return ctx;
}
1542224d848SSeongJae Park 
/*
 * Destroy every monitoring target of @ctx
 *
 * If the monitoring primitive provides a 'cleanup' callback, target
 * destruction is fully delegated to that callback instead.
 */
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}
167f23b8eeeSSeongJae Park 
/* Release @ctx together with all monitoring targets it owns */
void damon_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_targets(ctx);
	kfree(ctx);
}
1732224d848SSeongJae Park 
/**
 * damon_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @ids:	array of target ids
 * @nr_ids:	number of entries in @ids
 *
 * Replaces any previously set targets of @ctx with new targets for @ids.
 * On allocation failure, all targets constructed so far are destroyed.
 *
 * This function should not be called while the kdamond is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_targets(struct damon_ctx *ctx,
		      unsigned long *ids, ssize_t nr_ids)
{
	ssize_t i;
	struct damon_target *t, *next;

	/* Previously set targets are replaced, not appended to */
	damon_destroy_targets(ctx);

	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
			pr_err("Failed to alloc damon_target\n");
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			return -ENOMEM;
		}
		damon_add_target(ctx, t);
	}

	return 0;
}
206*4bc05954SSeongJae Park 
/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		    unsigned long aggr_int, unsigned long primitive_upd_int,
		    unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	/* Validate the region count bounds before touching @ctx */
	if (min_nr_reg < 3) {
		pr_err("min_nr_regions (%lu) must be at least 3\n",
				min_nr_reg);
		return -EINVAL;
	}
	if (min_nr_reg > max_nr_reg) {
		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
				min_nr_reg, max_nr_reg);
		return -EINVAL;
	}

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}
2442224d848SSeongJae Park 
245*4bc05954SSeongJae Park /**
246*4bc05954SSeongJae Park  * damon_nr_running_ctxs() - Return number of currently running contexts.
247*4bc05954SSeongJae Park  */
248*4bc05954SSeongJae Park int damon_nr_running_ctxs(void)
249*4bc05954SSeongJae Park {
250*4bc05954SSeongJae Park 	int nr_ctxs;
251*4bc05954SSeongJae Park 
252*4bc05954SSeongJae Park 	mutex_lock(&damon_lock);
253*4bc05954SSeongJae Park 	nr_ctxs = nr_running_ctxs;
254*4bc05954SSeongJae Park 	mutex_unlock(&damon_lock);
255*4bc05954SSeongJae Park 
256*4bc05954SSeongJae Park 	return nr_ctxs;
257*4bc05954SSeongJae Park }
258*4bc05954SSeongJae Park 
/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	/* Sum up the total size of all monitoring target regions */
	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	/* Limit each region so at least 'min_nr_regions' regions can exist */
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	/* But never below the global minimum region size */
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}
278b9a6ac4eSSeongJae Park 
/* Returns whether the monitoring thread of @ctx is currently running */
static bool damon_kdamond_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);

	return running;
}
2892224d848SSeongJae Park 
2902224d848SSeongJae Park static int kdamond_fn(void *data);
2912224d848SSeongJae Park 
2922224d848SSeongJae Park /*
2932224d848SSeongJae Park  * __damon_start() - Starts monitoring with given context.
2942224d848SSeongJae Park  * @ctx:	monitoring context
2952224d848SSeongJae Park  *
2962224d848SSeongJae Park  * This function should be called while damon_lock is hold.
2972224d848SSeongJae Park  *
2982224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
2992224d848SSeongJae Park  */
3002224d848SSeongJae Park static int __damon_start(struct damon_ctx *ctx)
3012224d848SSeongJae Park {
3022224d848SSeongJae Park 	int err = -EBUSY;
3032224d848SSeongJae Park 
3042224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
3052224d848SSeongJae Park 	if (!ctx->kdamond) {
3062224d848SSeongJae Park 		err = 0;
3072224d848SSeongJae Park 		ctx->kdamond_stop = false;
3082224d848SSeongJae Park 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
3092224d848SSeongJae Park 				nr_running_ctxs);
3102224d848SSeongJae Park 		if (IS_ERR(ctx->kdamond)) {
3112224d848SSeongJae Park 			err = PTR_ERR(ctx->kdamond);
3122224d848SSeongJae Park 			ctx->kdamond = 0;
3132224d848SSeongJae Park 		}
3142224d848SSeongJae Park 	}
3152224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
3162224d848SSeongJae Park 
3172224d848SSeongJae Park 	return err;
3182224d848SSeongJae Park }
3192224d848SSeongJae Park 
/**
 * damon_start() - Starts the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per each context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If a
 * group of threads that created by other 'damon_start()' call is currently
 * running, this function does nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	/* Only one group of monitoring threads may run at a time */
	if (nr_running_ctxs) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	mutex_unlock(&damon_lock);

	return err;
}
3542224d848SSeongJae Park 
/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Sets the stop flag and polls until the kdamond has actually exited.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ctx->kdamond_stop = true;
		mutex_unlock(&ctx->kdamond_lock);
		/* Wait for the kdamond to notice the flag and terminate */
		while (damon_kdamond_running(ctx))
			usleep_range(ctx->sample_interval,
					ctx->sample_interval * 2);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	/* The context was not running */
	return -EPERM;
}
3762224d848SSeongJae Park 
/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
3972224d848SSeongJae Park 
/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to current time for next check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	/* Compare in nanoseconds; @interval is given in microseconds */
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	/* Interval elapsed; restart it from now */
	*baseline = now;
	return true;
}
4202224d848SSeongJae Park 
/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
4292224d848SSeongJae Park 
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			/* Report the result via tracepoint before resetting */
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->nr_accesses = 0;
		}
	}
}
446f23b8eeeSSeongJae Park 
/* Size of monitoring region @r in bytes; argument parenthesized for safety */
#define sz_damon_region(r) ((r)->ar.end - (r)->ar.start)
448b9a6ac4eSSeongJae Park 
/*
 * Merge two adjacent regions into one region
 *
 * @l is extended to cover @r's range and @r is destroyed.  The merged
 * access frequency is the size-weighted average of the two sources'.
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
462b9a6ac4eSSeongJae Park 
/* Absolute difference of two values; arguments parenthesized for safety */
#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
464b9a6ac4eSSeongJae Park 
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		/*
		 * Merge only if the regions are truly contiguous, their
		 * access frequencies differ at most by @thres, and the
		 * merged region would not exceed @sz_limit.
		 */
		if (prev && prev->ar.end == r->ar.start &&
		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
486b9a6ac4eSSeongJae Park 
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
506b9a6ac4eSSeongJae Park 
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 *
 * On allocation failure the split is silently skipped and @r stays intact.
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	/* Shrink @r to the first part; @new covers the remainder */
	r->ar.end = new->ar.start;

	damon_insert_region(new, r, damon_next_region(r), t);
}
527b9a6ac4eSSeongJae Park 
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				     struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		/* Split only while the remainder is large enough to split */
		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			/* Continue splitting the right remainder of @r */
			sz_region = sz_sub;
		}
	}
}
556b9a6ac4eSSeongJae Park 
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * current total number of the regions is equal or smaller than half of the
 * user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	/*
	 * Preserved across calls; being function-static, it is shared by
	 * every monitoring context's kdamond.
	 */
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
590b9a6ac4eSSeongJae Park 
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}
6012224d848SSeongJae Park 
/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if need to stop current monitoring.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;
	bool stop;

	mutex_lock(&ctx->kdamond_lock);
	stop = ctx->kdamond_stop;
	mutex_unlock(&ctx->kdamond_lock);
	if (stop)
		return true;

	/* Without a validity check, targets are assumed to stay valid */
	if (!ctx->primitive.target_valid)
		return false;

	/* Keep monitoring as long as at least one target is still valid */
	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}
6312224d848SSeongJae Park 
/* Ask the monitoring thread of @ctx to stop at its next check */
static void set_kdamond_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond_stop = true;
	mutex_unlock(&ctx->kdamond_lock);
}
6382224d848SSeongJae Park 
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	mutex_lock(&ctx->kdamond_lock);
	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
	mutex_unlock(&ctx->kdamond_lock);

	/* Primitive-specific initialization and 'before_start' callback */
	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	/* Main monitoring loop; one iteration per sampling interval */
	while (!kdamond_need_stop(ctx)) {
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			/* Merge threshold: 10% of the highest access count */
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			/* Total monitored size may have changed */
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	/* Monitoring is over; release all regions of all targets */
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", ctx->kdamond->pid);
	/* Let waiters (__damon_stop()) observe that this thread exited */
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	do_exit(0);
}
714