xref: /openbmc/linux/mm/damon/core.c (revision 5f7fe2b9)
12224d848SSeongJae Park // SPDX-License-Identifier: GPL-2.0
22224d848SSeongJae Park /*
32224d848SSeongJae Park  * Data Access Monitor
42224d848SSeongJae Park  *
52224d848SSeongJae Park  * Author: SeongJae Park <sjpark@amazon.de>
62224d848SSeongJae Park  */
72224d848SSeongJae Park 
82224d848SSeongJae Park #define pr_fmt(fmt) "damon: " fmt
92224d848SSeongJae Park 
102224d848SSeongJae Park #include <linux/damon.h>
112224d848SSeongJae Park #include <linux/delay.h>
122224d848SSeongJae Park #include <linux/kthread.h>
13b9a6ac4eSSeongJae Park #include <linux/random.h>
142224d848SSeongJae Park #include <linux/slab.h>
152224d848SSeongJae Park 
162fcb9362SSeongJae Park #define CREATE_TRACE_POINTS
172fcb9362SSeongJae Park #include <trace/events/damon.h>
182fcb9362SSeongJae Park 
1917ccae8bSSeongJae Park #ifdef CONFIG_DAMON_KUNIT_TEST
2017ccae8bSSeongJae Park #undef DAMON_MIN_REGION
2117ccae8bSSeongJae Park #define DAMON_MIN_REGION 1
2217ccae8bSSeongJae Park #endif
2317ccae8bSSeongJae Park 
/*
 * Get a random number in [l, r)
 *
 * Arguments are parenthesized so that expression arguments (e.g. a
 * conditional or an addition) expand correctly.  Note @l and @r are still
 * each evaluated once, so side-effecting arguments remain safe here.
 */
#define damon_rand(l, r) ((l) + prandom_u32_max((r) - (l)))
26b9a6ac4eSSeongJae Park 
272224d848SSeongJae Park static DEFINE_MUTEX(damon_lock);
282224d848SSeongJae Park static int nr_running_ctxs;
292224d848SSeongJae Park 
30f23b8eeeSSeongJae Park /*
31f23b8eeeSSeongJae Park  * Construct a damon_region struct
32f23b8eeeSSeongJae Park  *
33f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
34f23b8eeeSSeongJae Park  */
35f23b8eeeSSeongJae Park struct damon_region *damon_new_region(unsigned long start, unsigned long end)
36f23b8eeeSSeongJae Park {
37f23b8eeeSSeongJae Park 	struct damon_region *region;
38f23b8eeeSSeongJae Park 
39f23b8eeeSSeongJae Park 	region = kmalloc(sizeof(*region), GFP_KERNEL);
40f23b8eeeSSeongJae Park 	if (!region)
41f23b8eeeSSeongJae Park 		return NULL;
42f23b8eeeSSeongJae Park 
43f23b8eeeSSeongJae Park 	region->ar.start = start;
44f23b8eeeSSeongJae Park 	region->ar.end = end;
45f23b8eeeSSeongJae Park 	region->nr_accesses = 0;
46f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&region->list);
47f23b8eeeSSeongJae Park 
48f23b8eeeSSeongJae Park 	return region;
49f23b8eeeSSeongJae Park }
50f23b8eeeSSeongJae Park 
/*
 * Add a region between two other regions
 *
 * Links @r into @t's region list between @prev and @next and bumps the
 * target's cached region count.  @prev and @next are presumably adjacent
 * entries of @t's list (required by __list_add()).
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}
61f23b8eeeSSeongJae Park 
/* Append @r to the end of @t's region list and update the region count. */
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}
67f23b8eeeSSeongJae Park 
/* Unlink @r from @t's region list and update the count; does not free @r. */
static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}
73f23b8eeeSSeongJae Park 
/* Release the memory of @r; the region should already be unlinked. */
static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}
78f23b8eeeSSeongJae Park 
/* Unlink @r from @t's region list and free it. */
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
84f23b8eeeSSeongJae Park 
85f23b8eeeSSeongJae Park /*
86f23b8eeeSSeongJae Park  * Construct a damon_target struct
87f23b8eeeSSeongJae Park  *
88f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
89f23b8eeeSSeongJae Park  */
90f23b8eeeSSeongJae Park struct damon_target *damon_new_target(unsigned long id)
91f23b8eeeSSeongJae Park {
92f23b8eeeSSeongJae Park 	struct damon_target *t;
93f23b8eeeSSeongJae Park 
94f23b8eeeSSeongJae Park 	t = kmalloc(sizeof(*t), GFP_KERNEL);
95f23b8eeeSSeongJae Park 	if (!t)
96f23b8eeeSSeongJae Park 		return NULL;
97f23b8eeeSSeongJae Park 
98f23b8eeeSSeongJae Park 	t->id = id;
99b9a6ac4eSSeongJae Park 	t->nr_regions = 0;
100f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&t->regions_list);
101f23b8eeeSSeongJae Park 
102f23b8eeeSSeongJae Park 	return t;
103f23b8eeeSSeongJae Park }
104f23b8eeeSSeongJae Park 
/* Link @t at the tail of @ctx's monitoring target list. */
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}
109f23b8eeeSSeongJae Park 
/* Unlink @t from its context's target list; does not free @t. */
static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}
114f23b8eeeSSeongJae Park 
/* Free @t together with every region still on its region list. */
void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	/* Safe iteration, as each region is freed while walking the list */
	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}
123f23b8eeeSSeongJae Park 
/* Unlink @t from its context and free it along with its regions. */
void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}
129f23b8eeeSSeongJae Park 
/* Return the cached number of monitoring regions of @t. */
unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
134b9a6ac4eSSeongJae Park 
1352224d848SSeongJae Park struct damon_ctx *damon_new_ctx(void)
1362224d848SSeongJae Park {
1372224d848SSeongJae Park 	struct damon_ctx *ctx;
1382224d848SSeongJae Park 
1392224d848SSeongJae Park 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1402224d848SSeongJae Park 	if (!ctx)
1412224d848SSeongJae Park 		return NULL;
1422224d848SSeongJae Park 
1432224d848SSeongJae Park 	ctx->sample_interval = 5 * 1000;
1442224d848SSeongJae Park 	ctx->aggr_interval = 100 * 1000;
1452224d848SSeongJae Park 	ctx->primitive_update_interval = 60 * 1000 * 1000;
1462224d848SSeongJae Park 
1472224d848SSeongJae Park 	ktime_get_coarse_ts64(&ctx->last_aggregation);
1482224d848SSeongJae Park 	ctx->last_primitive_update = ctx->last_aggregation;
1492224d848SSeongJae Park 
1502224d848SSeongJae Park 	mutex_init(&ctx->kdamond_lock);
1512224d848SSeongJae Park 
152b9a6ac4eSSeongJae Park 	ctx->min_nr_regions = 10;
153b9a6ac4eSSeongJae Park 	ctx->max_nr_regions = 1000;
154b9a6ac4eSSeongJae Park 
155b9a6ac4eSSeongJae Park 	INIT_LIST_HEAD(&ctx->adaptive_targets);
1562224d848SSeongJae Park 
1572224d848SSeongJae Park 	return ctx;
1582224d848SSeongJae Park }
1592224d848SSeongJae Park 
/*
 * Destroy all monitoring targets of @ctx.
 *
 * If the monitoring primitive provides a 'cleanup' callback, target
 * destruction is fully delegated to that callback and nothing else is
 * done here.
 */
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}
172f23b8eeeSSeongJae Park 
/* Destroy every target of @ctx, then @ctx itself. */
void damon_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_targets(ctx);
	kfree(ctx);
}
1782224d848SSeongJae Park 
1792224d848SSeongJae Park /**
1804bc05954SSeongJae Park  * damon_set_targets() - Set monitoring targets.
1814bc05954SSeongJae Park  * @ctx:	monitoring context
1824bc05954SSeongJae Park  * @ids:	array of target ids
1834bc05954SSeongJae Park  * @nr_ids:	number of entries in @ids
1844bc05954SSeongJae Park  *
1854bc05954SSeongJae Park  * This function should not be called while the kdamond is running.
1864bc05954SSeongJae Park  *
1874bc05954SSeongJae Park  * Return: 0 on success, negative error code otherwise.
1884bc05954SSeongJae Park  */
1894bc05954SSeongJae Park int damon_set_targets(struct damon_ctx *ctx,
1904bc05954SSeongJae Park 		      unsigned long *ids, ssize_t nr_ids)
1914bc05954SSeongJae Park {
1924bc05954SSeongJae Park 	ssize_t i;
1934bc05954SSeongJae Park 	struct damon_target *t, *next;
1944bc05954SSeongJae Park 
1954bc05954SSeongJae Park 	damon_destroy_targets(ctx);
1964bc05954SSeongJae Park 
1974bc05954SSeongJae Park 	for (i = 0; i < nr_ids; i++) {
1984bc05954SSeongJae Park 		t = damon_new_target(ids[i]);
1994bc05954SSeongJae Park 		if (!t) {
2004bc05954SSeongJae Park 			pr_err("Failed to alloc damon_target\n");
2014bc05954SSeongJae Park 			/* The caller should do cleanup of the ids itself */
2024bc05954SSeongJae Park 			damon_for_each_target_safe(t, next, ctx)
2034bc05954SSeongJae Park 				damon_destroy_target(t);
2044bc05954SSeongJae Park 			return -ENOMEM;
2054bc05954SSeongJae Park 		}
2064bc05954SSeongJae Park 		damon_add_target(ctx, t);
2074bc05954SSeongJae Park 	}
2084bc05954SSeongJae Park 
2094bc05954SSeongJae Park 	return 0;
2104bc05954SSeongJae Park }
2114bc05954SSeongJae Park 
2124bc05954SSeongJae Park /**
2132224d848SSeongJae Park  * damon_set_attrs() - Set attributes for the monitoring.
2142224d848SSeongJae Park  * @ctx:		monitoring context
2152224d848SSeongJae Park  * @sample_int:		time interval between samplings
2162224d848SSeongJae Park  * @aggr_int:		time interval between aggregations
2172224d848SSeongJae Park  * @primitive_upd_int:	time interval between monitoring primitive updates
218b9a6ac4eSSeongJae Park  * @min_nr_reg:		minimal number of regions
219b9a6ac4eSSeongJae Park  * @max_nr_reg:		maximum number of regions
2202224d848SSeongJae Park  *
2212224d848SSeongJae Park  * This function should not be called while the kdamond is running.
2222224d848SSeongJae Park  * Every time interval is in micro-seconds.
2232224d848SSeongJae Park  *
2242224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
2252224d848SSeongJae Park  */
2262224d848SSeongJae Park int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
227b9a6ac4eSSeongJae Park 		    unsigned long aggr_int, unsigned long primitive_upd_int,
228b9a6ac4eSSeongJae Park 		    unsigned long min_nr_reg, unsigned long max_nr_reg)
2292224d848SSeongJae Park {
230b9a6ac4eSSeongJae Park 	if (min_nr_reg < 3) {
231b9a6ac4eSSeongJae Park 		pr_err("min_nr_regions (%lu) must be at least 3\n",
232b9a6ac4eSSeongJae Park 				min_nr_reg);
233b9a6ac4eSSeongJae Park 		return -EINVAL;
234b9a6ac4eSSeongJae Park 	}
235b9a6ac4eSSeongJae Park 	if (min_nr_reg > max_nr_reg) {
236b9a6ac4eSSeongJae Park 		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
237b9a6ac4eSSeongJae Park 				min_nr_reg, max_nr_reg);
238b9a6ac4eSSeongJae Park 		return -EINVAL;
239b9a6ac4eSSeongJae Park 	}
240b9a6ac4eSSeongJae Park 
2412224d848SSeongJae Park 	ctx->sample_interval = sample_int;
2422224d848SSeongJae Park 	ctx->aggr_interval = aggr_int;
2432224d848SSeongJae Park 	ctx->primitive_update_interval = primitive_upd_int;
244b9a6ac4eSSeongJae Park 	ctx->min_nr_regions = min_nr_reg;
245b9a6ac4eSSeongJae Park 	ctx->max_nr_regions = max_nr_reg;
2462224d848SSeongJae Park 
2472224d848SSeongJae Park 	return 0;
2482224d848SSeongJae Park }
2492224d848SSeongJae Park 
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	/* nr_running_ctxs is protected by damon_lock */
	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}
2634bc05954SSeongJae Park 
264b9a6ac4eSSeongJae Park /* Returns the size upper limit for each monitoring region */
265b9a6ac4eSSeongJae Park static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
266b9a6ac4eSSeongJae Park {
267b9a6ac4eSSeongJae Park 	struct damon_target *t;
268b9a6ac4eSSeongJae Park 	struct damon_region *r;
269b9a6ac4eSSeongJae Park 	unsigned long sz = 0;
270b9a6ac4eSSeongJae Park 
271b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx) {
272b9a6ac4eSSeongJae Park 		damon_for_each_region(r, t)
273b9a6ac4eSSeongJae Park 			sz += r->ar.end - r->ar.start;
274b9a6ac4eSSeongJae Park 	}
275b9a6ac4eSSeongJae Park 
276b9a6ac4eSSeongJae Park 	if (ctx->min_nr_regions)
277b9a6ac4eSSeongJae Park 		sz /= ctx->min_nr_regions;
278b9a6ac4eSSeongJae Park 	if (sz < DAMON_MIN_REGION)
279b9a6ac4eSSeongJae Park 		sz = DAMON_MIN_REGION;
280b9a6ac4eSSeongJae Park 
281b9a6ac4eSSeongJae Park 	return sz;
282b9a6ac4eSSeongJae Park }
283b9a6ac4eSSeongJae Park 
/* Tell whether the kdamond thread of @ctx is currently running. */
static bool damon_kdamond_running(struct damon_ctx *ctx)
{
	bool running;

	/* ctx->kdamond is protected by kdamond_lock */
	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);

	return running;
}
2942224d848SSeongJae Park 
2952224d848SSeongJae Park static int kdamond_fn(void *data);
2962224d848SSeongJae Park 
2972224d848SSeongJae Park /*
2982224d848SSeongJae Park  * __damon_start() - Starts monitoring with given context.
2992224d848SSeongJae Park  * @ctx:	monitoring context
3002224d848SSeongJae Park  *
3012224d848SSeongJae Park  * This function should be called while damon_lock is hold.
3022224d848SSeongJae Park  *
3032224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
3042224d848SSeongJae Park  */
3052224d848SSeongJae Park static int __damon_start(struct damon_ctx *ctx)
3062224d848SSeongJae Park {
3072224d848SSeongJae Park 	int err = -EBUSY;
3082224d848SSeongJae Park 
3092224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
3102224d848SSeongJae Park 	if (!ctx->kdamond) {
3112224d848SSeongJae Park 		err = 0;
3122224d848SSeongJae Park 		ctx->kdamond_stop = false;
3132224d848SSeongJae Park 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
3142224d848SSeongJae Park 				nr_running_ctxs);
3152224d848SSeongJae Park 		if (IS_ERR(ctx->kdamond)) {
3162224d848SSeongJae Park 			err = PTR_ERR(ctx->kdamond);
3172224d848SSeongJae Park 			ctx->kdamond = 0;
3182224d848SSeongJae Park 		}
3192224d848SSeongJae Park 	}
3202224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
3212224d848SSeongJae Park 
3222224d848SSeongJae Park 	return err;
3232224d848SSeongJae Park }
3242224d848SSeongJae Park 
3252224d848SSeongJae Park /**
3262224d848SSeongJae Park  * damon_start() - Starts the monitorings for a given group of contexts.
3272224d848SSeongJae Park  * @ctxs:	an array of the pointers for contexts to start monitoring
3282224d848SSeongJae Park  * @nr_ctxs:	size of @ctxs
3292224d848SSeongJae Park  *
3302224d848SSeongJae Park  * This function starts a group of monitoring threads for a group of monitoring
3312224d848SSeongJae Park  * contexts.  One thread per each context is created and run in parallel.  The
3322224d848SSeongJae Park  * caller should handle synchronization between the threads by itself.  If a
3332224d848SSeongJae Park  * group of threads that created by other 'damon_start()' call is currently
3342224d848SSeongJae Park  * running, this function does nothing but returns -EBUSY.
3352224d848SSeongJae Park  *
3362224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
3372224d848SSeongJae Park  */
3382224d848SSeongJae Park int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
3392224d848SSeongJae Park {
3402224d848SSeongJae Park 	int i;
3412224d848SSeongJae Park 	int err = 0;
3422224d848SSeongJae Park 
3432224d848SSeongJae Park 	mutex_lock(&damon_lock);
3442224d848SSeongJae Park 	if (nr_running_ctxs) {
3452224d848SSeongJae Park 		mutex_unlock(&damon_lock);
3462224d848SSeongJae Park 		return -EBUSY;
3472224d848SSeongJae Park 	}
3482224d848SSeongJae Park 
3492224d848SSeongJae Park 	for (i = 0; i < nr_ctxs; i++) {
3502224d848SSeongJae Park 		err = __damon_start(ctxs[i]);
3512224d848SSeongJae Park 		if (err)
3522224d848SSeongJae Park 			break;
3532224d848SSeongJae Park 		nr_running_ctxs++;
3542224d848SSeongJae Park 	}
3552224d848SSeongJae Park 	mutex_unlock(&damon_lock);
3562224d848SSeongJae Park 
3572224d848SSeongJae Park 	return err;
3582224d848SSeongJae Park }
3592224d848SSeongJae Park 
/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Sets the stop flag and then waits until the kdamond thread has cleared
 * ctx->kdamond, polling roughly once per sampling interval.
 *
 * Return: 0 on success, -EPERM if no kdamond was running for @ctx.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ctx->kdamond_stop = true;
		/* Drop the lock so kdamond_fn can take it and exit */
		mutex_unlock(&ctx->kdamond_lock);
		while (damon_kdamond_running(ctx))
			usleep_range(ctx->sample_interval,
					ctx->sample_interval * 2);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}
3812224d848SSeongJae Park 
/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Stops at the first context that fails to stop; the remaining contexts
 * are left running in that case.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
4022224d848SSeongJae Park 
/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to current time for next check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	/* @interval is in microseconds; the comparison is in nanoseconds */
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}
4252224d848SSeongJae Park 
/*
 * Check whether it is time to flush the aggregated information
 *
 * Also resets the aggregation baseline time when the interval has passed.
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
4342224d848SSeongJae Park 
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 *
 * Emits a tracepoint per region first, so the aggregated values can be
 * recorded before being zeroed.
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->nr_accesses = 0;
		}
	}
}
451f23b8eeeSSeongJae Park 
/* Size of a monitoring region; @r is parenthesized so expression
 * arguments such as '&reg' expand correctly. */
#define sz_damon_region(r) ((r)->ar.end - (r)->ar.start)
453b9a6ac4eSSeongJae Park 
/*
 * Merge two adjacent regions into one region
 *
 * The merged region @l keeps the size-weighted average of the two
 * 'nr_accesses' values and absorbs @r's address range; @r is destroyed.
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
467b9a6ac4eSSeongJae Park 
/* Absolute difference of two values; arguments are parenthesized so
 * expression arguments expand correctly (each is still evaluated twice) */
#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
469b9a6ac4eSSeongJae Park 
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		/*
		 * Merge @r into @prev only when the two are contiguous,
		 * their access frequencies differ by at most @thres, and
		 * the merged region would not exceed @sz_limit.
		 */
		if (prev && prev->ar.end == r->ar.start &&
		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
491b9a6ac4eSSeongJae Park 
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
511b9a6ac4eSSeongJae Park 
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 *
 * Silently does nothing if the allocation of the second sub-region fails,
 * leaving @r unchanged.  @ctx is currently unused here.
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	/* Shrink @r to [start, start + sz_r); @new covers the remainder */
	r->ar.end = new->ar.start;

	damon_insert_region(new, r, damon_next_region(r), t);
}
532b9a6ac4eSSeongJae Park 
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				     struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		/* Stop splitting once the region becomes too small */
		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			/* Continue splitting the left sub-region */
			sz_region = sz_sub;
		}
	}
}
561b9a6ac4eSSeongJae Park 
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * current total number of the regions is equal or smaller than half of the
 * user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 *
 * NOTE: 'last_nr_regions' is function-static, so it is shared among all
 * monitoring contexts calling this function.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
595b9a6ac4eSSeongJae Park 
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Also resets the primitive-update baseline time when the interval passed.
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}
6062224d848SSeongJae Park 
/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if need to stop current monitoring.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;
	bool stop;

	mutex_lock(&ctx->kdamond_lock);
	stop = ctx->kdamond_stop;
	mutex_unlock(&ctx->kdamond_lock);
	if (stop)
		return true;

	/* Without a validity check, target invalidation never stops us */
	if (!ctx->primitive.target_valid)
		return false;

	/* Keep running as long as at least one target is still valid */
	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}
6362224d848SSeongJae Park 
/* Request the kdamond of @ctx to stop at its next need-stop check. */
static void set_kdamond_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond_stop = true;
	mutex_unlock(&ctx->kdamond_lock);
}
6432224d848SSeongJae Park 
/*
 * The monitoring daemon that runs as a kernel thread
 *
 * Repeats sampling, access checking, aggregation, and adaptive region
 * splitting/merging until kdamond_need_stop() says to quit, then tears
 * everything down and clears ctx->kdamond.
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	mutex_lock(&ctx->kdamond_lock);
	pr_debug("kdamond (%d) starts\n", ctx->kdamond->pid);
	mutex_unlock(&ctx->kdamond_lock);

	/* Let the primitive prepare itself; the user gets a first say too */
	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	/* One iteration per sampling interval */
	while (!kdamond_need_stop(ctx)) {
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			/* Merge threshold: 10% of the peak access count */
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		/* Refresh targets and the size limit periodically */
		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	/* Tear down all monitoring results */
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", ctx->kdamond->pid);
	/* Publish that this kdamond is gone; __damon_stop() polls for this */
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}
71917ccae8bSSeongJae Park 
72017ccae8bSSeongJae Park #include "core-test.h"
721