xref: /openbmc/linux/mm/damon/core.c (revision 50585192)
12224d848SSeongJae Park // SPDX-License-Identifier: GPL-2.0
22224d848SSeongJae Park /*
32224d848SSeongJae Park  * Data Access Monitor
42224d848SSeongJae Park  *
52224d848SSeongJae Park  * Author: SeongJae Park <sjpark@amazon.de>
62224d848SSeongJae Park  */
72224d848SSeongJae Park 
82224d848SSeongJae Park #define pr_fmt(fmt) "damon: " fmt
92224d848SSeongJae Park 
102224d848SSeongJae Park #include <linux/damon.h>
112224d848SSeongJae Park #include <linux/delay.h>
122224d848SSeongJae Park #include <linux/kthread.h>
13b9a6ac4eSSeongJae Park #include <linux/random.h>
142224d848SSeongJae Park #include <linux/slab.h>
152224d848SSeongJae Park 
162fcb9362SSeongJae Park #define CREATE_TRACE_POINTS
172fcb9362SSeongJae Park #include <trace/events/damon.h>
182fcb9362SSeongJae Park 
1917ccae8bSSeongJae Park #ifdef CONFIG_DAMON_KUNIT_TEST
2017ccae8bSSeongJae Park #undef DAMON_MIN_REGION
2117ccae8bSSeongJae Park #define DAMON_MIN_REGION 1
2217ccae8bSSeongJae Park #endif
2317ccae8bSSeongJae Park 
/*
 * Get a pseudo-random number in [l, r).
 *
 * Arguments are fully parenthesized so that expression arguments
 * (e.g. damon_rand(a + b, c)) expand with the intended precedence.
 */
#define damon_rand(l, r) ((l) + prandom_u32_max((r) - (l)))
272224d848SSeongJae Park static DEFINE_MUTEX(damon_lock);
282224d848SSeongJae Park static int nr_running_ctxs;
292224d848SSeongJae Park 
30f23b8eeeSSeongJae Park /*
31f23b8eeeSSeongJae Park  * Construct a damon_region struct
32f23b8eeeSSeongJae Park  *
33f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
34f23b8eeeSSeongJae Park  */
35f23b8eeeSSeongJae Park struct damon_region *damon_new_region(unsigned long start, unsigned long end)
36f23b8eeeSSeongJae Park {
37f23b8eeeSSeongJae Park 	struct damon_region *region;
38f23b8eeeSSeongJae Park 
39f23b8eeeSSeongJae Park 	region = kmalloc(sizeof(*region), GFP_KERNEL);
40f23b8eeeSSeongJae Park 	if (!region)
41f23b8eeeSSeongJae Park 		return NULL;
42f23b8eeeSSeongJae Park 
43f23b8eeeSSeongJae Park 	region->ar.start = start;
44f23b8eeeSSeongJae Park 	region->ar.end = end;
45f23b8eeeSSeongJae Park 	region->nr_accesses = 0;
46f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&region->list);
47f23b8eeeSSeongJae Park 
48fda504faSSeongJae Park 	region->age = 0;
49fda504faSSeongJae Park 	region->last_nr_accesses = 0;
50fda504faSSeongJae Park 
51f23b8eeeSSeongJae Park 	return region;
52f23b8eeeSSeongJae Park }
53f23b8eeeSSeongJae Park 
54f23b8eeeSSeongJae Park /*
55f23b8eeeSSeongJae Park  * Add a region between two other regions
56f23b8eeeSSeongJae Park  */
/*
 * Add a region between two other regions
 *
 * Links @r between @prev and @next on @t's regions list and updates the
 * cached region count of @t accordingly.
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}
64f23b8eeeSSeongJae Park 
/*
 * Append @r at the tail of @t's regions list, keeping t->nr_regions in sync.
 */
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}
70f23b8eeeSSeongJae Park 
/*
 * Unlink @r from @t's regions list, keeping t->nr_regions in sync.
 * Does not free @r; see damon_destroy_region() for unlink + free.
 */
static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}
76f23b8eeeSSeongJae Park 
/* Free the memory of @r.  The caller must have already unlinked it. */
static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}
81f23b8eeeSSeongJae Park 
/* Unlink @r from @t and free it. */
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
87f23b8eeeSSeongJae Park 
881f366e42SSeongJae Park struct damos *damon_new_scheme(
891f366e42SSeongJae Park 		unsigned long min_sz_region, unsigned long max_sz_region,
901f366e42SSeongJae Park 		unsigned int min_nr_accesses, unsigned int max_nr_accesses,
911f366e42SSeongJae Park 		unsigned int min_age_region, unsigned int max_age_region,
922b8a248dSSeongJae Park 		enum damos_action action, struct damos_quota *quota)
931f366e42SSeongJae Park {
941f366e42SSeongJae Park 	struct damos *scheme;
951f366e42SSeongJae Park 
961f366e42SSeongJae Park 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
971f366e42SSeongJae Park 	if (!scheme)
981f366e42SSeongJae Park 		return NULL;
991f366e42SSeongJae Park 	scheme->min_sz_region = min_sz_region;
1001f366e42SSeongJae Park 	scheme->max_sz_region = max_sz_region;
1011f366e42SSeongJae Park 	scheme->min_nr_accesses = min_nr_accesses;
1021f366e42SSeongJae Park 	scheme->max_nr_accesses = max_nr_accesses;
1031f366e42SSeongJae Park 	scheme->min_age_region = min_age_region;
1041f366e42SSeongJae Park 	scheme->max_age_region = max_age_region;
1051f366e42SSeongJae Park 	scheme->action = action;
1062f0b548cSSeongJae Park 	scheme->stat_count = 0;
1072f0b548cSSeongJae Park 	scheme->stat_sz = 0;
1081f366e42SSeongJae Park 	INIT_LIST_HEAD(&scheme->list);
1091f366e42SSeongJae Park 
1102b8a248dSSeongJae Park 	scheme->quota.sz = quota->sz;
1112b8a248dSSeongJae Park 	scheme->quota.reset_interval = quota->reset_interval;
1122b8a248dSSeongJae Park 	scheme->quota.charged_sz = 0;
1132b8a248dSSeongJae Park 	scheme->quota.charged_from = 0;
114*50585192SSeongJae Park 	scheme->quota.charge_target_from = NULL;
115*50585192SSeongJae Park 	scheme->quota.charge_addr_from = 0;
1162b8a248dSSeongJae Park 
1171f366e42SSeongJae Park 	return scheme;
1181f366e42SSeongJae Park }
1191f366e42SSeongJae Park 
/* Append scheme @s at the tail of @ctx's schemes list. */
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}
1241f366e42SSeongJae Park 
/* Unlink scheme @s from its context's schemes list without freeing it. */
static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}
1291f366e42SSeongJae Park 
/* Free the memory of scheme @s.  The caller must have already unlinked it. */
static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}
1341f366e42SSeongJae Park 
/* Unlink scheme @s from its context and free it. */
void damon_destroy_scheme(struct damos *s)
{
	damon_del_scheme(s);
	damon_free_scheme(s);
}
1401f366e42SSeongJae Park 
141f23b8eeeSSeongJae Park /*
142f23b8eeeSSeongJae Park  * Construct a damon_target struct
143f23b8eeeSSeongJae Park  *
144f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
145f23b8eeeSSeongJae Park  */
146f23b8eeeSSeongJae Park struct damon_target *damon_new_target(unsigned long id)
147f23b8eeeSSeongJae Park {
148f23b8eeeSSeongJae Park 	struct damon_target *t;
149f23b8eeeSSeongJae Park 
150f23b8eeeSSeongJae Park 	t = kmalloc(sizeof(*t), GFP_KERNEL);
151f23b8eeeSSeongJae Park 	if (!t)
152f23b8eeeSSeongJae Park 		return NULL;
153f23b8eeeSSeongJae Park 
154f23b8eeeSSeongJae Park 	t->id = id;
155b9a6ac4eSSeongJae Park 	t->nr_regions = 0;
156f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&t->regions_list);
157f23b8eeeSSeongJae Park 
158f23b8eeeSSeongJae Park 	return t;
159f23b8eeeSSeongJae Park }
160f23b8eeeSSeongJae Park 
/* Append target @t at the tail of @ctx's adaptive targets list. */
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}
165f23b8eeeSSeongJae Park 
/* Unlink target @t from its context's targets list without freeing it. */
static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}
170f23b8eeeSSeongJae Park 
171f23b8eeeSSeongJae Park void damon_free_target(struct damon_target *t)
172f23b8eeeSSeongJae Park {
173f23b8eeeSSeongJae Park 	struct damon_region *r, *next;
174f23b8eeeSSeongJae Park 
175f23b8eeeSSeongJae Park 	damon_for_each_region_safe(r, next, t)
176f23b8eeeSSeongJae Park 		damon_free_region(r);
177f23b8eeeSSeongJae Park 	kfree(t);
178f23b8eeeSSeongJae Park }
179f23b8eeeSSeongJae Park 
/* Unlink target @t from its context and free it along with its regions. */
void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}
185f23b8eeeSSeongJae Park 
/* Return the cached number of monitoring regions of target @t. */
unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
190b9a6ac4eSSeongJae Park 
1912224d848SSeongJae Park struct damon_ctx *damon_new_ctx(void)
1922224d848SSeongJae Park {
1932224d848SSeongJae Park 	struct damon_ctx *ctx;
1942224d848SSeongJae Park 
1952224d848SSeongJae Park 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1962224d848SSeongJae Park 	if (!ctx)
1972224d848SSeongJae Park 		return NULL;
1982224d848SSeongJae Park 
1992224d848SSeongJae Park 	ctx->sample_interval = 5 * 1000;
2002224d848SSeongJae Park 	ctx->aggr_interval = 100 * 1000;
2012224d848SSeongJae Park 	ctx->primitive_update_interval = 60 * 1000 * 1000;
2022224d848SSeongJae Park 
2032224d848SSeongJae Park 	ktime_get_coarse_ts64(&ctx->last_aggregation);
2042224d848SSeongJae Park 	ctx->last_primitive_update = ctx->last_aggregation;
2052224d848SSeongJae Park 
2062224d848SSeongJae Park 	mutex_init(&ctx->kdamond_lock);
2072224d848SSeongJae Park 
208b9a6ac4eSSeongJae Park 	ctx->min_nr_regions = 10;
209b9a6ac4eSSeongJae Park 	ctx->max_nr_regions = 1000;
210b9a6ac4eSSeongJae Park 
211b9a6ac4eSSeongJae Park 	INIT_LIST_HEAD(&ctx->adaptive_targets);
2121f366e42SSeongJae Park 	INIT_LIST_HEAD(&ctx->schemes);
2132224d848SSeongJae Park 
2142224d848SSeongJae Park 	return ctx;
2152224d848SSeongJae Park }
2162224d848SSeongJae Park 
/*
 * Destroy every monitoring target of @ctx.
 *
 * If the monitoring primitive provides its own cleanup callback, target
 * destruction is delegated entirely to it; otherwise each target is
 * destroyed generically here.
 */
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}
229f23b8eeeSSeongJae Park 
2302224d848SSeongJae Park void damon_destroy_ctx(struct damon_ctx *ctx)
2312224d848SSeongJae Park {
2321f366e42SSeongJae Park 	struct damos *s, *next_s;
2331f366e42SSeongJae Park 
234f23b8eeeSSeongJae Park 	damon_destroy_targets(ctx);
2351f366e42SSeongJae Park 
2361f366e42SSeongJae Park 	damon_for_each_scheme_safe(s, next_s, ctx)
2371f366e42SSeongJae Park 		damon_destroy_scheme(s);
2381f366e42SSeongJae Park 
2392224d848SSeongJae Park 	kfree(ctx);
2402224d848SSeongJae Park }
2412224d848SSeongJae Park 
2422224d848SSeongJae Park /**
2434bc05954SSeongJae Park  * damon_set_targets() - Set monitoring targets.
2444bc05954SSeongJae Park  * @ctx:	monitoring context
2454bc05954SSeongJae Park  * @ids:	array of target ids
2464bc05954SSeongJae Park  * @nr_ids:	number of entries in @ids
2474bc05954SSeongJae Park  *
2484bc05954SSeongJae Park  * This function should not be called while the kdamond is running.
2494bc05954SSeongJae Park  *
2504bc05954SSeongJae Park  * Return: 0 on success, negative error code otherwise.
2514bc05954SSeongJae Park  */
2524bc05954SSeongJae Park int damon_set_targets(struct damon_ctx *ctx,
2534bc05954SSeongJae Park 		      unsigned long *ids, ssize_t nr_ids)
2544bc05954SSeongJae Park {
2554bc05954SSeongJae Park 	ssize_t i;
2564bc05954SSeongJae Park 	struct damon_target *t, *next;
2574bc05954SSeongJae Park 
2584bc05954SSeongJae Park 	damon_destroy_targets(ctx);
2594bc05954SSeongJae Park 
2604bc05954SSeongJae Park 	for (i = 0; i < nr_ids; i++) {
2614bc05954SSeongJae Park 		t = damon_new_target(ids[i]);
2624bc05954SSeongJae Park 		if (!t) {
2634bc05954SSeongJae Park 			pr_err("Failed to alloc damon_target\n");
2644bc05954SSeongJae Park 			/* The caller should do cleanup of the ids itself */
2654bc05954SSeongJae Park 			damon_for_each_target_safe(t, next, ctx)
2664bc05954SSeongJae Park 				damon_destroy_target(t);
2674bc05954SSeongJae Park 			return -ENOMEM;
2684bc05954SSeongJae Park 		}
2694bc05954SSeongJae Park 		damon_add_target(ctx, t);
2704bc05954SSeongJae Park 	}
2714bc05954SSeongJae Park 
2724bc05954SSeongJae Park 	return 0;
2734bc05954SSeongJae Park }
2744bc05954SSeongJae Park 
2754bc05954SSeongJae Park /**
2762224d848SSeongJae Park  * damon_set_attrs() - Set attributes for the monitoring.
2772224d848SSeongJae Park  * @ctx:		monitoring context
2782224d848SSeongJae Park  * @sample_int:		time interval between samplings
2792224d848SSeongJae Park  * @aggr_int:		time interval between aggregations
2802224d848SSeongJae Park  * @primitive_upd_int:	time interval between monitoring primitive updates
281b9a6ac4eSSeongJae Park  * @min_nr_reg:		minimal number of regions
282b9a6ac4eSSeongJae Park  * @max_nr_reg:		maximum number of regions
2832224d848SSeongJae Park  *
2842224d848SSeongJae Park  * This function should not be called while the kdamond is running.
2852224d848SSeongJae Park  * Every time interval is in micro-seconds.
2862224d848SSeongJae Park  *
2872224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
2882224d848SSeongJae Park  */
2892224d848SSeongJae Park int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
290b9a6ac4eSSeongJae Park 		    unsigned long aggr_int, unsigned long primitive_upd_int,
291b9a6ac4eSSeongJae Park 		    unsigned long min_nr_reg, unsigned long max_nr_reg)
2922224d848SSeongJae Park {
293b9a6ac4eSSeongJae Park 	if (min_nr_reg < 3) {
294b9a6ac4eSSeongJae Park 		pr_err("min_nr_regions (%lu) must be at least 3\n",
295b9a6ac4eSSeongJae Park 				min_nr_reg);
296b9a6ac4eSSeongJae Park 		return -EINVAL;
297b9a6ac4eSSeongJae Park 	}
298b9a6ac4eSSeongJae Park 	if (min_nr_reg > max_nr_reg) {
299b9a6ac4eSSeongJae Park 		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
300b9a6ac4eSSeongJae Park 				min_nr_reg, max_nr_reg);
301b9a6ac4eSSeongJae Park 		return -EINVAL;
302b9a6ac4eSSeongJae Park 	}
303b9a6ac4eSSeongJae Park 
3042224d848SSeongJae Park 	ctx->sample_interval = sample_int;
3052224d848SSeongJae Park 	ctx->aggr_interval = aggr_int;
3062224d848SSeongJae Park 	ctx->primitive_update_interval = primitive_upd_int;
307b9a6ac4eSSeongJae Park 	ctx->min_nr_regions = min_nr_reg;
308b9a6ac4eSSeongJae Park 	ctx->max_nr_regions = max_nr_reg;
3092224d848SSeongJae Park 
3102224d848SSeongJae Park 	return 0;
3112224d848SSeongJae Park }
3122224d848SSeongJae Park 
3134bc05954SSeongJae Park /**
3141f366e42SSeongJae Park  * damon_set_schemes() - Set data access monitoring based operation schemes.
3151f366e42SSeongJae Park  * @ctx:	monitoring context
3161f366e42SSeongJae Park  * @schemes:	array of the schemes
3171f366e42SSeongJae Park  * @nr_schemes:	number of entries in @schemes
3181f366e42SSeongJae Park  *
3191f366e42SSeongJae Park  * This function should not be called while the kdamond of the context is
3201f366e42SSeongJae Park  * running.
3211f366e42SSeongJae Park  *
3221f366e42SSeongJae Park  * Return: 0 if success, or negative error code otherwise.
3231f366e42SSeongJae Park  */
3241f366e42SSeongJae Park int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
3251f366e42SSeongJae Park 			ssize_t nr_schemes)
3261f366e42SSeongJae Park {
3271f366e42SSeongJae Park 	struct damos *s, *next;
3281f366e42SSeongJae Park 	ssize_t i;
3291f366e42SSeongJae Park 
3301f366e42SSeongJae Park 	damon_for_each_scheme_safe(s, next, ctx)
3311f366e42SSeongJae Park 		damon_destroy_scheme(s);
3321f366e42SSeongJae Park 	for (i = 0; i < nr_schemes; i++)
3331f366e42SSeongJae Park 		damon_add_scheme(ctx, schemes[i]);
3341f366e42SSeongJae Park 	return 0;
3351f366e42SSeongJae Park }
3361f366e42SSeongJae Park 
3371f366e42SSeongJae Park /**
3384bc05954SSeongJae Park  * damon_nr_running_ctxs() - Return number of currently running contexts.
3394bc05954SSeongJae Park  */
3404bc05954SSeongJae Park int damon_nr_running_ctxs(void)
3414bc05954SSeongJae Park {
3424bc05954SSeongJae Park 	int nr_ctxs;
3434bc05954SSeongJae Park 
3444bc05954SSeongJae Park 	mutex_lock(&damon_lock);
3454bc05954SSeongJae Park 	nr_ctxs = nr_running_ctxs;
3464bc05954SSeongJae Park 	mutex_unlock(&damon_lock);
3474bc05954SSeongJae Park 
3484bc05954SSeongJae Park 	return nr_ctxs;
3494bc05954SSeongJae Park }
3504bc05954SSeongJae Park 
351b9a6ac4eSSeongJae Park /* Returns the size upper limit for each monitoring region */
352b9a6ac4eSSeongJae Park static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
353b9a6ac4eSSeongJae Park {
354b9a6ac4eSSeongJae Park 	struct damon_target *t;
355b9a6ac4eSSeongJae Park 	struct damon_region *r;
356b9a6ac4eSSeongJae Park 	unsigned long sz = 0;
357b9a6ac4eSSeongJae Park 
358b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx) {
359b9a6ac4eSSeongJae Park 		damon_for_each_region(r, t)
360b9a6ac4eSSeongJae Park 			sz += r->ar.end - r->ar.start;
361b9a6ac4eSSeongJae Park 	}
362b9a6ac4eSSeongJae Park 
363b9a6ac4eSSeongJae Park 	if (ctx->min_nr_regions)
364b9a6ac4eSSeongJae Park 		sz /= ctx->min_nr_regions;
365b9a6ac4eSSeongJae Park 	if (sz < DAMON_MIN_REGION)
366b9a6ac4eSSeongJae Park 		sz = DAMON_MIN_REGION;
367b9a6ac4eSSeongJae Park 
368b9a6ac4eSSeongJae Park 	return sz;
369b9a6ac4eSSeongJae Park }
370b9a6ac4eSSeongJae Park 
3712224d848SSeongJae Park static bool damon_kdamond_running(struct damon_ctx *ctx)
3722224d848SSeongJae Park {
3732224d848SSeongJae Park 	bool running;
3742224d848SSeongJae Park 
3752224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
3762224d848SSeongJae Park 	running = ctx->kdamond != NULL;
3772224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
3782224d848SSeongJae Park 
3792224d848SSeongJae Park 	return running;
3802224d848SSeongJae Park }
3812224d848SSeongJae Park 
3822224d848SSeongJae Park static int kdamond_fn(void *data);
3832224d848SSeongJae Park 
/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is hold.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;	/* a kdamond is already attached to @ctx */

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond_stop = false;
		/*
		 * Name the thread after the number of already-running
		 * contexts; damon_lock (held by the caller) keeps the
		 * counter stable here.
		 */
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			/* don't leave an ERR_PTR behind as "running" */
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}
4112224d848SSeongJae Park 
4122224d848SSeongJae Park /**
4132224d848SSeongJae Park  * damon_start() - Starts the monitorings for a given group of contexts.
4142224d848SSeongJae Park  * @ctxs:	an array of the pointers for contexts to start monitoring
4152224d848SSeongJae Park  * @nr_ctxs:	size of @ctxs
4162224d848SSeongJae Park  *
4172224d848SSeongJae Park  * This function starts a group of monitoring threads for a group of monitoring
4182224d848SSeongJae Park  * contexts.  One thread per each context is created and run in parallel.  The
4192224d848SSeongJae Park  * caller should handle synchronization between the threads by itself.  If a
4202224d848SSeongJae Park  * group of threads that created by other 'damon_start()' call is currently
4212224d848SSeongJae Park  * running, this function does nothing but returns -EBUSY.
4222224d848SSeongJae Park  *
4232224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
4242224d848SSeongJae Park  */
4252224d848SSeongJae Park int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
4262224d848SSeongJae Park {
4272224d848SSeongJae Park 	int i;
4282224d848SSeongJae Park 	int err = 0;
4292224d848SSeongJae Park 
4302224d848SSeongJae Park 	mutex_lock(&damon_lock);
4312224d848SSeongJae Park 	if (nr_running_ctxs) {
4322224d848SSeongJae Park 		mutex_unlock(&damon_lock);
4332224d848SSeongJae Park 		return -EBUSY;
4342224d848SSeongJae Park 	}
4352224d848SSeongJae Park 
4362224d848SSeongJae Park 	for (i = 0; i < nr_ctxs; i++) {
4372224d848SSeongJae Park 		err = __damon_start(ctxs[i]);
4382224d848SSeongJae Park 		if (err)
4392224d848SSeongJae Park 			break;
4402224d848SSeongJae Park 		nr_running_ctxs++;
4412224d848SSeongJae Park 	}
4422224d848SSeongJae Park 	mutex_unlock(&damon_lock);
4432224d848SSeongJae Park 
4442224d848SSeongJae Park 	return err;
4452224d848SSeongJae Park }
4462224d848SSeongJae Park 
/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Sets the stop flag and then waits (polling at roughly the sampling
 * interval) until the kdamond has actually exited.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ctx->kdamond_stop = true;
		/*
		 * Drop the lock before waiting so the kdamond can take it
		 * to observe the stop flag and clear ctx->kdamond.
		 */
		mutex_unlock(&ctx->kdamond_lock);
		while (damon_kdamond_running(ctx))
			usleep_range(ctx->sample_interval,
					ctx->sample_interval * 2);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	/* no kdamond was running for this context */
	return -EPERM;
}
4682224d848SSeongJae Park 
/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int err = 0;
	int i;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}

	return err;
}
4892224d848SSeongJae Park 
4902224d848SSeongJae Park /*
4912224d848SSeongJae Park  * damon_check_reset_time_interval() - Check if a time interval is elapsed.
4922224d848SSeongJae Park  * @baseline:	the time to check whether the interval has elapsed since
4932224d848SSeongJae Park  * @interval:	the time interval (microseconds)
4942224d848SSeongJae Park  *
4952224d848SSeongJae Park  * See whether the given time interval has passed since the given baseline
4962224d848SSeongJae Park  * time.  If so, it also updates the baseline to current time for next check.
4972224d848SSeongJae Park  *
4982224d848SSeongJae Park  * Return:	true if the time interval has passed, or false otherwise.
4992224d848SSeongJae Park  */
5002224d848SSeongJae Park static bool damon_check_reset_time_interval(struct timespec64 *baseline,
5012224d848SSeongJae Park 		unsigned long interval)
5022224d848SSeongJae Park {
5032224d848SSeongJae Park 	struct timespec64 now;
5042224d848SSeongJae Park 
5052224d848SSeongJae Park 	ktime_get_coarse_ts64(&now);
5062224d848SSeongJae Park 	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
5072224d848SSeongJae Park 			interval * 1000)
5082224d848SSeongJae Park 		return false;
5092224d848SSeongJae Park 	*baseline = now;
5102224d848SSeongJae Park 	return true;
5112224d848SSeongJae Park }
5122224d848SSeongJae Park 
/*
 * Check whether it is time to flush the aggregated information
 *
 * Also resets the aggregation baseline time when the interval has passed.
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
5212224d848SSeongJae Park 
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 *
 * The current 'nr_accesses' is preserved in 'last_nr_accesses' so that the
 * adaptive merging logic can compare consecutive aggregation intervals, and
 * each region's final count is reported via the tracepoint first.
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
	}
}
539f23b8eeeSSeongJae Park 
5402b8a248dSSeongJae Park static void damon_split_region_at(struct damon_ctx *ctx,
5412b8a248dSSeongJae Park 		struct damon_target *t, struct damon_region *r,
5422b8a248dSSeongJae Park 		unsigned long sz_r);
5432b8a248dSSeongJae Park 
/*
 * Apply every scheme of @c that matches region @r of target @t.
 *
 * For quota-limited schemes, regions that were already charged in the
 * current charge window are skipped, and regions may be split so that only
 * the uncharged (or within-quota) part is acted on.  Note that @r may be
 * replaced by its split-off successor inside the loop.
 */
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long sz = r->ar.end - r->ar.start;

		/* Check the quota */
		if (quota->sz && quota->charged_sz >= quota->sz)
			continue;

		/* Skip previously charged regions */
		if (quota->charge_target_from) {
			if (t != quota->charge_target_from)
				continue;
			if (r == damon_last_region(t)) {
				/*
				 * Reached the end of the charged target;
				 * resume normal processing from the next
				 * call on.
				 */
				quota->charge_target_from = NULL;
				quota->charge_addr_from = 0;
				continue;
			}
			if (quota->charge_addr_from &&
					r->ar.end <= quota->charge_addr_from)
				continue;

			if (quota->charge_addr_from && r->ar.start <
					quota->charge_addr_from) {
				/*
				 * Split off the already-charged head of the
				 * region, aligned to the minimum region
				 * size, and continue with the tail.
				 */
				sz = ALIGN_DOWN(quota->charge_addr_from -
						r->ar.start, DAMON_MIN_REGION);
				if (!sz) {
					if (r->ar.end - r->ar.start <=
							DAMON_MIN_REGION)
						continue;
					sz = DAMON_MIN_REGION;
				}
				damon_split_region_at(c, t, r, sz);
				r = damon_next_region(r);
				sz = r->ar.end - r->ar.start;
			}
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
		}

		/* Check the target regions condition */
		if (sz < s->min_sz_region || s->max_sz_region < sz)
			continue;
		if (r->nr_accesses < s->min_nr_accesses ||
				s->max_nr_accesses < r->nr_accesses)
			continue;
		if (r->age < s->min_age_region || s->max_age_region < r->age)
			continue;

		/* Apply the scheme */
		if (c->primitive.apply_scheme) {
			if (quota->sz && quota->charged_sz + sz > quota->sz) {
				/*
				 * Only part of the region fits the remaining
				 * quota; split it so only that part is
				 * applied and charged.
				 */
				sz = ALIGN_DOWN(quota->sz - quota->charged_sz,
						DAMON_MIN_REGION);
				if (!sz)
					goto update_stat;
				damon_split_region_at(c, t, r, sz);
			}
			c->primitive.apply_scheme(c, t, r, s);
			quota->charged_sz += sz;
			if (quota->sz && quota->charged_sz >= quota->sz) {
				/*
				 * Quota exhausted; remember where to resume
				 * in the next charge window.
				 */
				quota->charge_target_from = t;
				quota->charge_addr_from = r->ar.end + 1;
			}
		}
		if (s->action != DAMOS_STAT)
			r->age = 0;	/* the region was just acted on */

update_stat:
		s->stat_count++;
		s->stat_sz += sz;
	}
}
6221f366e42SSeongJae Park 
/*
 * Apply every scheme of @c to every region of every target.
 *
 * Before walking the regions, quota charge windows whose reset interval has
 * elapsed are restarted so the per-window charged size begins from zero.
 */
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!quota->sz)
			continue;	/* no quota limit for this scheme */

		/* New charge window starts */
		if (time_after_eq(jiffies, quota->charged_from +
					msecs_to_jiffies(
						quota->reset_interval))) {
			quota->charged_from = jiffies;
			quota->charged_sz = 0;
		}
	}

	/* Safe iteration: schemes may split regions while being applied */
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}
6491f366e42SSeongJae Park 
650b9a6ac4eSSeongJae Park #define sz_damon_region(r) (r->ar.end - r->ar.start)
651b9a6ac4eSSeongJae Park 
652b9a6ac4eSSeongJae Park /*
653b9a6ac4eSSeongJae Park  * Merge two adjacent regions into one region
654b9a6ac4eSSeongJae Park  */
655b9a6ac4eSSeongJae Park static void damon_merge_two_regions(struct damon_target *t,
656b9a6ac4eSSeongJae Park 		struct damon_region *l, struct damon_region *r)
657b9a6ac4eSSeongJae Park {
658b9a6ac4eSSeongJae Park 	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
659b9a6ac4eSSeongJae Park 
660b9a6ac4eSSeongJae Park 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
661b9a6ac4eSSeongJae Park 			(sz_l + sz_r);
662fda504faSSeongJae Park 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
663b9a6ac4eSSeongJae Park 	l->ar.end = r->ar.end;
664b9a6ac4eSSeongJae Park 	damon_destroy_region(r, t);
665b9a6ac4eSSeongJae Park }
666b9a6ac4eSSeongJae Park 
/*
 * Absolute difference of two values.  Arguments are fully parenthesized
 * (CERT PRE01-C); the original expansion mis-evaluated compound argument
 * expressions such as 'diff_of(x + 1, y)'.  Note each argument is still
 * evaluated twice, so side-effecting arguments must not be passed.
 */
#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
668b9a6ac4eSSeongJae Park 
669b9a6ac4eSSeongJae Park /*
670b9a6ac4eSSeongJae Park  * Merge adjacent regions having similar access frequencies
671b9a6ac4eSSeongJae Park  *
672b9a6ac4eSSeongJae Park  * t		target affected by this merge operation
673b9a6ac4eSSeongJae Park  * thres	'->nr_accesses' diff threshold for the merge
674b9a6ac4eSSeongJae Park  * sz_limit	size upper limit of each region
675b9a6ac4eSSeongJae Park  */
676b9a6ac4eSSeongJae Park static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
677b9a6ac4eSSeongJae Park 				   unsigned long sz_limit)
678b9a6ac4eSSeongJae Park {
679b9a6ac4eSSeongJae Park 	struct damon_region *r, *prev = NULL, *next;
680b9a6ac4eSSeongJae Park 
681b9a6ac4eSSeongJae Park 	damon_for_each_region_safe(r, next, t) {
682fda504faSSeongJae Park 		if (diff_of(r->nr_accesses, r->last_nr_accesses) > thres)
683fda504faSSeongJae Park 			r->age = 0;
684fda504faSSeongJae Park 		else
685fda504faSSeongJae Park 			r->age++;
686fda504faSSeongJae Park 
687b9a6ac4eSSeongJae Park 		if (prev && prev->ar.end == r->ar.start &&
688b9a6ac4eSSeongJae Park 		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
689b9a6ac4eSSeongJae Park 		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
690b9a6ac4eSSeongJae Park 			damon_merge_two_regions(t, prev, r);
691b9a6ac4eSSeongJae Park 		else
692b9a6ac4eSSeongJae Park 			prev = r;
693b9a6ac4eSSeongJae Park 	}
694b9a6ac4eSSeongJae Park }
695b9a6ac4eSSeongJae Park 
696b9a6ac4eSSeongJae Park /*
697b9a6ac4eSSeongJae Park  * Merge adjacent regions having similar access frequencies
698b9a6ac4eSSeongJae Park  *
699b9a6ac4eSSeongJae Park  * threshold	'->nr_accesses' diff threshold for the merge
700b9a6ac4eSSeongJae Park  * sz_limit	size upper limit of each region
701b9a6ac4eSSeongJae Park  *
702b9a6ac4eSSeongJae Park  * This function merges monitoring target regions which are adjacent and their
703b9a6ac4eSSeongJae Park  * access frequencies are similar.  This is for minimizing the monitoring
704b9a6ac4eSSeongJae Park  * overhead under the dynamically changeable access pattern.  If a merge was
705b9a6ac4eSSeongJae Park  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
706b9a6ac4eSSeongJae Park  */
707b9a6ac4eSSeongJae Park static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
708b9a6ac4eSSeongJae Park 				  unsigned long sz_limit)
709b9a6ac4eSSeongJae Park {
710b9a6ac4eSSeongJae Park 	struct damon_target *t;
711b9a6ac4eSSeongJae Park 
712b9a6ac4eSSeongJae Park 	damon_for_each_target(t, c)
713b9a6ac4eSSeongJae Park 		damon_merge_regions_of(t, threshold, sz_limit);
714b9a6ac4eSSeongJae Park }
715b9a6ac4eSSeongJae Park 
716b9a6ac4eSSeongJae Park /*
717b9a6ac4eSSeongJae Park  * Split a region in two
718b9a6ac4eSSeongJae Park  *
719b9a6ac4eSSeongJae Park  * r		the region to be split
720b9a6ac4eSSeongJae Park  * sz_r		size of the first sub-region that will be made
721b9a6ac4eSSeongJae Park  */
722b9a6ac4eSSeongJae Park static void damon_split_region_at(struct damon_ctx *ctx,
723b9a6ac4eSSeongJae Park 		struct damon_target *t, struct damon_region *r,
724b9a6ac4eSSeongJae Park 		unsigned long sz_r)
725b9a6ac4eSSeongJae Park {
726b9a6ac4eSSeongJae Park 	struct damon_region *new;
727b9a6ac4eSSeongJae Park 
728b9a6ac4eSSeongJae Park 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
729b9a6ac4eSSeongJae Park 	if (!new)
730b9a6ac4eSSeongJae Park 		return;
731b9a6ac4eSSeongJae Park 
732b9a6ac4eSSeongJae Park 	r->ar.end = new->ar.start;
733b9a6ac4eSSeongJae Park 
734fda504faSSeongJae Park 	new->age = r->age;
735fda504faSSeongJae Park 	new->last_nr_accesses = r->last_nr_accesses;
736fda504faSSeongJae Park 
737b9a6ac4eSSeongJae Park 	damon_insert_region(new, r, damon_next_region(r), t);
738b9a6ac4eSSeongJae Park }
739b9a6ac4eSSeongJae Park 
740b9a6ac4eSSeongJae Park /* Split every region in the given target into 'nr_subs' regions */
741b9a6ac4eSSeongJae Park static void damon_split_regions_of(struct damon_ctx *ctx,
742b9a6ac4eSSeongJae Park 				     struct damon_target *t, int nr_subs)
743b9a6ac4eSSeongJae Park {
744b9a6ac4eSSeongJae Park 	struct damon_region *r, *next;
745b9a6ac4eSSeongJae Park 	unsigned long sz_region, sz_sub = 0;
746b9a6ac4eSSeongJae Park 	int i;
747b9a6ac4eSSeongJae Park 
748b9a6ac4eSSeongJae Park 	damon_for_each_region_safe(r, next, t) {
749b9a6ac4eSSeongJae Park 		sz_region = r->ar.end - r->ar.start;
750b9a6ac4eSSeongJae Park 
751b9a6ac4eSSeongJae Park 		for (i = 0; i < nr_subs - 1 &&
752b9a6ac4eSSeongJae Park 				sz_region > 2 * DAMON_MIN_REGION; i++) {
753b9a6ac4eSSeongJae Park 			/*
754b9a6ac4eSSeongJae Park 			 * Randomly select size of left sub-region to be at
755b9a6ac4eSSeongJae Park 			 * least 10 percent and at most 90% of original region
756b9a6ac4eSSeongJae Park 			 */
757b9a6ac4eSSeongJae Park 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
758b9a6ac4eSSeongJae Park 					sz_region / 10, DAMON_MIN_REGION);
759b9a6ac4eSSeongJae Park 			/* Do not allow blank region */
760b9a6ac4eSSeongJae Park 			if (sz_sub == 0 || sz_sub >= sz_region)
761b9a6ac4eSSeongJae Park 				continue;
762b9a6ac4eSSeongJae Park 
763b9a6ac4eSSeongJae Park 			damon_split_region_at(ctx, t, r, sz_sub);
764b9a6ac4eSSeongJae Park 			sz_region = sz_sub;
765b9a6ac4eSSeongJae Park 		}
766b9a6ac4eSSeongJae Park 	}
767b9a6ac4eSSeongJae Park }
768b9a6ac4eSSeongJae Park 
769b9a6ac4eSSeongJae Park /*
770b9a6ac4eSSeongJae Park  * Split every target region into randomly-sized small regions
771b9a6ac4eSSeongJae Park  *
772b9a6ac4eSSeongJae Park  * This function splits every target region into random-sized small regions if
773b9a6ac4eSSeongJae Park  * current total number of the regions is equal or smaller than half of the
774b9a6ac4eSSeongJae Park  * user-specified maximum number of regions.  This is for maximizing the
775b9a6ac4eSSeongJae Park  * monitoring accuracy under the dynamically changeable access patterns.  If a
776b9a6ac4eSSeongJae Park  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
777b9a6ac4eSSeongJae Park  * it.
778b9a6ac4eSSeongJae Park  */
779b9a6ac4eSSeongJae Park static void kdamond_split_regions(struct damon_ctx *ctx)
780b9a6ac4eSSeongJae Park {
781b9a6ac4eSSeongJae Park 	struct damon_target *t;
782b9a6ac4eSSeongJae Park 	unsigned int nr_regions = 0;
783b9a6ac4eSSeongJae Park 	static unsigned int last_nr_regions;
784b9a6ac4eSSeongJae Park 	int nr_subregions = 2;
785b9a6ac4eSSeongJae Park 
786b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx)
787b9a6ac4eSSeongJae Park 		nr_regions += damon_nr_regions(t);
788b9a6ac4eSSeongJae Park 
789b9a6ac4eSSeongJae Park 	if (nr_regions > ctx->max_nr_regions / 2)
790b9a6ac4eSSeongJae Park 		return;
791b9a6ac4eSSeongJae Park 
792b9a6ac4eSSeongJae Park 	/* Maybe the middle of the region has different access frequency */
793b9a6ac4eSSeongJae Park 	if (last_nr_regions == nr_regions &&
794b9a6ac4eSSeongJae Park 			nr_regions < ctx->max_nr_regions / 3)
795b9a6ac4eSSeongJae Park 		nr_subregions = 3;
796b9a6ac4eSSeongJae Park 
797b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx)
798b9a6ac4eSSeongJae Park 		damon_split_regions_of(ctx, t, nr_subregions);
799b9a6ac4eSSeongJae Park 
800b9a6ac4eSSeongJae Park 	last_nr_regions = nr_regions;
801b9a6ac4eSSeongJae Park }
802b9a6ac4eSSeongJae Park 
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	/*
	 * NOTE(review): 'damon_check_reset_time_interval()' is defined
	 * elsewhere; judging from its name it presumably also resets
	 * '->last_primitive_update' once the interval has elapsed, so each
	 * elapse reports true only once — confirm against its definition.
	 */
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}
8132224d848SSeongJae Park 
8142224d848SSeongJae Park /*
8152224d848SSeongJae Park  * Check whether current monitoring should be stopped
8162224d848SSeongJae Park  *
8172224d848SSeongJae Park  * The monitoring is stopped when either the user requested to stop, or all
8182224d848SSeongJae Park  * monitoring targets are invalid.
8192224d848SSeongJae Park  *
8202224d848SSeongJae Park  * Returns true if need to stop current monitoring.
8212224d848SSeongJae Park  */
8222224d848SSeongJae Park static bool kdamond_need_stop(struct damon_ctx *ctx)
8232224d848SSeongJae Park {
824f23b8eeeSSeongJae Park 	struct damon_target *t;
8252224d848SSeongJae Park 	bool stop;
8262224d848SSeongJae Park 
8272224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
8282224d848SSeongJae Park 	stop = ctx->kdamond_stop;
8292224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
8302224d848SSeongJae Park 	if (stop)
8312224d848SSeongJae Park 		return true;
8322224d848SSeongJae Park 
8332224d848SSeongJae Park 	if (!ctx->primitive.target_valid)
8342224d848SSeongJae Park 		return false;
8352224d848SSeongJae Park 
836f23b8eeeSSeongJae Park 	damon_for_each_target(t, ctx) {
837f23b8eeeSSeongJae Park 		if (ctx->primitive.target_valid(t))
838f23b8eeeSSeongJae Park 			return false;
839f23b8eeeSSeongJae Park 	}
840f23b8eeeSSeongJae Park 
841f23b8eeeSSeongJae Park 	return true;
8422224d848SSeongJae Park }
8432224d848SSeongJae Park 
8442224d848SSeongJae Park static void set_kdamond_stop(struct damon_ctx *ctx)
8452224d848SSeongJae Park {
8462224d848SSeongJae Park 	mutex_lock(&ctx->kdamond_lock);
8472224d848SSeongJae Park 	ctx->kdamond_stop = true;
8482224d848SSeongJae Park 	mutex_unlock(&ctx->kdamond_lock);
8492224d848SSeongJae Park }
8502224d848SSeongJae Park 
/*
 * The monitoring daemon that runs as a kernel thread
 *
 * Repeats the sample / aggregate / split cycle until kdamond_need_stop()
 * reports true, then tears down the regions and clears the bookkeeping
 * that marks this context as running.  Always returns 0.
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	/*
	 * Primitive-specific initialization, then the user's start hook.
	 * A nonzero return from the hook requests a stop, which takes
	 * effect before the first loop iteration.
	 */
	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/* Arm the access checks, then give the user a hook */
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		/* Let accesses accumulate for one sampling interval */
		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		/*
		 * Once per aggregation interval: merge similar neighbors
		 * (threshold is 10% of the max observed access count),
		 * apply schemes, reset the counters, and re-split.
		 */
		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		/* Periodically let the primitive refresh its own state */
		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	/* Tear down all monitoring regions of every target */
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	/* User's terminate hook, then primitive-specific cleanup */
	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	/* Mark this context as having no running kdamond anymore */
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	/* Drop this context from the global running-contexts count */
	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}
92517ccae8bSSeongJae Park 
92617ccae8bSSeongJae Park #include "core-test.h"
927