xref: /openbmc/linux/mm/damon/core.c (revision 9f7b053a)
12224d848SSeongJae Park // SPDX-License-Identifier: GPL-2.0
22224d848SSeongJae Park /*
32224d848SSeongJae Park  * Data Access Monitor
42224d848SSeongJae Park  *
52224d848SSeongJae Park  * Author: SeongJae Park <sjpark@amazon.de>
62224d848SSeongJae Park  */
72224d848SSeongJae Park 
82224d848SSeongJae Park #define pr_fmt(fmt) "damon: " fmt
92224d848SSeongJae Park 
102224d848SSeongJae Park #include <linux/damon.h>
112224d848SSeongJae Park #include <linux/delay.h>
122224d848SSeongJae Park #include <linux/kthread.h>
13ee801b7dSSeongJae Park #include <linux/mm.h>
142224d848SSeongJae Park #include <linux/slab.h>
1538683e00SSeongJae Park #include <linux/string.h>
162224d848SSeongJae Park 
172fcb9362SSeongJae Park #define CREATE_TRACE_POINTS
182fcb9362SSeongJae Park #include <trace/events/damon.h>
192fcb9362SSeongJae Park 
2017ccae8bSSeongJae Park #ifdef CONFIG_DAMON_KUNIT_TEST
2117ccae8bSSeongJae Park #undef DAMON_MIN_REGION
2217ccae8bSSeongJae Park #define DAMON_MIN_REGION 1
2317ccae8bSSeongJae Park #endif
2417ccae8bSSeongJae Park 
252224d848SSeongJae Park static DEFINE_MUTEX(damon_lock);
262224d848SSeongJae Park static int nr_running_ctxs;
272224d848SSeongJae Park 
28*9f7b053aSSeongJae Park static DEFINE_MUTEX(damon_ops_lock);
29*9f7b053aSSeongJae Park static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
30*9f7b053aSSeongJae Park 
31*9f7b053aSSeongJae Park /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
32*9f7b053aSSeongJae Park static bool damon_registered_ops_id(enum damon_ops_id id)
33*9f7b053aSSeongJae Park {
34*9f7b053aSSeongJae Park 	struct damon_operations empty_ops = {};
35*9f7b053aSSeongJae Park 
36*9f7b053aSSeongJae Park 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
37*9f7b053aSSeongJae Park 		return false;
38*9f7b053aSSeongJae Park 	return true;
39*9f7b053aSSeongJae Park }
40*9f7b053aSSeongJae Park 
41*9f7b053aSSeongJae Park /**
42*9f7b053aSSeongJae Park  * damon_register_ops() - Register a monitoring operations set to DAMON.
43*9f7b053aSSeongJae Park  * @ops:	monitoring operations set to register.
44*9f7b053aSSeongJae Park  *
45*9f7b053aSSeongJae Park  * This function registers a monitoring operations set of valid &struct
46*9f7b053aSSeongJae Park  * damon_operations->id so that others can find and use them later.
47*9f7b053aSSeongJae Park  *
48*9f7b053aSSeongJae Park  * Return: 0 on success, negative error code otherwise.
49*9f7b053aSSeongJae Park  */
50*9f7b053aSSeongJae Park int damon_register_ops(struct damon_operations *ops)
51*9f7b053aSSeongJae Park {
52*9f7b053aSSeongJae Park 	int err = 0;
53*9f7b053aSSeongJae Park 
54*9f7b053aSSeongJae Park 	if (ops->id >= NR_DAMON_OPS)
55*9f7b053aSSeongJae Park 		return -EINVAL;
56*9f7b053aSSeongJae Park 	mutex_lock(&damon_ops_lock);
57*9f7b053aSSeongJae Park 	/* Fail for already registered ops */
58*9f7b053aSSeongJae Park 	if (damon_registered_ops_id(ops->id)) {
59*9f7b053aSSeongJae Park 		err = -EINVAL;
60*9f7b053aSSeongJae Park 		goto out;
61*9f7b053aSSeongJae Park 	}
62*9f7b053aSSeongJae Park 	damon_registered_ops[ops->id] = *ops;
63*9f7b053aSSeongJae Park out:
64*9f7b053aSSeongJae Park 	mutex_unlock(&damon_ops_lock);
65*9f7b053aSSeongJae Park 	return err;
66*9f7b053aSSeongJae Park }
67*9f7b053aSSeongJae Park 
68*9f7b053aSSeongJae Park /**
69*9f7b053aSSeongJae Park  * damon_select_ops() - Select a monitoring operations to use with the context.
70*9f7b053aSSeongJae Park  * @ctx:	monitoring context to use the operations.
71*9f7b053aSSeongJae Park  * @id:		id of the registered monitoring operations to select.
72*9f7b053aSSeongJae Park  *
73*9f7b053aSSeongJae Park  * This function finds registered monitoring operations set of @id and make
74*9f7b053aSSeongJae Park  * @ctx to use it.
75*9f7b053aSSeongJae Park  *
76*9f7b053aSSeongJae Park  * Return: 0 on success, negative error code otherwise.
77*9f7b053aSSeongJae Park  */
78*9f7b053aSSeongJae Park int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
79*9f7b053aSSeongJae Park {
80*9f7b053aSSeongJae Park 	int err = 0;
81*9f7b053aSSeongJae Park 
82*9f7b053aSSeongJae Park 	if (id >= NR_DAMON_OPS)
83*9f7b053aSSeongJae Park 		return -EINVAL;
84*9f7b053aSSeongJae Park 
85*9f7b053aSSeongJae Park 	mutex_lock(&damon_ops_lock);
86*9f7b053aSSeongJae Park 	if (!damon_registered_ops_id(id))
87*9f7b053aSSeongJae Park 		err = -EINVAL;
88*9f7b053aSSeongJae Park 	else
89*9f7b053aSSeongJae Park 		ctx->ops = damon_registered_ops[id];
90*9f7b053aSSeongJae Park 	mutex_unlock(&damon_ops_lock);
91*9f7b053aSSeongJae Park 	return err;
92*9f7b053aSSeongJae Park }
93*9f7b053aSSeongJae Park 
94f23b8eeeSSeongJae Park /*
95f23b8eeeSSeongJae Park  * Construct a damon_region struct
96f23b8eeeSSeongJae Park  *
97f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
98f23b8eeeSSeongJae Park  */
99f23b8eeeSSeongJae Park struct damon_region *damon_new_region(unsigned long start, unsigned long end)
100f23b8eeeSSeongJae Park {
101f23b8eeeSSeongJae Park 	struct damon_region *region;
102f23b8eeeSSeongJae Park 
103f23b8eeeSSeongJae Park 	region = kmalloc(sizeof(*region), GFP_KERNEL);
104f23b8eeeSSeongJae Park 	if (!region)
105f23b8eeeSSeongJae Park 		return NULL;
106f23b8eeeSSeongJae Park 
107f23b8eeeSSeongJae Park 	region->ar.start = start;
108f23b8eeeSSeongJae Park 	region->ar.end = end;
109f23b8eeeSSeongJae Park 	region->nr_accesses = 0;
110f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&region->list);
111f23b8eeeSSeongJae Park 
112fda504faSSeongJae Park 	region->age = 0;
113fda504faSSeongJae Park 	region->last_nr_accesses = 0;
114fda504faSSeongJae Park 
115f23b8eeeSSeongJae Park 	return region;
116f23b8eeeSSeongJae Park }
117f23b8eeeSSeongJae Park 
/* Append @r at the tail of @t's region list and bump the region count. */
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}
123f23b8eeeSSeongJae Park 
/* Unlink @r from @t's region list and decrement the region count. */
static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}
129f23b8eeeSSeongJae Park 
/* Release the memory of @r. */
static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}
134f23b8eeeSSeongJae Park 
/* Unlink @r from @t, then free it. */
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
140f23b8eeeSSeongJae Park 
1411f366e42SSeongJae Park struct damos *damon_new_scheme(
1421f366e42SSeongJae Park 		unsigned long min_sz_region, unsigned long max_sz_region,
1431f366e42SSeongJae Park 		unsigned int min_nr_accesses, unsigned int max_nr_accesses,
1441f366e42SSeongJae Park 		unsigned int min_age_region, unsigned int max_age_region,
145ee801b7dSSeongJae Park 		enum damos_action action, struct damos_quota *quota,
146ee801b7dSSeongJae Park 		struct damos_watermarks *wmarks)
1471f366e42SSeongJae Park {
1481f366e42SSeongJae Park 	struct damos *scheme;
1491f366e42SSeongJae Park 
1501f366e42SSeongJae Park 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
1511f366e42SSeongJae Park 	if (!scheme)
1521f366e42SSeongJae Park 		return NULL;
1531f366e42SSeongJae Park 	scheme->min_sz_region = min_sz_region;
1541f366e42SSeongJae Park 	scheme->max_sz_region = max_sz_region;
1551f366e42SSeongJae Park 	scheme->min_nr_accesses = min_nr_accesses;
1561f366e42SSeongJae Park 	scheme->max_nr_accesses = max_nr_accesses;
1571f366e42SSeongJae Park 	scheme->min_age_region = min_age_region;
1581f366e42SSeongJae Park 	scheme->max_age_region = max_age_region;
1591f366e42SSeongJae Park 	scheme->action = action;
1600e92c2eeSSeongJae Park 	scheme->stat = (struct damos_stat){};
1611f366e42SSeongJae Park 	INIT_LIST_HEAD(&scheme->list);
1621f366e42SSeongJae Park 
1631cd24303SSeongJae Park 	scheme->quota.ms = quota->ms;
1642b8a248dSSeongJae Park 	scheme->quota.sz = quota->sz;
1652b8a248dSSeongJae Park 	scheme->quota.reset_interval = quota->reset_interval;
16638683e00SSeongJae Park 	scheme->quota.weight_sz = quota->weight_sz;
16738683e00SSeongJae Park 	scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
16838683e00SSeongJae Park 	scheme->quota.weight_age = quota->weight_age;
1691cd24303SSeongJae Park 	scheme->quota.total_charged_sz = 0;
1701cd24303SSeongJae Park 	scheme->quota.total_charged_ns = 0;
1711cd24303SSeongJae Park 	scheme->quota.esz = 0;
1722b8a248dSSeongJae Park 	scheme->quota.charged_sz = 0;
1732b8a248dSSeongJae Park 	scheme->quota.charged_from = 0;
17450585192SSeongJae Park 	scheme->quota.charge_target_from = NULL;
17550585192SSeongJae Park 	scheme->quota.charge_addr_from = 0;
1762b8a248dSSeongJae Park 
177ee801b7dSSeongJae Park 	scheme->wmarks.metric = wmarks->metric;
178ee801b7dSSeongJae Park 	scheme->wmarks.interval = wmarks->interval;
179ee801b7dSSeongJae Park 	scheme->wmarks.high = wmarks->high;
180ee801b7dSSeongJae Park 	scheme->wmarks.mid = wmarks->mid;
181ee801b7dSSeongJae Park 	scheme->wmarks.low = wmarks->low;
182ee801b7dSSeongJae Park 	scheme->wmarks.activated = true;
183ee801b7dSSeongJae Park 
1841f366e42SSeongJae Park 	return scheme;
1851f366e42SSeongJae Park }
1861f366e42SSeongJae Park 
/* Append @s at the tail of @ctx's schemes list. */
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}
1911f366e42SSeongJae Park 
/* Unlink @s from its context's schemes list. */
static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}
1961f366e42SSeongJae Park 
/* Release the memory of @s. */
static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}
2011f366e42SSeongJae Park 
/* Unlink @s from its context, then free it. */
void damon_destroy_scheme(struct damos *s)
{
	damon_del_scheme(s);
	damon_free_scheme(s);
}
2071f366e42SSeongJae Park 
208f23b8eeeSSeongJae Park /*
209f23b8eeeSSeongJae Park  * Construct a damon_target struct
210f23b8eeeSSeongJae Park  *
211f23b8eeeSSeongJae Park  * Returns the pointer to the new struct if success, or NULL otherwise
212f23b8eeeSSeongJae Park  */
2131971bd63SSeongJae Park struct damon_target *damon_new_target(void)
214f23b8eeeSSeongJae Park {
215f23b8eeeSSeongJae Park 	struct damon_target *t;
216f23b8eeeSSeongJae Park 
217f23b8eeeSSeongJae Park 	t = kmalloc(sizeof(*t), GFP_KERNEL);
218f23b8eeeSSeongJae Park 	if (!t)
219f23b8eeeSSeongJae Park 		return NULL;
220f23b8eeeSSeongJae Park 
2211971bd63SSeongJae Park 	t->pid = NULL;
222b9a6ac4eSSeongJae Park 	t->nr_regions = 0;
223f23b8eeeSSeongJae Park 	INIT_LIST_HEAD(&t->regions_list);
224f23b8eeeSSeongJae Park 
225f23b8eeeSSeongJae Park 	return t;
226f23b8eeeSSeongJae Park }
227f23b8eeeSSeongJae Park 
/* Append @t at the tail of @ctx's monitoring targets list. */
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}
232f23b8eeeSSeongJae Park 
/* Return true iff @ctx has no monitoring target. */
bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}
237b5ca3e83SXin Hao 
/* Unlink @t from its context's targets list. */
static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}
242f23b8eeeSSeongJae Park 
/*
 * Free @t together with all regions still on it.  The regions are only
 * freed, not individually unlinked, since the whole target goes away.
 */
void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}
251f23b8eeeSSeongJae Park 
/* Unlink @t from its context, then free it and its regions. */
void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}
257f23b8eeeSSeongJae Park 
/* Return the number of regions currently tracked by @t. */
unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
262b9a6ac4eSSeongJae Park 
/*
 * Construct a damon_ctx struct with default monitoring attributes.
 *
 * Returns the pointer to the new struct if success, or NULL otherwise.
 */
struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* Default intervals, in microseconds (see damon_set_attrs()). */
	ctx->sample_interval = 5 * 1000;		/* 5 ms */
	ctx->aggr_interval = 100 * 1000;		/* 100 ms */
	ctx->ops_update_interval = 60 * 1000 * 1000;	/* 60 s */

	/* Start both interval clocks from the same moment. */
	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_ops_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	/* Default bounds for the number of monitoring regions. */
	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
2882224d848SSeongJae Park 
/* Destroy every monitoring target of @ctx. */
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	/*
	 * If the operations set provides its own cleanup callback, target
	 * destruction is delegated to it entirely.
	 */
	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}
301f23b8eeeSSeongJae Park 
/* Release @ctx together with all of its targets and schemes. */
void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}
3132224d848SSeongJae Park 
3142224d848SSeongJae Park /**
3152224d848SSeongJae Park  * damon_set_attrs() - Set attributes for the monitoring.
3162224d848SSeongJae Park  * @ctx:		monitoring context
3172224d848SSeongJae Park  * @sample_int:		time interval between samplings
3182224d848SSeongJae Park  * @aggr_int:		time interval between aggregations
319f7d911c3SSeongJae Park  * @ops_upd_int:	time interval between monitoring operations updates
320b9a6ac4eSSeongJae Park  * @min_nr_reg:		minimal number of regions
321b9a6ac4eSSeongJae Park  * @max_nr_reg:		maximum number of regions
3222224d848SSeongJae Park  *
3232224d848SSeongJae Park  * This function should not be called while the kdamond is running.
3242224d848SSeongJae Park  * Every time interval is in micro-seconds.
3252224d848SSeongJae Park  *
3262224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
3272224d848SSeongJae Park  */
3282224d848SSeongJae Park int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
329f7d911c3SSeongJae Park 		    unsigned long aggr_int, unsigned long ops_upd_int,
330b9a6ac4eSSeongJae Park 		    unsigned long min_nr_reg, unsigned long max_nr_reg)
3312224d848SSeongJae Park {
3321afaf5cbSSeongJae Park 	if (min_nr_reg < 3)
333b9a6ac4eSSeongJae Park 		return -EINVAL;
3341afaf5cbSSeongJae Park 	if (min_nr_reg > max_nr_reg)
335b9a6ac4eSSeongJae Park 		return -EINVAL;
336b9a6ac4eSSeongJae Park 
3372224d848SSeongJae Park 	ctx->sample_interval = sample_int;
3382224d848SSeongJae Park 	ctx->aggr_interval = aggr_int;
339f7d911c3SSeongJae Park 	ctx->ops_update_interval = ops_upd_int;
340b9a6ac4eSSeongJae Park 	ctx->min_nr_regions = min_nr_reg;
341b9a6ac4eSSeongJae Park 	ctx->max_nr_regions = max_nr_reg;
3422224d848SSeongJae Park 
3432224d848SSeongJae Park 	return 0;
3442224d848SSeongJae Park }
3452224d848SSeongJae Park 
3464bc05954SSeongJae Park /**
3471f366e42SSeongJae Park  * damon_set_schemes() - Set data access monitoring based operation schemes.
3481f366e42SSeongJae Park  * @ctx:	monitoring context
3491f366e42SSeongJae Park  * @schemes:	array of the schemes
3501f366e42SSeongJae Park  * @nr_schemes:	number of entries in @schemes
3511f366e42SSeongJae Park  *
3521f366e42SSeongJae Park  * This function should not be called while the kdamond of the context is
3531f366e42SSeongJae Park  * running.
3541f366e42SSeongJae Park  *
3551f366e42SSeongJae Park  * Return: 0 if success, or negative error code otherwise.
3561f366e42SSeongJae Park  */
3571f366e42SSeongJae Park int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
3581f366e42SSeongJae Park 			ssize_t nr_schemes)
3591f366e42SSeongJae Park {
3601f366e42SSeongJae Park 	struct damos *s, *next;
3611f366e42SSeongJae Park 	ssize_t i;
3621f366e42SSeongJae Park 
3631f366e42SSeongJae Park 	damon_for_each_scheme_safe(s, next, ctx)
3641f366e42SSeongJae Park 		damon_destroy_scheme(s);
3651f366e42SSeongJae Park 	for (i = 0; i < nr_schemes; i++)
3661f366e42SSeongJae Park 		damon_add_scheme(ctx, schemes[i]);
3671f366e42SSeongJae Park 	return 0;
3681f366e42SSeongJae Park }
3691f366e42SSeongJae Park 
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	/* nr_running_ctxs is protected by damon_lock. */
	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}
3834bc05954SSeongJae Park 
384b9a6ac4eSSeongJae Park /* Returns the size upper limit for each monitoring region */
385b9a6ac4eSSeongJae Park static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
386b9a6ac4eSSeongJae Park {
387b9a6ac4eSSeongJae Park 	struct damon_target *t;
388b9a6ac4eSSeongJae Park 	struct damon_region *r;
389b9a6ac4eSSeongJae Park 	unsigned long sz = 0;
390b9a6ac4eSSeongJae Park 
391b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx) {
392b9a6ac4eSSeongJae Park 		damon_for_each_region(r, t)
393b9a6ac4eSSeongJae Park 			sz += r->ar.end - r->ar.start;
394b9a6ac4eSSeongJae Park 	}
395b9a6ac4eSSeongJae Park 
396b9a6ac4eSSeongJae Park 	if (ctx->min_nr_regions)
397b9a6ac4eSSeongJae Park 		sz /= ctx->min_nr_regions;
398b9a6ac4eSSeongJae Park 	if (sz < DAMON_MIN_REGION)
399b9a6ac4eSSeongJae Park 		sz = DAMON_MIN_REGION;
400b9a6ac4eSSeongJae Park 
401b9a6ac4eSSeongJae Park 	return sz;
402b9a6ac4eSSeongJae Park }
403b9a6ac4eSSeongJae Park 
4042224d848SSeongJae Park static int kdamond_fn(void *data);
4052224d848SSeongJae Park 
/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is hold.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		/*
		 * nr_running_ctxs is stable here because the caller holds
		 * damon_lock; it gives each kdamond a distinct name.
		 */
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			/* Don't leave an ERR_PTR looking like a live thread. */
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}
4322224d848SSeongJae Park 
4332224d848SSeongJae Park /**
4342224d848SSeongJae Park  * damon_start() - Starts the monitorings for a given group of contexts.
4352224d848SSeongJae Park  * @ctxs:	an array of the pointers for contexts to start monitoring
4362224d848SSeongJae Park  * @nr_ctxs:	size of @ctxs
4372224d848SSeongJae Park  *
4382224d848SSeongJae Park  * This function starts a group of monitoring threads for a group of monitoring
4392224d848SSeongJae Park  * contexts.  One thread per each context is created and run in parallel.  The
4402224d848SSeongJae Park  * caller should handle synchronization between the threads by itself.  If a
4412224d848SSeongJae Park  * group of threads that created by other 'damon_start()' call is currently
4422224d848SSeongJae Park  * running, this function does nothing but returns -EBUSY.
4432224d848SSeongJae Park  *
4442224d848SSeongJae Park  * Return: 0 on success, negative error code otherwise.
4452224d848SSeongJae Park  */
4462224d848SSeongJae Park int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
4472224d848SSeongJae Park {
4482224d848SSeongJae Park 	int i;
4492224d848SSeongJae Park 	int err = 0;
4502224d848SSeongJae Park 
4512224d848SSeongJae Park 	mutex_lock(&damon_lock);
4522224d848SSeongJae Park 	if (nr_running_ctxs) {
4532224d848SSeongJae Park 		mutex_unlock(&damon_lock);
4542224d848SSeongJae Park 		return -EBUSY;
4552224d848SSeongJae Park 	}
4562224d848SSeongJae Park 
4572224d848SSeongJae Park 	for (i = 0; i < nr_ctxs; i++) {
4582224d848SSeongJae Park 		err = __damon_start(ctxs[i]);
4592224d848SSeongJae Park 		if (err)
4602224d848SSeongJae Park 			break;
4612224d848SSeongJae Park 		nr_running_ctxs++;
4622224d848SSeongJae Park 	}
4632224d848SSeongJae Park 	mutex_unlock(&damon_lock);
4642224d848SSeongJae Park 
4652224d848SSeongJae Park 	return err;
4662224d848SSeongJae Park }
4672224d848SSeongJae Park 
/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		/*
		 * Pin the task and drop the lock before kthread_stop(),
		 * which sleeps, so the thread can't vanish underneath us
		 * and we don't hold kdamond_lock while waiting for it.
		 */
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	/* No kdamond was running for this context. */
	return -EPERM;
}
4912224d848SSeongJae Park 
/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int err = 0;
	int i;

	/* Stop in order; bail out on the first failure. */
	for (i = 0; i < nr_ctxs && !err; i++)
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);

	return err;
}
5122224d848SSeongJae Park 
5132224d848SSeongJae Park /*
5142224d848SSeongJae Park  * damon_check_reset_time_interval() - Check if a time interval is elapsed.
5152224d848SSeongJae Park  * @baseline:	the time to check whether the interval has elapsed since
5162224d848SSeongJae Park  * @interval:	the time interval (microseconds)
5172224d848SSeongJae Park  *
5182224d848SSeongJae Park  * See whether the given time interval has passed since the given baseline
5192224d848SSeongJae Park  * time.  If so, it also updates the baseline to current time for next check.
5202224d848SSeongJae Park  *
5212224d848SSeongJae Park  * Return:	true if the time interval has passed, or false otherwise.
5222224d848SSeongJae Park  */
5232224d848SSeongJae Park static bool damon_check_reset_time_interval(struct timespec64 *baseline,
5242224d848SSeongJae Park 		unsigned long interval)
5252224d848SSeongJae Park {
5262224d848SSeongJae Park 	struct timespec64 now;
5272224d848SSeongJae Park 
5282224d848SSeongJae Park 	ktime_get_coarse_ts64(&now);
5292224d848SSeongJae Park 	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
5302224d848SSeongJae Park 			interval * 1000)
5312224d848SSeongJae Park 		return false;
5322224d848SSeongJae Park 	*baseline = now;
5332224d848SSeongJae Park 	return true;
5342224d848SSeongJae Park }
5352224d848SSeongJae Park 
/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	/* Also resets ctx->last_aggregation when the interval has passed. */
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
5442224d848SSeongJae Park 
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			/* Publish the result via tracepoint before resetting. */
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			/* Remember the old value before zeroing it. */
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}
564f23b8eeeSSeongJae Park 
5652b8a248dSSeongJae Park static void damon_split_region_at(struct damon_ctx *ctx,
5662b8a248dSSeongJae Park 		struct damon_target *t, struct damon_region *r,
5672b8a248dSSeongJae Park 		unsigned long sz_r);
5682b8a248dSSeongJae Park 
56938683e00SSeongJae Park static bool __damos_valid_target(struct damon_region *r, struct damos *s)
57038683e00SSeongJae Park {
57138683e00SSeongJae Park 	unsigned long sz;
57238683e00SSeongJae Park 
57338683e00SSeongJae Park 	sz = r->ar.end - r->ar.start;
57438683e00SSeongJae Park 	return s->min_sz_region <= sz && sz <= s->max_sz_region &&
57538683e00SSeongJae Park 		s->min_nr_accesses <= r->nr_accesses &&
57638683e00SSeongJae Park 		r->nr_accesses <= s->max_nr_accesses &&
57738683e00SSeongJae Park 		s->min_age_region <= r->age && r->age <= s->max_age_region;
57838683e00SSeongJae Park }
57938683e00SSeongJae Park 
/*
 * Check whether scheme @s should be applied to region @r of target @t.
 *
 * On top of the basic range check, when a size quota is set and the
 * operations set can score regions, the region must also meet the
 * scheme's minimum prioritization score.
 */
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}
59038683e00SSeongJae Park 
/*
 * Apply every eligible scheme of @c to region @r of target @t, honoring
 * each scheme's watermarks and size quota, and update per-scheme stats.
 */
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long sz = r->ar.end - r->ar.start;
		struct timespec64 begin, end;
		unsigned long sz_applied = 0;

		/* Watermark logic has deactivated this scheme. */
		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		/* Skip previously charged regions */
		if (quota->charge_target_from) {
			if (t != quota->charge_target_from)
				continue;
			/* Reached the end without finding the resume point. */
			if (r == damon_last_region(t)) {
				quota->charge_target_from = NULL;
				quota->charge_addr_from = 0;
				continue;
			}
			/* Entirely before the resume address: already done. */
			if (quota->charge_addr_from &&
					r->ar.end <= quota->charge_addr_from)
				continue;

			/*
			 * The region straddles the resume address: split off
			 * the already-charged head (aligned to
			 * DAMON_MIN_REGION) and continue with the tail.
			 */
			if (quota->charge_addr_from && r->ar.start <
					quota->charge_addr_from) {
				sz = ALIGN_DOWN(quota->charge_addr_from -
						r->ar.start, DAMON_MIN_REGION);
				if (!sz) {
					if (r->ar.end - r->ar.start <=
							DAMON_MIN_REGION)
						continue;
					sz = DAMON_MIN_REGION;
				}
				damon_split_region_at(c, t, r, sz);
				r = damon_next_region(r);
				sz = r->ar.end - r->ar.start;
			}
			/* Resume point found; stop skipping from here on. */
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
		}

		if (!damos_valid_target(c, t, r, s))
			continue;

		/* Apply the scheme */
		if (c->ops.apply_scheme) {
			/*
			 * If applying the whole region would exceed the
			 * quota, split it so only the allowed size is used.
			 */
			if (quota->esz &&
					quota->charged_sz + sz > quota->esz) {
				sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
						DAMON_MIN_REGION);
				if (!sz)
					goto update_stat;
				damon_split_region_at(c, t, r, sz);
			}
			/* Account the time the operation itself consumed. */
			ktime_get_coarse_ts64(&begin);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
			ktime_get_coarse_ts64(&end);
			quota->total_charged_ns += timespec64_to_ns(&end) -
				timespec64_to_ns(&begin);
			quota->charged_sz += sz;
			/* Quota exhausted: remember where to resume later. */
			if (quota->esz && quota->charged_sz >= quota->esz) {
				quota->charge_target_from = t;
				quota->charge_addr_from = r->ar.end + 1;
			}
		}
		/* DAMOS_STAT only observes; anything else resets the age. */
		if (s->action != DAMOS_STAT)
			r->age = 0;

update_stat:
		s->stat.nr_tried++;
		s->stat.sz_tried += sz;
		if (sz_applied)
			s->stat.nr_applied++;
		s->stat.sz_applied += sz_applied;
	}
}
6761f366e42SSeongJae Park 
6771cd24303SSeongJae Park /* Shouldn't be called if quota->ms and quota->sz are zero */
6781cd24303SSeongJae Park static void damos_set_effective_quota(struct damos_quota *quota)
6791cd24303SSeongJae Park {
6801cd24303SSeongJae Park 	unsigned long throughput;
6811cd24303SSeongJae Park 	unsigned long esz;
6821cd24303SSeongJae Park 
6831cd24303SSeongJae Park 	if (!quota->ms) {
6841cd24303SSeongJae Park 		quota->esz = quota->sz;
6851cd24303SSeongJae Park 		return;
6861cd24303SSeongJae Park 	}
6871cd24303SSeongJae Park 
6881cd24303SSeongJae Park 	if (quota->total_charged_ns)
6891cd24303SSeongJae Park 		throughput = quota->total_charged_sz * 1000000 /
6901cd24303SSeongJae Park 			quota->total_charged_ns;
6911cd24303SSeongJae Park 	else
6921cd24303SSeongJae Park 		throughput = PAGE_SIZE * 1024;
6931cd24303SSeongJae Park 	esz = throughput * quota->ms;
6941cd24303SSeongJae Park 
6951cd24303SSeongJae Park 	if (quota->sz && quota->sz < esz)
6961cd24303SSeongJae Park 		esz = quota->sz;
6971cd24303SSeongJae Park 	quota->esz = esz;
6981cd24303SSeongJae Park }
6991cd24303SSeongJae Park 
/*
 * Apply every scheme of the context to the eligible monitoring results.
 *
 * For each scheme, this first refreshes the quota bookkeeping (charge window
 * reset, effective quota re-calculation, and the region prioritization score
 * threshold), then walks all regions of all targets and lets
 * damon_do_apply_schemes() try each scheme on them.
 */
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long cumulated_sz;
		unsigned int score, max_score = 0;

		/* Watermark-deactivated schemes need no quota maintenance */
		if (!s->wmarks.activated)
			continue;

		/* No time/size quota: no charging or prioritization needed */
		if (!quota->ms && !quota->sz)
			continue;

		/* New charge window starts */
		if (time_after_eq(jiffies, quota->charged_from +
					msecs_to_jiffies(
						quota->reset_interval))) {
			if (quota->esz && quota->charged_sz >= quota->esz)
				s->stat.qt_exceeds++;
			quota->total_charged_sz += quota->charged_sz;
			quota->charged_from = jiffies;
			quota->charged_sz = 0;
			damos_set_effective_quota(quota);
		}

		/* Prioritization needs the operations set's score function */
		if (!c->ops.get_scheme_score)
			continue;

		/* Fill up the score histogram (total bytes per score value) */
		memset(quota->histogram, 0, sizeof(quota->histogram));
		damon_for_each_target(t, c) {
			damon_for_each_region(r, t) {
				if (!__damos_valid_target(r, s))
					continue;
				score = c->ops.get_scheme_score(
						c, t, r, s);
				quota->histogram[score] +=
					r->ar.end - r->ar.start;
				if (score > max_score)
					max_score = score;
			}
		}

		/*
		 * Set the min score limit: walk scores from the highest down
		 * until the covered size reaches the effective quota, so only
		 * the highest-priority regions are applied within the quota.
		 */
		for (cumulated_sz = 0, score = max_score; ; score--) {
			cumulated_sz += quota->histogram[score];
			if (cumulated_sz >= quota->esz || !score)
				break;
		}
		quota->min_score = score;
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}
7611f366e42SSeongJae Park 
76288f86dcfSSeongJae Park static inline unsigned long sz_damon_region(struct damon_region *r)
76388f86dcfSSeongJae Park {
76488f86dcfSSeongJae Park 	return r->ar.end - r->ar.start;
76588f86dcfSSeongJae Park }
766b9a6ac4eSSeongJae Park 
767b9a6ac4eSSeongJae Park /*
768b9a6ac4eSSeongJae Park  * Merge two adjacent regions into one region
769b9a6ac4eSSeongJae Park  */
770b9a6ac4eSSeongJae Park static void damon_merge_two_regions(struct damon_target *t,
771b9a6ac4eSSeongJae Park 		struct damon_region *l, struct damon_region *r)
772b9a6ac4eSSeongJae Park {
773b9a6ac4eSSeongJae Park 	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
774b9a6ac4eSSeongJae Park 
775b9a6ac4eSSeongJae Park 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
776b9a6ac4eSSeongJae Park 			(sz_l + sz_r);
777fda504faSSeongJae Park 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
778b9a6ac4eSSeongJae Park 	l->ar.end = r->ar.end;
779b9a6ac4eSSeongJae Park 	damon_destroy_region(r, t);
780b9a6ac4eSSeongJae Park }
781b9a6ac4eSSeongJae Park 
782b9a6ac4eSSeongJae Park /*
783b9a6ac4eSSeongJae Park  * Merge adjacent regions having similar access frequencies
784b9a6ac4eSSeongJae Park  *
785b9a6ac4eSSeongJae Park  * t		target affected by this merge operation
786b9a6ac4eSSeongJae Park  * thres	'->nr_accesses' diff threshold for the merge
787b9a6ac4eSSeongJae Park  * sz_limit	size upper limit of each region
788b9a6ac4eSSeongJae Park  */
789b9a6ac4eSSeongJae Park static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
790b9a6ac4eSSeongJae Park 				   unsigned long sz_limit)
791b9a6ac4eSSeongJae Park {
792b9a6ac4eSSeongJae Park 	struct damon_region *r, *prev = NULL, *next;
793b9a6ac4eSSeongJae Park 
794b9a6ac4eSSeongJae Park 	damon_for_each_region_safe(r, next, t) {
795d720bbbdSXin Hao 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
796fda504faSSeongJae Park 			r->age = 0;
797fda504faSSeongJae Park 		else
798fda504faSSeongJae Park 			r->age++;
799fda504faSSeongJae Park 
800b9a6ac4eSSeongJae Park 		if (prev && prev->ar.end == r->ar.start &&
801d720bbbdSXin Hao 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
802b9a6ac4eSSeongJae Park 		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
803b9a6ac4eSSeongJae Park 			damon_merge_two_regions(t, prev, r);
804b9a6ac4eSSeongJae Park 		else
805b9a6ac4eSSeongJae Park 			prev = r;
806b9a6ac4eSSeongJae Park 	}
807b9a6ac4eSSeongJae Park }
808b9a6ac4eSSeongJae Park 
809b9a6ac4eSSeongJae Park /*
810b9a6ac4eSSeongJae Park  * Merge adjacent regions having similar access frequencies
811b9a6ac4eSSeongJae Park  *
812b9a6ac4eSSeongJae Park  * threshold	'->nr_accesses' diff threshold for the merge
813b9a6ac4eSSeongJae Park  * sz_limit	size upper limit of each region
814b9a6ac4eSSeongJae Park  *
815b9a6ac4eSSeongJae Park  * This function merges monitoring target regions which are adjacent and their
816b9a6ac4eSSeongJae Park  * access frequencies are similar.  This is for minimizing the monitoring
817b9a6ac4eSSeongJae Park  * overhead under the dynamically changeable access pattern.  If a merge was
818b9a6ac4eSSeongJae Park  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
819b9a6ac4eSSeongJae Park  */
820b9a6ac4eSSeongJae Park static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
821b9a6ac4eSSeongJae Park 				  unsigned long sz_limit)
822b9a6ac4eSSeongJae Park {
823b9a6ac4eSSeongJae Park 	struct damon_target *t;
824b9a6ac4eSSeongJae Park 
825b9a6ac4eSSeongJae Park 	damon_for_each_target(t, c)
826b9a6ac4eSSeongJae Park 		damon_merge_regions_of(t, threshold, sz_limit);
827b9a6ac4eSSeongJae Park }
828b9a6ac4eSSeongJae Park 
829b9a6ac4eSSeongJae Park /*
830b9a6ac4eSSeongJae Park  * Split a region in two
831b9a6ac4eSSeongJae Park  *
832b9a6ac4eSSeongJae Park  * r		the region to be split
833b9a6ac4eSSeongJae Park  * sz_r		size of the first sub-region that will be made
834b9a6ac4eSSeongJae Park  */
835b9a6ac4eSSeongJae Park static void damon_split_region_at(struct damon_ctx *ctx,
836b9a6ac4eSSeongJae Park 		struct damon_target *t, struct damon_region *r,
837b9a6ac4eSSeongJae Park 		unsigned long sz_r)
838b9a6ac4eSSeongJae Park {
839b9a6ac4eSSeongJae Park 	struct damon_region *new;
840b9a6ac4eSSeongJae Park 
841b9a6ac4eSSeongJae Park 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
842b9a6ac4eSSeongJae Park 	if (!new)
843b9a6ac4eSSeongJae Park 		return;
844b9a6ac4eSSeongJae Park 
845b9a6ac4eSSeongJae Park 	r->ar.end = new->ar.start;
846b9a6ac4eSSeongJae Park 
847fda504faSSeongJae Park 	new->age = r->age;
848fda504faSSeongJae Park 	new->last_nr_accesses = r->last_nr_accesses;
849fda504faSSeongJae Park 
850b9a6ac4eSSeongJae Park 	damon_insert_region(new, r, damon_next_region(r), t);
851b9a6ac4eSSeongJae Park }
852b9a6ac4eSSeongJae Park 
853b9a6ac4eSSeongJae Park /* Split every region in the given target into 'nr_subs' regions */
854b9a6ac4eSSeongJae Park static void damon_split_regions_of(struct damon_ctx *ctx,
855b9a6ac4eSSeongJae Park 				     struct damon_target *t, int nr_subs)
856b9a6ac4eSSeongJae Park {
857b9a6ac4eSSeongJae Park 	struct damon_region *r, *next;
858b9a6ac4eSSeongJae Park 	unsigned long sz_region, sz_sub = 0;
859b9a6ac4eSSeongJae Park 	int i;
860b9a6ac4eSSeongJae Park 
861b9a6ac4eSSeongJae Park 	damon_for_each_region_safe(r, next, t) {
862b9a6ac4eSSeongJae Park 		sz_region = r->ar.end - r->ar.start;
863b9a6ac4eSSeongJae Park 
864b9a6ac4eSSeongJae Park 		for (i = 0; i < nr_subs - 1 &&
865b9a6ac4eSSeongJae Park 				sz_region > 2 * DAMON_MIN_REGION; i++) {
866b9a6ac4eSSeongJae Park 			/*
867b9a6ac4eSSeongJae Park 			 * Randomly select size of left sub-region to be at
868b9a6ac4eSSeongJae Park 			 * least 10 percent and at most 90% of original region
869b9a6ac4eSSeongJae Park 			 */
870b9a6ac4eSSeongJae Park 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
871b9a6ac4eSSeongJae Park 					sz_region / 10, DAMON_MIN_REGION);
872b9a6ac4eSSeongJae Park 			/* Do not allow blank region */
873b9a6ac4eSSeongJae Park 			if (sz_sub == 0 || sz_sub >= sz_region)
874b9a6ac4eSSeongJae Park 				continue;
875b9a6ac4eSSeongJae Park 
876b9a6ac4eSSeongJae Park 			damon_split_region_at(ctx, t, r, sz_sub);
877b9a6ac4eSSeongJae Park 			sz_region = sz_sub;
878b9a6ac4eSSeongJae Park 		}
879b9a6ac4eSSeongJae Park 	}
880b9a6ac4eSSeongJae Park }
881b9a6ac4eSSeongJae Park 
882b9a6ac4eSSeongJae Park /*
883b9a6ac4eSSeongJae Park  * Split every target region into randomly-sized small regions
884b9a6ac4eSSeongJae Park  *
885b9a6ac4eSSeongJae Park  * This function splits every target region into random-sized small regions if
886b9a6ac4eSSeongJae Park  * current total number of the regions is equal or smaller than half of the
887b9a6ac4eSSeongJae Park  * user-specified maximum number of regions.  This is for maximizing the
888b9a6ac4eSSeongJae Park  * monitoring accuracy under the dynamically changeable access patterns.  If a
889b9a6ac4eSSeongJae Park  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
890b9a6ac4eSSeongJae Park  * it.
891b9a6ac4eSSeongJae Park  */
892b9a6ac4eSSeongJae Park static void kdamond_split_regions(struct damon_ctx *ctx)
893b9a6ac4eSSeongJae Park {
894b9a6ac4eSSeongJae Park 	struct damon_target *t;
895b9a6ac4eSSeongJae Park 	unsigned int nr_regions = 0;
896b9a6ac4eSSeongJae Park 	static unsigned int last_nr_regions;
897b9a6ac4eSSeongJae Park 	int nr_subregions = 2;
898b9a6ac4eSSeongJae Park 
899b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx)
900b9a6ac4eSSeongJae Park 		nr_regions += damon_nr_regions(t);
901b9a6ac4eSSeongJae Park 
902b9a6ac4eSSeongJae Park 	if (nr_regions > ctx->max_nr_regions / 2)
903b9a6ac4eSSeongJae Park 		return;
904b9a6ac4eSSeongJae Park 
905b9a6ac4eSSeongJae Park 	/* Maybe the middle of the region has different access frequency */
906b9a6ac4eSSeongJae Park 	if (last_nr_regions == nr_regions &&
907b9a6ac4eSSeongJae Park 			nr_regions < ctx->max_nr_regions / 3)
908b9a6ac4eSSeongJae Park 		nr_subregions = 3;
909b9a6ac4eSSeongJae Park 
910b9a6ac4eSSeongJae Park 	damon_for_each_target(t, ctx)
911b9a6ac4eSSeongJae Park 		damon_split_regions_of(ctx, t, nr_subregions);
912b9a6ac4eSSeongJae Park 
913b9a6ac4eSSeongJae Park 	last_nr_regions = nr_regions;
914b9a6ac4eSSeongJae Park }
915b9a6ac4eSSeongJae Park 
916f23b8eeeSSeongJae Park /*
917f7d911c3SSeongJae Park  * Check whether it is time to check and apply the operations-related data
918f7d911c3SSeongJae Park  * structures.
9192224d848SSeongJae Park  *
9202224d848SSeongJae Park  * Returns true if it is.
9212224d848SSeongJae Park  */
922f7d911c3SSeongJae Park static bool kdamond_need_update_operations(struct damon_ctx *ctx)
9232224d848SSeongJae Park {
924f7d911c3SSeongJae Park 	return damon_check_reset_time_interval(&ctx->last_ops_update,
925f7d911c3SSeongJae Park 			ctx->ops_update_interval);
9262224d848SSeongJae Park }
9272224d848SSeongJae Park 
9282224d848SSeongJae Park /*
9292224d848SSeongJae Park  * Check whether current monitoring should be stopped
9302224d848SSeongJae Park  *
9312224d848SSeongJae Park  * The monitoring is stopped when either the user requested to stop, or all
9322224d848SSeongJae Park  * monitoring targets are invalid.
9332224d848SSeongJae Park  *
9342224d848SSeongJae Park  * Returns true if need to stop current monitoring.
9352224d848SSeongJae Park  */
9362224d848SSeongJae Park static bool kdamond_need_stop(struct damon_ctx *ctx)
9372224d848SSeongJae Park {
938f23b8eeeSSeongJae Park 	struct damon_target *t;
9392224d848SSeongJae Park 
9400f91d133SChangbin Du 	if (kthread_should_stop())
9412224d848SSeongJae Park 		return true;
9422224d848SSeongJae Park 
943f7d911c3SSeongJae Park 	if (!ctx->ops.target_valid)
9442224d848SSeongJae Park 		return false;
9452224d848SSeongJae Park 
946f23b8eeeSSeongJae Park 	damon_for_each_target(t, ctx) {
947f7d911c3SSeongJae Park 		if (ctx->ops.target_valid(t))
948f23b8eeeSSeongJae Park 			return false;
949f23b8eeeSSeongJae Park 	}
950f23b8eeeSSeongJae Park 
951f23b8eeeSSeongJae Park 	return true;
9522224d848SSeongJae Park }
9532224d848SSeongJae Park 
/*
 * Get the current value of the given watermark metric.
 *
 * For DAMOS_WMARK_FREE_MEM_RATE, returns the system's free memory rate in
 * per-thousand (0-1000), computed from si_meminfo().
 *
 * NOTE(review): for an unknown metric this returns -EINVAL implicitly
 * converted to a huge 'unsigned long'.  The only visible caller
 * (damos_wmark_wait_us()) then treats it as above the high watermark and
 * deactivates the scheme -- confirm this fail-safe behavior is intended.
 */
static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		/* freeram/totalram are page counts; ratio is in per-thousand */
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}
967ee801b7dSSeongJae Park 
/*
 * Check the scheme's watermarks and update its activation state.
 *
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	/* No watermarks configured: the scheme is always active */
	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	/* metric is between mid and low watermarks: (re)activate */
	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
1001ee801b7dSSeongJae Park 
1002ee801b7dSSeongJae Park static void kdamond_usleep(unsigned long usecs)
1003ee801b7dSSeongJae Park {
10044de46a30SSeongJae Park 	/* See Documentation/timers/timers-howto.rst for the thresholds */
10054de46a30SSeongJae Park 	if (usecs > 20 * USEC_PER_MSEC)
100670e92748SSeongJae Park 		schedule_timeout_idle(usecs_to_jiffies(usecs));
1007ee801b7dSSeongJae Park 	else
100870e92748SSeongJae Park 		usleep_idle_range(usecs, usecs + 1);
1009ee801b7dSSeongJae Park }
1010ee801b7dSSeongJae Park 
1011ee801b7dSSeongJae Park /* Returns negative error code if it's not activated but should return */
1012ee801b7dSSeongJae Park static int kdamond_wait_activation(struct damon_ctx *ctx)
1013ee801b7dSSeongJae Park {
1014ee801b7dSSeongJae Park 	struct damos *s;
1015ee801b7dSSeongJae Park 	unsigned long wait_time;
1016ee801b7dSSeongJae Park 	unsigned long min_wait_time = 0;
1017ee801b7dSSeongJae Park 
1018ee801b7dSSeongJae Park 	while (!kdamond_need_stop(ctx)) {
1019ee801b7dSSeongJae Park 		damon_for_each_scheme(s, ctx) {
1020ee801b7dSSeongJae Park 			wait_time = damos_wmark_wait_us(s);
1021ee801b7dSSeongJae Park 			if (!min_wait_time || wait_time < min_wait_time)
1022ee801b7dSSeongJae Park 				min_wait_time = wait_time;
1023ee801b7dSSeongJae Park 		}
1024ee801b7dSSeongJae Park 		if (!min_wait_time)
1025ee801b7dSSeongJae Park 			return 0;
1026ee801b7dSSeongJae Park 
1027ee801b7dSSeongJae Park 		kdamond_usleep(min_wait_time);
1028ee801b7dSSeongJae Park 	}
1029ee801b7dSSeongJae Park 	return -EBUSY;
1030ee801b7dSSeongJae Park }
1031ee801b7dSSeongJae Park 
/*
 * The monitoring daemon that runs as a kernel thread
 *
 * Repeats access sampling, aggregation-time region adjustment (merge/split)
 * and scheme application until the user requests stop or all monitoring
 * targets become invalid.  The pace of each step is controlled by the
 * context's sampling, aggregation, and operations update intervals.
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;
	bool done = false;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	/* callbacks returning nonzero request the kdamond to finish */
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		done = true;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx) && !done) {
		/* Wait until any scheme's watermarks allow activation */
		if (kdamond_wait_activation(ctx))
			continue;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			done = true;

		kdamond_usleep(ctx->sample_interval);

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			/* merge threshold: 10% of the highest access count */
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				done = true;
			kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (kdamond_need_update_operations(ctx)) {
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	/* Release all monitoring results before finishing */
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	/* Mark this context as having no running kdamond */
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}
110917ccae8bSSeongJae Park 
111017ccae8bSSeongJae Park #include "core-test.h"
1111