// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Debugfs Interface
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-dbgfs: " fmt

#include <linux/damon.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page_idle.h>
#include <linux/slab.h>

static struct damon_ctx **dbgfs_ctxs;
static int dbgfs_nr_ctxs;
static struct dentry **dbgfs_dirs;
static DEFINE_MUTEX(damon_dbgfs_lock);

static void damon_dbgfs_warn_deprecation(void)
{
	pr_warn_once("DAMON debugfs interface is deprecated, "
		     "so users should move to DAMON_SYSFS. If you cannot, "
		     "please report your usecase to damon@lists.linux.dev and "
		     "linux-mm@kvack.org.\n");
}

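/*
 * Overview (an illustrative summary derived from the file operations below,
 * not an authoritative comment from upstream): with debugfs mounted at the
 * conventional /sys/kernel/debug, each context directory under
 * /sys/kernel/debug/damon/ exposes "attrs", "schemes", "target_ids",
 * "init_regions" and "kdamond_pid" files, while the root directory
 * additionally provides "mk_contexts", "rm_contexts" and "monitor_on" for
 * context management and monitoring control.
 */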
/*
 * Returns the user input as a '\0'-terminated kernel string on success, or
 * an ERR_PTR()-encoded negative error code otherwise.
 */
static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* We do not accept continuous write */
	if (*ppos)
		return ERR_PTR(-EINVAL);

	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return ERR_PTR(-ENOMEM);

	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
	if (ret != count) {
		kfree(kbuf);
		return ERR_PTR(-EIO);
	}
	kbuf[ret] = '\0';

	return kbuf;
}

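/*
 * The "attrs" file reads and writes the five monitoring attributes as one
 * space-separated line:
 *   sample_interval aggr_interval ops_update_interval min_nr_regions max_nr_regions
 * For example (illustrative values only, not taken from this file):
 *   # echo "5000 100000 1000000 10 1000" > /sys/kernel/debug/damon/attrs
 *   # cat /sys/kernel/debug/damon/attrs
 */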
static ssize_t dbgfs_attrs_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char kbuf[128];
	int ret;

	mutex_lock(&ctx->kdamond_lock);
	ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
			ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
			ctx->attrs.ops_update_interval,
			ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
	mutex_unlock(&ctx->kdamond_lock);

	return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
}

static ssize_t dbgfs_attrs_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	struct damon_attrs attrs;
	char *kbuf;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
				&attrs.sample_interval, &attrs.aggr_interval,
				&attrs.ops_update_interval,
				&attrs.min_nr_regions,
				&attrs.max_nr_regions) != 5) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_attrs(ctx, &attrs);
	if (!ret)
		ret = count;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
out:
	kfree(kbuf);
	return ret;
}

/*
 * Return corresponding dbgfs' scheme action value (int) for the given
 * damos_action if the given damos_action value is valid and supported by
 * dbgfs, negative error code otherwise.
 */
static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
{
	switch (action) {
	case DAMOS_WILLNEED:
		return 0;
	case DAMOS_COLD:
		return 1;
	case DAMOS_PAGEOUT:
		return 2;
	case DAMOS_HUGEPAGE:
		return 3;
	case DAMOS_NOHUGEPAGE:
		return 4;
	case DAMOS_STAT:
		return 5;
	default:
		return -EINVAL;
	}
}

static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->pattern.min_sz_region,
				s->pattern.max_sz_region,
				s->pattern.min_nr_accesses,
				s->pattern.max_nr_accesses,
				s->pattern.min_age_region,
				s->pattern.max_age_region,
				damos_action_to_dbgfs_scheme_action(s->action),
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		if (!rc)
			return -ENOMEM;

		written += rc;
	}
	return written;
}

static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_schemes(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
{
	ssize_t i;

	for (i = 0; i < nr_schemes; i++)
		kfree(schemes[i]);
	kfree(schemes);
}

/*
 * Return corresponding damos_action for the given dbgfs input for a scheme
 * action if the input is valid, negative error code otherwise.
 */
static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
{
	switch (dbgfs_action) {
	case 0:
		return DAMOS_WILLNEED;
	case 1:
		return DAMOS_COLD;
	case 2:
		return DAMOS_PAGEOUT;
	case 3:
		return DAMOS_HUGEPAGE;
	case 4:
		return DAMOS_NOHUGEPAGE;
	case 5:
		return DAMOS_STAT;
	default:
		return -EINVAL;
	}
}

/*
 * Converts a string into an array of struct damos pointers
 *
 * Returns the array of struct damos pointers if the conversion succeeds, or
 * NULL otherwise.
 */
static struct damos **str_to_schemes(const char *str, ssize_t len,
				ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned int action_input;
	enum damos_action action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_access_pattern pattern = {};
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&pattern.min_sz_region, &pattern.max_sz_region,
				&pattern.min_nr_accesses,
				&pattern.max_nr_accesses,
				&pattern.min_age_region,
				&pattern.max_age_region,
				&action_input, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		if (ret != 18)
			break;
		action = dbgfs_scheme_action_to_damos_action(action_input);
		if ((int)action < 0)
			goto fail;

		if (pattern.min_sz_region > pattern.max_sz_region ||
		    pattern.min_nr_accesses > pattern.max_nr_accesses ||
		    pattern.min_age_region > pattern.max_age_region)
			goto fail;

		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid <  wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}

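/*
 * Each line written to the "schemes" file is parsed by str_to_schemes()
 * above as 18 space-separated fields:
 *   min_sz max_sz min_nr_accesses max_nr_accesses min_age max_age action
 *   quota_ms quota_sz quota_reset_interval weight_sz weight_nr_accesses
 *   weight_age wmarks_metric wmarks_interval wmarks_high wmarks_mid wmarks_low
 * For example, an illustrative (not authoritative) scheme that pages out
 * regions of 4K-16K bytes not accessed for at least ten aggregation
 * intervals, with no quota and no watermarks, could be written as:
 *   # echo "4096 16384 0 0 10 4294967295 2 0 0 0 0 0 0 0 0 0 0 0" > schemes
 * Reads report the same fields followed by the per-scheme statistics that
 * sprint_schemes() prints.
 */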
static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	struct damos **schemes;
	ssize_t nr_schemes = 0, ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	schemes = str_to_schemes(kbuf, count, &nr_schemes);
	if (!schemes) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	damon_set_schemes(ctx, schemes, nr_schemes);
	ret = count;
	nr_schemes = 0;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	free_schemes_arr(schemes, nr_schemes);
out:
	kfree(kbuf);
	return ret;
}

static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
	struct damon_target *t;
	int id;
	int written = 0;
	int rc;

	damon_for_each_target(t, ctx) {
		if (damon_target_has_pid(ctx))
			/* Show pid numbers to debugfs users */
			id = pid_vnr(t->pid);
		else
			/* Show 42 for physical address space, just for fun */
			id = 42;

		rc = scnprintf(&buf[written], len - written, "%d ", id);
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	if (written)
		written -= 1;
	written += scnprintf(&buf[written], len - written, "\n");
	return written;
}

static ssize_t dbgfs_target_ids_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	ssize_t len;
	char ids_buf[320];

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_target_ids(ctx, ids_buf, 320);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
}

/*
 * Converts a string into an array of integers
 *
 * Returns the array of integers if the conversion succeeds, or NULL
 * otherwise.
 */
static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
{
	int *array;
	const int max_nr_ints = 32;
	int nr;
	int pos = 0, parsed, ret;

	*nr_ints = 0;
	array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;
	while (*nr_ints < max_nr_ints && pos < len) {
		ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
		pos += parsed;
		if (ret != 1)
			break;
		array[*nr_ints] = nr;
		*nr_ints += 1;
	}

	return array;
}

static void dbgfs_put_pids(struct pid **pids, int nr_pids)
{
	int i;

	for (i = 0; i < nr_pids; i++)
		put_pid(pids[i]);
}

/*
 * Converts a string into an array of struct pid pointers
 *
 * Returns the array of struct pid pointers if the conversion succeeds, or
 * NULL otherwise.
 */
static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
{
	int *ints;
	ssize_t nr_ints;
	struct pid **pids;

	*nr_pids = 0;

	ints = str_to_ints(str, len, &nr_ints);
	if (!ints)
		return NULL;

	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
	if (!pids)
		goto out;

	for (; *nr_pids < nr_ints; (*nr_pids)++) {
		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
		if (!pids[*nr_pids]) {
			dbgfs_put_pids(pids, *nr_pids);
			kfree(ints);
			kfree(pids);
			return NULL;
		}
	}

out:
	kfree(ints);
	return pids;
}

/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (size is same as @nr_targets)
 *
 * This function should not be called while the kdamond is running.  @pids is
 * ignored if the context is not configured to have pid in each target.  On
 * failure, reference counts of all pids in @pids are decremented.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			if (damon_target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		if (damon_target_has_pid(ctx))
			t->pid = pids[i];
		damon_add_target(ctx, t);
	}

	return 0;
}

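/*
 * The "target_ids" file takes either a space-separated list of pids of the
 * processes to monitor, or the special keyword "paddr" for physical address
 * space monitoring; an empty list simply removes the current targets.  For
 * example (the pids are illustrative, not from this file):
 *   # echo "1234 5678" > /sys/kernel/debug/damon/target_ids
 *   # echo "paddr" > /sys/kernel/debug/damon/target_ids
 */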
static ssize_t dbgfs_target_ids_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	bool id_is_pid = true;
	char *kbuf;
	struct pid **target_pids = NULL;
	ssize_t nr_targets;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (!strncmp(kbuf, "paddr\n", count)) {
		id_is_pid = false;
		nr_targets = 1;
	}

	if (id_is_pid) {
		target_pids = str_to_pids(kbuf, count, &nr_targets);
		if (!target_pids) {
			ret = -ENOMEM;
			goto out;
		}
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		if (id_is_pid)
			dbgfs_put_pids(target_pids, nr_targets);
		ret = -EBUSY;
		goto unlock_out;
	}

	/* remove previously set targets */
	dbgfs_set_targets(ctx, 0, NULL);
	if (!nr_targets) {
		ret = count;
		goto unlock_out;
	}

	/* Configure the context for the address space type */
	if (id_is_pid)
		ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
	else
		ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
	if (ret)
		goto unlock_out;

	ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
	if (!ret)
		ret = count;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(target_pids);
out:
	kfree(kbuf);
	return ret;
}

static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r;
	int target_idx = 0;
	int written = 0;
	int rc;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			rc = scnprintf(&buf[written], len - written,
					"%d %lu %lu\n",
					target_idx, r->ar.start, r->ar.end);
			if (!rc)
				return -ENOMEM;
			written += rc;
		}
		target_idx++;
	}
	return written;
}

static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		mutex_unlock(&ctx->kdamond_lock);
		len = -EBUSY;
		goto out;
	}

	len = sprint_init_regions(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}

static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r, *next;
	int pos = 0, parsed, ret;
	int target_idx;
	struct damon_addr_range ar;
	int err;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	while (pos < len) {
		ret = sscanf(&str[pos], "%d %lu %lu%n",
				&target_idx, &ar.start, &ar.end, &parsed);
		if (ret != 3)
			break;
		err = add_init_region(c, target_idx, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}

	return 0;

fail:
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}
	return err;
}

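/*
 * The "init_regions" file takes "<target idx> <start address> <end address>"
 * triples as parsed by set_init_regions() above.  Regions of a target must
 * be non-overlapping and written in increasing address order, and every
 * write replaces the previously set regions.  Illustrative example (the
 * addresses are made up, not from this file):
 *   # printf "0 4096 8192\n0 12288 16384\n" > /sys/kernel/debug/damon/init_regions
 */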
static ssize_t dbgfs_init_regions_write(struct file *file,
					  const char __user *buf, size_t count,
					  loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t ret = count;
	int err;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	err = set_init_regions(ctx, kbuf, ret);
	if (err)
		ret = err;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(kbuf);
	return ret;
}

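/*
 * The read-only "kdamond_pid" file shows the pid of the kdamond worker
 * thread while monitoring is turned on, and "none" otherwise, e.g.:
 *   # cat /sys/kernel/debug/damon/kdamond_pid
 */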
static ssize_t dbgfs_kdamond_pid_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
	else
		len = scnprintf(kbuf, count, "none\n");
	mutex_unlock(&ctx->kdamond_lock);
	if (!len)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();

	file->private_data = inode->i_private;

	return nonseekable_open(inode, file);
}

static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};

static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
{
	const char * const file_names[] = {"attrs", "schemes", "target_ids",
		"init_regions", "kdamond_pid"};
	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
	int i;

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
}

static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}

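/*
 * Per the ops selection below, a new dbgfs context defaults to virtual
 * address space monitoring (DAMON_OPS_VADDR) and falls back to the physical
 * address space operations (DAMON_OPS_PADDR) only if the former is not
 * available.
 */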
static struct damon_ctx *dbgfs_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;

	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	ctx->callback.before_terminate = dbgfs_before_terminate;
	return ctx;
}

static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}

/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	dbgfs_nr_ctxs++;

	return 0;
}

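/*
 * Writing a name to "mk_contexts" creates an additional monitoring context
 * and a same-named debugfs directory containing the per-context files, while
 * writing the name to "rm_contexts" removes it again.  Illustrative example
 * (the context name "foo" is arbitrary):
 *   # echo foo > /sys/kernel/debug/damon/mk_contexts
 *   # echo foo > /sys/kernel/debug/damon/rm_contexts
 */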
static ssize_t dbgfs_mk_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	char *ctx_name;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_mk_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

/*
 * Remove a context of @name and its debugfs directory.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Return 0 on success, negative error code otherwise.
 */
static int dbgfs_rm_context(char *name)
{
	struct dentry *root, *dir, **new_dirs;
	struct inode *inode;
	struct damon_ctx **new_ctxs;
	int i, j;
	int ret = 0;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	dir = debugfs_lookup(name, root);
	if (!dir)
		return -ENOENT;

	inode = d_inode(dir);
	if (!S_ISDIR(inode->i_mode)) {
		ret = -EINVAL;
		goto out_dput;
	}

	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
	if (!new_dirs) {
		ret = -ENOMEM;
		goto out_dput;
	}

	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
	if (!new_ctxs) {
		ret = -ENOMEM;
		goto out_new_dirs;
	}

	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
		if (dbgfs_dirs[i] == dir) {
			debugfs_remove(dbgfs_dirs[i]);
			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
			continue;
		}
		new_dirs[j] = dbgfs_dirs[i];
		new_ctxs[j++] = dbgfs_ctxs[i];
	}

	kfree(dbgfs_dirs);
	kfree(dbgfs_ctxs);

	dbgfs_dirs = new_dirs;
	dbgfs_ctxs = new_ctxs;
	dbgfs_nr_ctxs--;

	goto out_dput;

out_new_dirs:
	kfree(new_dirs);
out_dput:
	dput(dir);
	return ret;
}

static ssize_t dbgfs_rm_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;
	char *ctx_name;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_rm_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

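/*
 * The "monitor_on" file starts or stops monitoring with every configured
 * context.  Turning it on is rejected while any context has no target, and
 * reads report the current state, e.g.:
 *   # echo on > /sys/kernel/debug/damon/monitor_on
 *   # cat /sys/kernel/debug/damon/monitor_on
 *   on
 *   # echo off > /sys/kernel/debug/damon/monitor_on
 */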
static ssize_t dbgfs_monitor_on_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	char monitor_on_buf[5];
	bool monitor_on = damon_nr_running_ctxs() != 0;
	int len;

	len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");

	return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
}

static ssize_t dbgfs_monitor_on_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char *kbuf;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* Remove white space */
	if (sscanf(kbuf, "%s", kbuf) != 1) {
		kfree(kbuf);
		return -EINVAL;
	}

	mutex_lock(&damon_dbgfs_lock);
	if (!strncmp(kbuf, "on", count)) {
		int i;

		for (i = 0; i < dbgfs_nr_ctxs; i++) {
			if (damon_targets_empty(dbgfs_ctxs[i])) {
				kfree(kbuf);
				mutex_unlock(&damon_dbgfs_lock);
				return -EINVAL;
			}
		}
		ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
	} else if (!strncmp(kbuf, "off", count)) {
		ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&damon_dbgfs_lock);

	if (!ret)
		ret = count;
	kfree(kbuf);
	return ret;
}

static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();
	return nonseekable_open(inode, file);
}

static const struct file_operations mk_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_mk_context_write,
};

static const struct file_operations rm_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_rm_context_write,
};

static const struct file_operations monitor_on_fops = {
	.open = damon_dbgfs_static_file_open,
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};

static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops};
	int i;

	dbgfs_root = debugfs_create_dir("damon", NULL);

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}

/*
 * Functions for the initialization
 */

static int __init damon_dbgfs_init(void)
{
	int rc = -ENOMEM;

	mutex_lock(&damon_dbgfs_lock);
	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
	if (!dbgfs_ctxs)
		goto out;
	dbgfs_ctxs[0] = dbgfs_new_ctx();
	if (!dbgfs_ctxs[0]) {
		kfree(dbgfs_ctxs);
		goto out;
	}
	dbgfs_nr_ctxs = 1;

	rc = __damon_dbgfs_init();
	if (rc) {
		kfree(dbgfs_ctxs[0]);
		kfree(dbgfs_ctxs);
		pr_err("%s: dbgfs init failed\n", __func__);
	}

out:
	mutex_unlock(&damon_dbgfs_lock);
	return rc;
}

module_init(damon_dbgfs_init);

#include "dbgfs-test.h"