xref: /openbmc/linux/mm/damon/dbgfs.c (revision cbeaa77b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON Debugfs Interface
4  *
5  * Author: SeongJae Park <sjpark@amazon.de>
6  */
7 
8 #define pr_fmt(fmt) "damon-dbgfs: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/debugfs.h>
12 #include <linux/file.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <linux/page_idle.h>
16 #include <linux/slab.h>
17 
18 static struct damon_ctx **dbgfs_ctxs;
19 static int dbgfs_nr_ctxs;
20 static struct dentry **dbgfs_dirs;
21 static DEFINE_MUTEX(damon_dbgfs_lock);
22 
23 /*
24  * Returns non-empty string on success, negative error code otherwise.
25  */
26 static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
27 {
28 	char *kbuf;
29 	ssize_t ret;
30 
31 	/* We do not accept continuous write */
32 	if (*ppos)
33 		return ERR_PTR(-EINVAL);
34 
35 	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
36 	if (!kbuf)
37 		return ERR_PTR(-ENOMEM);
38 
39 	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
40 	if (ret != count) {
41 		kfree(kbuf);
42 		return ERR_PTR(-EIO);
43 	}
44 	kbuf[ret] = '\0';
45 
46 	return kbuf;
47 }
48 
49 static ssize_t dbgfs_attrs_read(struct file *file,
50 		char __user *buf, size_t count, loff_t *ppos)
51 {
52 	struct damon_ctx *ctx = file->private_data;
53 	char kbuf[128];
54 	int ret;
55 
56 	mutex_lock(&ctx->kdamond_lock);
57 	ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
58 			ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
59 			ctx->attrs.ops_update_interval,
60 			ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
61 	mutex_unlock(&ctx->kdamond_lock);
62 
63 	return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
64 }
65 
66 static ssize_t dbgfs_attrs_write(struct file *file,
67 		const char __user *buf, size_t count, loff_t *ppos)
68 {
69 	struct damon_ctx *ctx = file->private_data;
70 	unsigned long s, a, r, minr, maxr;
71 	char *kbuf;
72 	ssize_t ret;
73 
74 	kbuf = user_input_str(buf, count, ppos);
75 	if (IS_ERR(kbuf))
76 		return PTR_ERR(kbuf);
77 
78 	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
79 				&s, &a, &r, &minr, &maxr) != 5) {
80 		ret = -EINVAL;
81 		goto out;
82 	}
83 
84 	mutex_lock(&ctx->kdamond_lock);
85 	if (ctx->kdamond) {
86 		ret = -EBUSY;
87 		goto unlock_out;
88 	}
89 
90 	ret = damon_set_attrs(ctx, s, a, r, minr, maxr);
91 	if (!ret)
92 		ret = count;
93 unlock_out:
94 	mutex_unlock(&ctx->kdamond_lock);
95 out:
96 	kfree(kbuf);
97 	return ret;
98 }
99 
100 /*
101  * Return corresponding dbgfs' scheme action value (int) for the given
102  * damos_action if the given damos_action value is valid and supported by
103  * dbgfs, negative error code otherwise.
104  */
105 static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
106 {
107 	switch (action) {
108 	case DAMOS_WILLNEED:
109 		return 0;
110 	case DAMOS_COLD:
111 		return 1;
112 	case DAMOS_PAGEOUT:
113 		return 2;
114 	case DAMOS_HUGEPAGE:
115 		return 3;
116 	case DAMOS_NOHUGEPAGE:
117 		return 4;
118 	case DAMOS_STAT:
119 		return 5;
120 	default:
121 		return -EINVAL;
122 	}
123 }
124 
/*
 * Serialize every scheme of @c into @buf (at most @len bytes), one scheme
 * per line.  The field order matches what str_to_schemes() parses, with the
 * scheme's statistics appended at the end of each line.
 *
 * Returns the number of bytes written, or -ENOMEM if @buf is too small.
 */
static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		/* access pattern, action, quota, watermarks, then stats */
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->pattern.min_sz_region,
				s->pattern.max_sz_region,
				s->pattern.min_nr_accesses,
				s->pattern.max_nr_accesses,
				s->pattern.min_age_region,
				s->pattern.max_age_region,
				damos_action_to_dbgfs_scheme_action(s->action),
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		/* scnprintf() returns 0 only when there is no space left */
		if (!rc)
			return -ENOMEM;

		written += rc;
	}
	return written;
}
158 
159 static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
160 		size_t count, loff_t *ppos)
161 {
162 	struct damon_ctx *ctx = file->private_data;
163 	char *kbuf;
164 	ssize_t len;
165 
166 	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
167 	if (!kbuf)
168 		return -ENOMEM;
169 
170 	mutex_lock(&ctx->kdamond_lock);
171 	len = sprint_schemes(ctx, kbuf, count);
172 	mutex_unlock(&ctx->kdamond_lock);
173 	if (len < 0)
174 		goto out;
175 	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
176 
177 out:
178 	kfree(kbuf);
179 	return len;
180 }
181 
182 static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
183 {
184 	ssize_t i;
185 
186 	for (i = 0; i < nr_schemes; i++)
187 		kfree(schemes[i]);
188 	kfree(schemes);
189 }
190 
191 /*
192  * Return corresponding damos_action for the given dbgfs input for a scheme
193  * action if the input is valid, negative error code otherwise.
194  */
195 static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
196 {
197 	switch (dbgfs_action) {
198 	case 0:
199 		return DAMOS_WILLNEED;
200 	case 1:
201 		return DAMOS_COLD;
202 	case 2:
203 		return DAMOS_PAGEOUT;
204 	case 3:
205 		return DAMOS_HUGEPAGE;
206 	case 4:
207 		return DAMOS_NOHUGEPAGE;
208 	case 5:
209 		return DAMOS_STAT;
210 	default:
211 		return -EINVAL;
212 	}
213 }
214 
/*
 * Converts a string into an array of struct damos pointers
 *
 * Returns an array of struct damos pointers that converted if the conversion
 * success, or NULL otherwise.
 */
static struct damos **str_to_schemes(const char *str, ssize_t len,
				ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned int action_input;
	enum damos_action action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_access_pattern pattern = {};
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		/*
		 * 18 fields per line: 6 access pattern fields, the action,
		 * 6 quota fields, and 5 watermark fields.  '%n' records how
		 * many bytes were consumed ('parsed').
		 */
		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&pattern.min_sz_region, &pattern.max_sz_region,
				&pattern.min_nr_accesses,
				&pattern.max_nr_accesses,
				&pattern.min_age_region,
				&pattern.max_age_region,
				&action_input, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		/* A short match terminates parsing without error */
		if (ret != 18)
			break;
		action = dbgfs_scheme_action_to_damos_action(action_input);
		if ((int)action < 0)
			goto fail;

		/* Each pattern's min must not exceed its max */
		if (pattern.min_sz_region > pattern.max_sz_region ||
		    pattern.min_nr_accesses > pattern.max_nr_accesses ||
		    pattern.min_age_region > pattern.max_age_region)
			goto fail;

		/* Watermarks must be ordered: high >= mid >= low */
		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid <  wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}
282 
283 static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
284 		size_t count, loff_t *ppos)
285 {
286 	struct damon_ctx *ctx = file->private_data;
287 	char *kbuf;
288 	struct damos **schemes;
289 	ssize_t nr_schemes = 0, ret;
290 
291 	kbuf = user_input_str(buf, count, ppos);
292 	if (IS_ERR(kbuf))
293 		return PTR_ERR(kbuf);
294 
295 	schemes = str_to_schemes(kbuf, count, &nr_schemes);
296 	if (!schemes) {
297 		ret = -EINVAL;
298 		goto out;
299 	}
300 
301 	mutex_lock(&ctx->kdamond_lock);
302 	if (ctx->kdamond) {
303 		ret = -EBUSY;
304 		goto unlock_out;
305 	}
306 
307 	ret = damon_set_schemes(ctx, schemes, nr_schemes);
308 	if (!ret) {
309 		ret = count;
310 		nr_schemes = 0;
311 	}
312 
313 unlock_out:
314 	mutex_unlock(&ctx->kdamond_lock);
315 	free_schemes_arr(schemes, nr_schemes);
316 out:
317 	kfree(kbuf);
318 	return ret;
319 }
320 
321 static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
322 {
323 	struct damon_target *t;
324 	int id;
325 	int written = 0;
326 	int rc;
327 
328 	damon_for_each_target(t, ctx) {
329 		if (damon_target_has_pid(ctx))
330 			/* Show pid numbers to debugfs users */
331 			id = pid_vnr(t->pid);
332 		else
333 			/* Show 42 for physical address space, just for fun */
334 			id = 42;
335 
336 		rc = scnprintf(&buf[written], len - written, "%d ", id);
337 		if (!rc)
338 			return -ENOMEM;
339 		written += rc;
340 	}
341 	if (written)
342 		written -= 1;
343 	written += scnprintf(&buf[written], len - written, "\n");
344 	return written;
345 }
346 
347 static ssize_t dbgfs_target_ids_read(struct file *file,
348 		char __user *buf, size_t count, loff_t *ppos)
349 {
350 	struct damon_ctx *ctx = file->private_data;
351 	ssize_t len;
352 	char ids_buf[320];
353 
354 	mutex_lock(&ctx->kdamond_lock);
355 	len = sprint_target_ids(ctx, ids_buf, 320);
356 	mutex_unlock(&ctx->kdamond_lock);
357 	if (len < 0)
358 		return len;
359 
360 	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
361 }
362 
363 /*
364  * Converts a string into an integers array
365  *
366  * Returns an array of integers array if the conversion success, or NULL
367  * otherwise.
368  */
369 static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
370 {
371 	int *array;
372 	const int max_nr_ints = 32;
373 	int nr;
374 	int pos = 0, parsed, ret;
375 
376 	*nr_ints = 0;
377 	array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
378 	if (!array)
379 		return NULL;
380 	while (*nr_ints < max_nr_ints && pos < len) {
381 		ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
382 		pos += parsed;
383 		if (ret != 1)
384 			break;
385 		array[*nr_ints] = nr;
386 		*nr_ints += 1;
387 	}
388 
389 	return array;
390 }
391 
/* Drop the references str_to_pids() took on the given pids. */
static void dbgfs_put_pids(struct pid **pids, int nr_pids)
{
	while (nr_pids-- > 0)
		put_pid(pids[nr_pids]);
}
399 
400 /*
401  * Converts a string into an struct pid pointers array
402  *
403  * Returns an array of struct pid pointers if the conversion success, or NULL
404  * otherwise.
405  */
406 static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
407 {
408 	int *ints;
409 	ssize_t nr_ints;
410 	struct pid **pids;
411 
412 	*nr_pids = 0;
413 
414 	ints = str_to_ints(str, len, &nr_ints);
415 	if (!ints)
416 		return NULL;
417 
418 	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
419 	if (!pids)
420 		goto out;
421 
422 	for (; *nr_pids < nr_ints; (*nr_pids)++) {
423 		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
424 		if (!pids[*nr_pids]) {
425 			dbgfs_put_pids(pids, *nr_pids);
426 			kfree(ints);
427 			kfree(pids);
428 			return NULL;
429 		}
430 	}
431 
432 out:
433 	kfree(ints);
434 	return pids;
435 }
436 
/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (size is same to @nr_targets)
 *
 * This function should not be called while the kdamond is running.  @pids is
 * ignored if the context is not configured to have pid in each target.  On
 * failure, reference counts of all pids in @pids are decremented.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	/* Remove all existing targets, dropping their pid references. */
	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			/* Roll back the targets constructed so far. */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			/*
			 * Destroying a target does not put its pid (see the
			 * explicit put_pid() above), so all @nr_targets
			 * references are still ours to drop here.
			 */
			if (damon_target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		if (damon_target_has_pid(ctx))
			t->pid = pids[i];
		damon_add_target(ctx, t);
	}

	return 0;
}
477 
/*
 * Set the monitoring targets of the context from user input: either the
 * literal "paddr\n" for physical address space monitoring, or a list of pid
 * numbers for virtual address space monitoring.
 */
static ssize_t dbgfs_target_ids_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	bool id_is_pid = true;
	char *kbuf;
	struct pid **target_pids = NULL;
	ssize_t nr_targets;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* "paddr\n" selects physical address space monitoring (no pids). */
	if (!strncmp(kbuf, "paddr\n", count)) {
		id_is_pid = false;
		nr_targets = 1;
	}

	if (id_is_pid) {
		/* Takes a reference on each pid. */
		target_pids = str_to_pids(kbuf, count, &nr_targets);
		if (!target_pids) {
			ret = -ENOMEM;
			goto out;
		}
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		/* Targets cannot be changed while monitoring is running. */
		if (id_is_pid)
			dbgfs_put_pids(target_pids, nr_targets);
		ret = -EBUSY;
		goto unlock_out;
	}

	/* remove previously set targets */
	dbgfs_set_targets(ctx, 0, NULL);
	if (!nr_targets) {
		ret = count;
		goto unlock_out;
	}

	/* Configure the context for the address space type */
	if (id_is_pid)
		ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
	else
		ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
	/*
	 * NOTE(review): if damon_select_ops() fails here, the pid
	 * references in @target_pids appear not to be dropped before the
	 * array is freed below — verify whether this path can leak refs.
	 */
	if (ret)
		goto unlock_out;

	/* On failure, dbgfs_set_targets() drops the pid references itself. */
	ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
	if (!ret)
		ret = count;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	/* Frees only the array; successful targets own the references now. */
	kfree(target_pids);
out:
	kfree(kbuf);
	return ret;
}
539 
540 static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
541 {
542 	struct damon_target *t;
543 	struct damon_region *r;
544 	int target_idx = 0;
545 	int written = 0;
546 	int rc;
547 
548 	damon_for_each_target(t, c) {
549 		damon_for_each_region(r, t) {
550 			rc = scnprintf(&buf[written], len - written,
551 					"%d %lu %lu\n",
552 					target_idx, r->ar.start, r->ar.end);
553 			if (!rc)
554 				return -ENOMEM;
555 			written += rc;
556 		}
557 		target_idx++;
558 	}
559 	return written;
560 }
561 
562 static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
563 		size_t count, loff_t *ppos)
564 {
565 	struct damon_ctx *ctx = file->private_data;
566 	char *kbuf;
567 	ssize_t len;
568 
569 	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
570 	if (!kbuf)
571 		return -ENOMEM;
572 
573 	mutex_lock(&ctx->kdamond_lock);
574 	if (ctx->kdamond) {
575 		mutex_unlock(&ctx->kdamond_lock);
576 		len = -EBUSY;
577 		goto out;
578 	}
579 
580 	len = sprint_init_regions(ctx, kbuf, count);
581 	mutex_unlock(&ctx->kdamond_lock);
582 	if (len < 0)
583 		goto out;
584 	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
585 
586 out:
587 	kfree(kbuf);
588 	return len;
589 }
590 
/*
 * Add a region of [@ar->start, @ar->end) to the @target_idx'th target of @c.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * empty/inverted range, an out-of-range @target_idx, or a region that
 * overlaps the previously added one (regions must arrive in address order).
 */
static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;	/* stays -EINVAL if @target_idx not found */

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				/* Must not overlap its predecessor region */
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}
620 
621 static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
622 {
623 	struct damon_target *t;
624 	struct damon_region *r, *next;
625 	int pos = 0, parsed, ret;
626 	int target_idx;
627 	struct damon_addr_range ar;
628 	int err;
629 
630 	damon_for_each_target(t, c) {
631 		damon_for_each_region_safe(r, next, t)
632 			damon_destroy_region(r, t);
633 	}
634 
635 	while (pos < len) {
636 		ret = sscanf(&str[pos], "%d %lu %lu%n",
637 				&target_idx, &ar.start, &ar.end, &parsed);
638 		if (ret != 3)
639 			break;
640 		err = add_init_region(c, target_idx, &ar);
641 		if (err)
642 			goto fail;
643 		pos += parsed;
644 	}
645 
646 	return 0;
647 
648 fail:
649 	damon_for_each_target(t, c) {
650 		damon_for_each_region_safe(r, next, t)
651 			damon_destroy_region(r, t);
652 	}
653 	return err;
654 }
655 
656 static ssize_t dbgfs_init_regions_write(struct file *file,
657 					  const char __user *buf, size_t count,
658 					  loff_t *ppos)
659 {
660 	struct damon_ctx *ctx = file->private_data;
661 	char *kbuf;
662 	ssize_t ret = count;
663 	int err;
664 
665 	kbuf = user_input_str(buf, count, ppos);
666 	if (IS_ERR(kbuf))
667 		return PTR_ERR(kbuf);
668 
669 	mutex_lock(&ctx->kdamond_lock);
670 	if (ctx->kdamond) {
671 		ret = -EBUSY;
672 		goto unlock_out;
673 	}
674 
675 	err = set_init_regions(ctx, kbuf, ret);
676 	if (err)
677 		ret = err;
678 
679 unlock_out:
680 	mutex_unlock(&ctx->kdamond_lock);
681 	kfree(kbuf);
682 	return ret;
683 }
684 
685 static ssize_t dbgfs_kdamond_pid_read(struct file *file,
686 		char __user *buf, size_t count, loff_t *ppos)
687 {
688 	struct damon_ctx *ctx = file->private_data;
689 	char *kbuf;
690 	ssize_t len;
691 
692 	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
693 	if (!kbuf)
694 		return -ENOMEM;
695 
696 	mutex_lock(&ctx->kdamond_lock);
697 	if (ctx->kdamond)
698 		len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
699 	else
700 		len = scnprintf(kbuf, count, "none\n");
701 	mutex_unlock(&ctx->kdamond_lock);
702 	if (!len)
703 		goto out;
704 	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
705 
706 out:
707 	kfree(kbuf);
708 	return len;
709 }
710 
711 static int damon_dbgfs_open(struct inode *inode, struct file *file)
712 {
713 	file->private_data = inode->i_private;
714 
715 	return nonseekable_open(inode, file);
716 }
717 
/* Operations for the per-context "attrs" debugfs file. */
static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

/* Operations for the per-context "schemes" debugfs file. */
static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

/* Operations for the per-context "target_ids" debugfs file. */
static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

/* Operations for the per-context "init_regions" debugfs file. */
static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

/* Operations for the read-only per-context "kdamond_pid" debugfs file. */
static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};
746 
747 static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
748 {
749 	const char * const file_names[] = {"attrs", "schemes", "target_ids",
750 		"init_regions", "kdamond_pid"};
751 	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
752 		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
753 	int i;
754 
755 	for (i = 0; i < ARRAY_SIZE(file_names); i++)
756 		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
757 }
758 
/*
 * DAMON callback invoked before a kdamond terminates.  Drops the pid
 * references the targets hold and destroys the targets, so the context is
 * left clean for the next monitoring start.
 */
static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	/* Only pid-based (non-paddr) targets hold pid references. */
	if (!damon_target_has_pid(ctx))
		return;

	/* Serialize against target updates from the dbgfs file handlers. */
	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}
773 
774 static struct damon_ctx *dbgfs_new_ctx(void)
775 {
776 	struct damon_ctx *ctx;
777 
778 	ctx = damon_new_ctx();
779 	if (!ctx)
780 		return NULL;
781 
782 	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
783 			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
784 		damon_destroy_ctx(ctx);
785 		return NULL;
786 	}
787 	ctx->callback.before_terminate = dbgfs_before_terminate;
788 	return ctx;
789 }
790 
/* Counterpart of dbgfs_new_ctx(); releases a dbgfs monitoring context. */
static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}
795 
/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	/*
	 * Grow both arrays by one slot.  On krealloc() failure the old
	 * array remains valid, so returning early here is safe.
	 */
	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	/* dbgfs_dirs[0] is the "damon" root directory. */
	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	/* Commit the new slot only after both dir and ctx exist. */
	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	dbgfs_nr_ctxs++;

	return 0;
}
847 
848 static ssize_t dbgfs_mk_context_write(struct file *file,
849 		const char __user *buf, size_t count, loff_t *ppos)
850 {
851 	char *kbuf;
852 	char *ctx_name;
853 	ssize_t ret;
854 
855 	kbuf = user_input_str(buf, count, ppos);
856 	if (IS_ERR(kbuf))
857 		return PTR_ERR(kbuf);
858 	ctx_name = kmalloc(count + 1, GFP_KERNEL);
859 	if (!ctx_name) {
860 		kfree(kbuf);
861 		return -ENOMEM;
862 	}
863 
864 	/* Trim white space */
865 	if (sscanf(kbuf, "%s", ctx_name) != 1) {
866 		ret = -EINVAL;
867 		goto out;
868 	}
869 
870 	mutex_lock(&damon_dbgfs_lock);
871 	ret = dbgfs_mk_context(ctx_name);
872 	if (!ret)
873 		ret = count;
874 	mutex_unlock(&damon_dbgfs_lock);
875 
876 out:
877 	kfree(kbuf);
878 	kfree(ctx_name);
879 	return ret;
880 }
881 
882 /*
883  * Remove a context of @name and its debugfs directory.
884  *
885  * This function should be called while holding damon_dbgfs_lock.
886  *
887  * Return 0 on success, negative error code otherwise.
888  */
889 static int dbgfs_rm_context(char *name)
890 {
891 	struct dentry *root, *dir, **new_dirs;
892 	struct damon_ctx **new_ctxs;
893 	int i, j;
894 	int ret = 0;
895 
896 	if (damon_nr_running_ctxs())
897 		return -EBUSY;
898 
899 	root = dbgfs_dirs[0];
900 	if (!root)
901 		return -ENOENT;
902 
903 	dir = debugfs_lookup(name, root);
904 	if (!dir)
905 		return -ENOENT;
906 
907 	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
908 			GFP_KERNEL);
909 	if (!new_dirs) {
910 		ret = -ENOMEM;
911 		goto out_dput;
912 	}
913 
914 	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
915 			GFP_KERNEL);
916 	if (!new_ctxs) {
917 		ret = -ENOMEM;
918 		goto out_new_dirs;
919 	}
920 
921 	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
922 		if (dbgfs_dirs[i] == dir) {
923 			debugfs_remove(dbgfs_dirs[i]);
924 			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
925 			continue;
926 		}
927 		new_dirs[j] = dbgfs_dirs[i];
928 		new_ctxs[j++] = dbgfs_ctxs[i];
929 	}
930 
931 	kfree(dbgfs_dirs);
932 	kfree(dbgfs_ctxs);
933 
934 	dbgfs_dirs = new_dirs;
935 	dbgfs_ctxs = new_ctxs;
936 	dbgfs_nr_ctxs--;
937 
938 	goto out_dput;
939 
940 out_new_dirs:
941 	kfree(new_dirs);
942 out_dput:
943 	dput(dir);
944 	return ret;
945 }
946 
947 static ssize_t dbgfs_rm_context_write(struct file *file,
948 		const char __user *buf, size_t count, loff_t *ppos)
949 {
950 	char *kbuf;
951 	ssize_t ret;
952 	char *ctx_name;
953 
954 	kbuf = user_input_str(buf, count, ppos);
955 	if (IS_ERR(kbuf))
956 		return PTR_ERR(kbuf);
957 	ctx_name = kmalloc(count + 1, GFP_KERNEL);
958 	if (!ctx_name) {
959 		kfree(kbuf);
960 		return -ENOMEM;
961 	}
962 
963 	/* Trim white space */
964 	if (sscanf(kbuf, "%s", ctx_name) != 1) {
965 		ret = -EINVAL;
966 		goto out;
967 	}
968 
969 	mutex_lock(&damon_dbgfs_lock);
970 	ret = dbgfs_rm_context(ctx_name);
971 	if (!ret)
972 		ret = count;
973 	mutex_unlock(&damon_dbgfs_lock);
974 
975 out:
976 	kfree(kbuf);
977 	kfree(ctx_name);
978 	return ret;
979 }
980 
981 static ssize_t dbgfs_monitor_on_read(struct file *file,
982 		char __user *buf, size_t count, loff_t *ppos)
983 {
984 	char monitor_on_buf[5];
985 	bool monitor_on = damon_nr_running_ctxs() != 0;
986 	int len;
987 
988 	len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
989 
990 	return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
991 }
992 
/*
 * Start ("on") or stop ("off") monitoring on all dbgfs contexts.
 */
static ssize_t dbgfs_monitor_on_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char *kbuf;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/*
	 * Remove white space.  NOTE(review): source and destination of this
	 * sscanf() are the same buffer; it works for in-place token
	 * extraction in practice, but technically overlapping arguments
	 * violate sscanf()'s contract — consider a separate output buffer.
	 */
	if (sscanf(kbuf, "%s", kbuf) != 1) {
		kfree(kbuf);
		return -EINVAL;
	}

	/* Serialize against context creation/removal. */
	mutex_lock(&damon_dbgfs_lock);
	if (!strncmp(kbuf, "on", count)) {
		int i;

		/* Starting with an empty target list would monitor nothing */
		for (i = 0; i < dbgfs_nr_ctxs; i++) {
			if (damon_targets_empty(dbgfs_ctxs[i])) {
				kfree(kbuf);
				mutex_unlock(&damon_dbgfs_lock);
				return -EINVAL;
			}
		}
		ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
	} else if (!strncmp(kbuf, "off", count)) {
		ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&damon_dbgfs_lock);

	if (!ret)
		ret = count;
	kfree(kbuf);
	return ret;
}
1033 
/* Operations for the global, write-only "mk_contexts" debugfs file. */
static const struct file_operations mk_contexts_fops = {
	.write = dbgfs_mk_context_write,
};

/* Operations for the global, write-only "rm_contexts" debugfs file. */
static const struct file_operations rm_contexts_fops = {
	.write = dbgfs_rm_context_write,
};

/* Operations for the global "monitor_on" debugfs file. */
static const struct file_operations monitor_on_fops = {
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};
1046 
/*
 * Create the "damon" debugfs root directory, the global control files, and
 * the files of the initial (index 0) context.
 *
 * Returns 0 on success, -ENOMEM otherwise.
 */
static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops};
	int i;

	/*
	 * NOTE(review): the debugfs_create_dir() result is not checked with
	 * IS_ERR() before use below — confirm the debugfs API tolerates an
	 * error pointer here, or add an explicit check.
	 */
	dbgfs_root = debugfs_create_dir("damon", NULL);

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	/* The root directory doubles as the first context's directory. */
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	/* dbgfs_dirs starts with a single slot, holding the root. */
	dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}
1072 
1073 /*
1074  * Functions for the initialization
1075  */
1076 
1077 static int __init damon_dbgfs_init(void)
1078 {
1079 	int rc = -ENOMEM;
1080 
1081 	mutex_lock(&damon_dbgfs_lock);
1082 	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
1083 	if (!dbgfs_ctxs)
1084 		goto out;
1085 	dbgfs_ctxs[0] = dbgfs_new_ctx();
1086 	if (!dbgfs_ctxs[0]) {
1087 		kfree(dbgfs_ctxs);
1088 		goto out;
1089 	}
1090 	dbgfs_nr_ctxs = 1;
1091 
1092 	rc = __damon_dbgfs_init();
1093 	if (rc) {
1094 		kfree(dbgfs_ctxs[0]);
1095 		kfree(dbgfs_ctxs);
1096 		pr_err("%s: dbgfs init failed\n", __func__);
1097 	}
1098 
1099 out:
1100 	mutex_unlock(&damon_dbgfs_lock);
1101 	return rc;
1102 }
1103 
module_init(damon_dbgfs_init);

/* Test code is included here so it can exercise the static functions above. */
#include "dbgfs-test.h"
1107