// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Debugfs Interface
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-dbgfs: " fmt

#include <linux/damon.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page_idle.h>
#include <linux/slab.h>

static struct damon_ctx **dbgfs_ctxs;
static int dbgfs_nr_ctxs;
static struct dentry **dbgfs_dirs;
static DEFINE_MUTEX(damon_dbgfs_lock);

static void damon_dbgfs_warn_deprecation(void)
{
	pr_warn_once("DAMON debugfs interface is deprecated, "
			"so users should move to DAMON_SYSFS. If you cannot, "
			"please report your usecase to damon@lists.linux.dev and "
			"linux-mm@kvack.org.\n");
}

/*
 * Returns a non-empty string on success, an error pointer otherwise.
 */
static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* We do not accept continuous write */
	if (*ppos)
		return ERR_PTR(-EINVAL);

	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return ERR_PTR(-ENOMEM);

	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
	if (ret != count) {
		kfree(kbuf);
		return ERR_PTR(-EIO);
	}
	kbuf[ret] = '\0';

	return kbuf;
}

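/*
 * Show the monitoring attributes of the context through the 'attrs' debugfs
 * file, in "<sampling interval> <aggregation interval> <ops update interval>
 * <min nr regions> <max nr regions>" format.
 */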
static ssize_t dbgfs_attrs_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char kbuf[128];
	int ret;

	mutex_lock(&ctx->kdamond_lock);
	ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
			ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
			ctx->attrs.ops_update_interval,
			ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
	mutex_unlock(&ctx->kdamond_lock);

	return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
}

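/*
 * Parse five values from the user input in the same order as the 'attrs'
 * read format above and apply them via damon_set_attrs().  The write is
 * rejected with -EBUSY while the kdamond is running.
 */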
static ssize_t dbgfs_attrs_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	struct damon_attrs attrs;
	char *kbuf;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
				&attrs.sample_interval, &attrs.aggr_interval,
				&attrs.ops_update_interval,
				&attrs.min_nr_regions,
				&attrs.max_nr_regions) != 5) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_attrs(ctx, &attrs);
	if (!ret)
		ret = count;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
out:
	kfree(kbuf);
	return ret;
}

/*
 * Return corresponding dbgfs' scheme action value (int) for the given
 * damos_action if the given damos_action value is valid and supported by
 * dbgfs, negative error code otherwise.
 */
static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
{
	switch (action) {
	case DAMOS_WILLNEED:
		return 0;
	case DAMOS_COLD:
		return 1;
	case DAMOS_PAGEOUT:
		return 2;
	case DAMOS_HUGEPAGE:
		return 3;
	case DAMOS_NOHUGEPAGE:
		return 4;
	case DAMOS_STAT:
		return 5;
	default:
		return -EINVAL;
	}
}

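/*
 * Print every scheme of the context, one per line, in the same field order
 * that dbgfs_schemes_write() below accepts, followed by the scheme's
 * statistics.  Returns the number of bytes written, or -ENOMEM if @buf is
 * too small.
 */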
static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->pattern.min_sz_region,
				s->pattern.max_sz_region,
				s->pattern.min_nr_accesses,
				s->pattern.max_nr_accesses,
				s->pattern.min_age_region,
				s->pattern.max_age_region,
				damos_action_to_dbgfs_scheme_action(s->action),
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		if (!rc)
			return -ENOMEM;

		written += rc;
	}
	return written;
}

static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_schemes(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

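/* Free an array of schemes that str_to_schemes() below has returned */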
static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
{
	ssize_t i;

	for (i = 0; i < nr_schemes; i++)
		kfree(schemes[i]);
	kfree(schemes);
}

/*
 * Return corresponding damos_action for the given dbgfs input for a scheme
 * action if the input is valid, negative error code otherwise.
 */
static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
{
	switch (dbgfs_action) {
	case 0:
		return DAMOS_WILLNEED;
	case 1:
		return DAMOS_COLD;
	case 2:
		return DAMOS_PAGEOUT;
	case 3:
		return DAMOS_HUGEPAGE;
	case 4:
		return DAMOS_NOHUGEPAGE;
	case 5:
		return DAMOS_STAT;
	default:
		return -EINVAL;
	}
}

/*
 * Converts a string into an array of struct damos pointers
 *
 * Returns the array of struct damos pointers if the conversion succeeds, or
 * NULL otherwise.
 */
static struct damos **str_to_schemes(const char *str, ssize_t len,
				ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned int action_input;
	enum damos_action action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_access_pattern pattern = {};
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&pattern.min_sz_region, &pattern.max_sz_region,
				&pattern.min_nr_accesses,
				&pattern.max_nr_accesses,
				&pattern.min_age_region,
				&pattern.max_age_region,
				&action_input, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		if (ret != 18)
			break;
		action = dbgfs_scheme_action_to_damos_action(action_input);
		if ((int)action < 0)
			goto fail;

		if (pattern.min_sz_region > pattern.max_sz_region ||
		    pattern.min_nr_accesses > pattern.max_nr_accesses ||
		    pattern.min_age_region > pattern.max_age_region)
			goto fail;

		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid < wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(&pattern, action, 0, &quota,
				&wmarks);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}

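/*
 * Install the schemes described by the user input to the context.  Each
 * scheme is described by 18 space-separated fields:
 * "<min size> <max size> <min accesses> <max accesses> <min age> <max age>
 *  <action> <quota ms> <quota sz> <quota reset interval>
 *  <size weight> <access weight> <age weight>
 *  <wmarks metric> <wmarks interval> <high> <mid> <low>"
 */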
static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	struct damos **schemes;
	ssize_t nr_schemes = 0, ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	schemes = str_to_schemes(kbuf, count, &nr_schemes);
	if (!schemes) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	damon_set_schemes(ctx, schemes, nr_schemes);
	ret = count;
	nr_schemes = 0;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	free_schemes_arr(schemes, nr_schemes);
out:
	kfree(kbuf);
	return ret;
}

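/*
 * Print the pid numbers of the monitoring targets of the context in one
 * space-separated line, or '42' per target for contexts having no pid
 * (i.e., physical address space monitoring).
 */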
static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
	struct damon_target *t;
	int id;
	int written = 0;
	int rc;

	damon_for_each_target(t, ctx) {
		if (damon_target_has_pid(ctx))
			/* Show pid numbers to debugfs users */
			id = pid_vnr(t->pid);
		else
			/* Show 42 for physical address space, just for fun */
			id = 42;

		rc = scnprintf(&buf[written], len - written, "%d ", id);
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	if (written)
		written -= 1;
	written += scnprintf(&buf[written], len - written, "\n");
	return written;
}

static ssize_t dbgfs_target_ids_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	ssize_t len;
	char ids_buf[320];

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_target_ids(ctx, ids_buf, 320);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
}

/*
 * Converts a string into an array of integers
 *
 * Returns the array of integers if the conversion succeeds, or NULL
 * otherwise.
 */
static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
{
	int *array;
	const int max_nr_ints = 32;
	int nr;
	int pos = 0, parsed, ret;

	*nr_ints = 0;
	array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;
	while (*nr_ints < max_nr_ints && pos < len) {
		ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
		pos += parsed;
		if (ret != 1)
			break;
		array[*nr_ints] = nr;
		*nr_ints += 1;
	}

	return array;
}

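/* Drop the pid references that str_to_pids() below took via find_get_pid() */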
static void dbgfs_put_pids(struct pid **pids, int nr_pids)
{
	int i;

	for (i = 0; i < nr_pids; i++)
		put_pid(pids[i]);
}

/*
 * Converts a string into an array of struct pid pointers
 *
 * Returns the array of struct pid pointers if the conversion succeeds, or
 * NULL otherwise.
 */
static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
{
	int *ints;
	ssize_t nr_ints;
	struct pid **pids;

	*nr_pids = 0;

	ints = str_to_ints(str, len, &nr_ints);
	if (!ints)
		return NULL;

	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
	if (!pids)
		goto out;

	for (; *nr_pids < nr_ints; (*nr_pids)++) {
		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
		if (!pids[*nr_pids]) {
			dbgfs_put_pids(pids, *nr_pids);
			kfree(ints);
			kfree(pids);
			return NULL;
		}
	}

out:
	kfree(ints);
	return pids;
}

/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (size is same as @nr_targets)
 *
 * This function should not be called while the kdamond is running.  @pids is
 * ignored if the context is not configured to have pid in each target.  On
 * failure, reference counts of all pids in @pids are decremented.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			if (damon_target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		if (damon_target_has_pid(ctx))
			t->pid = pids[i];
		damon_add_target(ctx, t);
	}

	return 0;
}

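/*
 * Set the monitoring targets from the user input.  The input is either the
 * keyword "paddr" for physical address space monitoring, or a list of pid
 * numbers for virtual address spaces monitoring.  The write is rejected with
 * -EBUSY while the kdamond is running.
 */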
static ssize_t dbgfs_target_ids_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	bool id_is_pid = true;
	char *kbuf;
	struct pid **target_pids = NULL;
	ssize_t nr_targets;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (!strncmp(kbuf, "paddr\n", count)) {
		id_is_pid = false;
		nr_targets = 1;
	}

	if (id_is_pid) {
		target_pids = str_to_pids(kbuf, count, &nr_targets);
		if (!target_pids) {
			ret = -ENOMEM;
			goto out;
		}
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		if (id_is_pid)
			dbgfs_put_pids(target_pids, nr_targets);
		ret = -EBUSY;
		goto unlock_out;
	}

	/* remove previously set targets */
	dbgfs_set_targets(ctx, 0, NULL);
	if (!nr_targets) {
		ret = count;
		goto unlock_out;
	}

	/* Configure the context for the address space type */
	if (id_is_pid)
		ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
	else
		ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
	if (ret)
		goto unlock_out;

	ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
	if (!ret)
		ret = count;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(target_pids);
out:
	kfree(kbuf);
	return ret;
}

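/*
 * Print the initial monitoring target regions of the context, one region per
 * line in "<target idx> <start address> <end address>" format.
 */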
static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r;
	int target_idx = 0;
	int written = 0;
	int rc;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			rc = scnprintf(&buf[written], len - written,
					"%d %lu %lu\n",
					target_idx, r->ar.start, r->ar.end);
			if (!rc)
				return -ENOMEM;
			written += rc;
		}
		target_idx++;
	}
	return written;
}

static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		mutex_unlock(&ctx->kdamond_lock);
		len = -EBUSY;
		goto out;
	}

	len = sprint_init_regions(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

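/*
 * Add an initial monitoring region of [@ar->start, @ar->end) to the
 * @target_idx-th target of the context.  Fails if the range is empty, the
 * target does not exist, or the range overlaps the previously added region
 * of the same target.
 */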
static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}

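/*
 * Replace the initial monitoring target regions of the context with those
 * described by the user input, given as one "<target idx> <start> <end>"
 * triple per line.  Already added regions are removed again if a region
 * cannot be added.
 */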
static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r, *next;
	int pos = 0, parsed, ret;
	int target_idx;
	struct damon_addr_range ar;
	int err;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	while (pos < len) {
		ret = sscanf(&str[pos], "%d %lu %lu%n",
				&target_idx, &ar.start, &ar.end, &parsed);
		if (ret != 3)
			break;
		err = add_init_region(c, target_idx, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}

	return 0;

fail:
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}
	return err;
}

static ssize_t dbgfs_init_regions_write(struct file *file,
		const char __user *buf, size_t count,
		loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t ret = count;
	int err;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	err = set_init_regions(ctx, kbuf, ret);
	if (err)
		ret = err;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(kbuf);
	return ret;
}

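/*
 * Print the pid of the kdamond thread of the context if it is running, or
 * "none" otherwise, for the 'kdamond_pid' debugfs file.
 */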
static ssize_t dbgfs_kdamond_pid_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
	else
		len = scnprintf(kbuf, count, "none\n");
	mutex_unlock(&ctx->kdamond_lock);
	if (!len)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();

	file->private_data = inode->i_private;

	return nonseekable_open(inode, file);
}

static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};

static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
{
	const char * const file_names[] = {"attrs", "schemes", "target_ids",
		"init_regions", "kdamond_pid"};
	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
	int i;

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
}

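/*
 * The before_terminate() callback of debugfs contexts.  For contexts having
 * pid-based targets, drops the pid references of the targets and destroys
 * them, so the references are not leaked when the monitoring stops.
 */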
static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}

static struct damon_ctx *dbgfs_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;

	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	ctx->callback.before_terminate = dbgfs_before_terminate;
	return ctx;
}

static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}

/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	dbgfs_nr_ctxs++;

	return 0;
}

static ssize_t dbgfs_mk_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	char *ctx_name;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_mk_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

/*
 * Remove a context of @name and its debugfs directory.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Return 0 on success, negative error code otherwise.
 */
static int dbgfs_rm_context(char *name)
{
	struct dentry *root, *dir, **new_dirs;
	struct inode *inode;
	struct damon_ctx **new_ctxs;
	int i, j;
	int ret = 0;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	dir = debugfs_lookup(name, root);
	if (!dir)
		return -ENOENT;

	inode = d_inode(dir);
	if (!S_ISDIR(inode->i_mode)) {
		ret = -EINVAL;
		goto out_dput;
	}

	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
	if (!new_dirs) {
		ret = -ENOMEM;
		goto out_dput;
	}

	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
	if (!new_ctxs) {
		ret = -ENOMEM;
		goto out_new_dirs;
	}

	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
		if (dbgfs_dirs[i] == dir) {
			debugfs_remove(dbgfs_dirs[i]);
			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
			continue;
		}
		new_dirs[j] = dbgfs_dirs[i];
		new_ctxs[j++] = dbgfs_ctxs[i];
	}

	kfree(dbgfs_dirs);
	kfree(dbgfs_ctxs);

	dbgfs_dirs = new_dirs;
	dbgfs_ctxs = new_ctxs;
	dbgfs_nr_ctxs--;

	goto out_dput;

out_new_dirs:
	kfree(new_dirs);
out_dput:
	dput(dir);
	return ret;
}

static ssize_t dbgfs_rm_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;
	char *ctx_name;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_rm_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

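/*
 * Show whether DAMON is currently running through the 'monitor_on' debugfs
 * file: prints "on" if any monitoring context is running, "off" otherwise.
 */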
static ssize_t dbgfs_monitor_on_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	char monitor_on_buf[5];
	bool monitor_on = damon_nr_running_ctxs() != 0;
	int len;

	len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");

	return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
}

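/*
 * Start monitoring for all debugfs contexts when "on" is written, or stop it
 * when "off" is written.  Starting is rejected with -EINVAL if any debugfs
 * context has no monitoring target set.
 */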
static ssize_t dbgfs_monitor_on_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char *kbuf;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* Remove white space */
	if (sscanf(kbuf, "%s", kbuf) != 1) {
		kfree(kbuf);
		return -EINVAL;
	}

	mutex_lock(&damon_dbgfs_lock);
	if (!strncmp(kbuf, "on", count)) {
		int i;

		for (i = 0; i < dbgfs_nr_ctxs; i++) {
			if (damon_targets_empty(dbgfs_ctxs[i])) {
				kfree(kbuf);
				mutex_unlock(&damon_dbgfs_lock);
				return -EINVAL;
			}
		}
		ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
	} else if (!strncmp(kbuf, "off", count)) {
		ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&damon_dbgfs_lock);

	if (!ret)
		ret = count;
	kfree(kbuf);
	return ret;
}

static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();
	return nonseekable_open(inode, file);
}

static const struct file_operations mk_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_mk_context_write,
};

static const struct file_operations rm_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_rm_context_write,
};

static const struct file_operations monitor_on_fops = {
	.open = damon_dbgfs_static_file_open,
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};

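/*
 * Create the "damon" debugfs root directory with its 'mk_contexts',
 * 'rm_contexts' and 'monitor_on' files, and fill the root with the files of
 * the default context (dbgfs_ctxs[0]).
 */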
static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops};
	int i;

	dbgfs_root = debugfs_create_dir("damon", NULL);

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}

/*
 * Functions for the initialization
 */

static int __init damon_dbgfs_init(void)
{
	int rc = -ENOMEM;

	mutex_lock(&damon_dbgfs_lock);
	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
	if (!dbgfs_ctxs)
		goto out;
	dbgfs_ctxs[0] = dbgfs_new_ctx();
	if (!dbgfs_ctxs[0]) {
		kfree(dbgfs_ctxs);
		goto out;
	}
	dbgfs_nr_ctxs = 1;

	rc = __damon_dbgfs_init();
	if (rc) {
		kfree(dbgfs_ctxs[0]);
		kfree(dbgfs_ctxs);
		pr_err("%s: dbgfs init failed\n", __func__);
	}

out:
	mutex_unlock(&damon_dbgfs_lock);
	return rc;
}

module_init(damon_dbgfs_init);

#include "dbgfs-test.h"