1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * DAMON Debugfs Interface 4 * 5 * Author: SeongJae Park <sjpark@amazon.de> 6 */ 7 8 #define pr_fmt(fmt) "damon-dbgfs: " fmt 9 10 #include <linux/damon.h> 11 #include <linux/debugfs.h> 12 #include <linux/file.h> 13 #include <linux/mm.h> 14 #include <linux/module.h> 15 #include <linux/page_idle.h> 16 #include <linux/slab.h> 17 18 static struct damon_ctx **dbgfs_ctxs; 19 static int dbgfs_nr_ctxs; 20 static struct dentry **dbgfs_dirs; 21 static DEFINE_MUTEX(damon_dbgfs_lock); 22 23 /* 24 * Returns non-empty string on success, negative error code otherwise. 25 */ 26 static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos) 27 { 28 char *kbuf; 29 ssize_t ret; 30 31 /* We do not accept continuous write */ 32 if (*ppos) 33 return ERR_PTR(-EINVAL); 34 35 kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN); 36 if (!kbuf) 37 return ERR_PTR(-ENOMEM); 38 39 ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count); 40 if (ret != count) { 41 kfree(kbuf); 42 return ERR_PTR(-EIO); 43 } 44 kbuf[ret] = '\0'; 45 46 return kbuf; 47 } 48 49 static ssize_t dbgfs_attrs_read(struct file *file, 50 char __user *buf, size_t count, loff_t *ppos) 51 { 52 struct damon_ctx *ctx = file->private_data; 53 char kbuf[128]; 54 int ret; 55 56 mutex_lock(&ctx->kdamond_lock); 57 ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n", 58 ctx->sample_interval, ctx->aggr_interval, 59 ctx->ops_update_interval, ctx->min_nr_regions, 60 ctx->max_nr_regions); 61 mutex_unlock(&ctx->kdamond_lock); 62 63 return simple_read_from_buffer(buf, count, ppos, kbuf, ret); 64 } 65 66 static ssize_t dbgfs_attrs_write(struct file *file, 67 const char __user *buf, size_t count, loff_t *ppos) 68 { 69 struct damon_ctx *ctx = file->private_data; 70 unsigned long s, a, r, minr, maxr; 71 char *kbuf; 72 ssize_t ret; 73 74 kbuf = user_input_str(buf, count, ppos); 75 if (IS_ERR(kbuf)) 76 return PTR_ERR(kbuf); 77 78 if (sscanf(kbuf, "%lu %lu %lu %lu %lu", 79 &s, &a, &r, &minr, &maxr) != 5) { 80 ret = -EINVAL; 81 goto out; 82 } 83 84 mutex_lock(&ctx->kdamond_lock); 85 if (ctx->kdamond) { 86 ret = -EBUSY; 87 goto unlock_out; 88 } 89 90 ret = damon_set_attrs(ctx, s, a, r, minr, maxr); 91 if (!ret) 92 ret = count; 93 unlock_out: 94 mutex_unlock(&ctx->kdamond_lock); 95 out: 96 kfree(kbuf); 97 return ret; 98 } 99 100 /* 101 * Return corresponding dbgfs' scheme action value (int) for the given 102 * damos_action if the given damos_action value is valid and supported by 103 * dbgfs, negative error code otherwise. 104 */ 105 static int damos_action_to_dbgfs_scheme_action(enum damos_action action) 106 { 107 switch (action) { 108 case DAMOS_WILLNEED: 109 return 0; 110 case DAMOS_COLD: 111 return 1; 112 case DAMOS_PAGEOUT: 113 return 2; 114 case DAMOS_HUGEPAGE: 115 return 3; 116 case DAMOS_NOHUGEPAGE: 117 return 4; 118 case DAMOS_STAT: 119 return 5; 120 default: 121 return -EINVAL; 122 } 123 } 124 125 static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len) 126 { 127 struct damos *s; 128 int written = 0; 129 int rc; 130 131 damon_for_each_scheme(s, c) { 132 rc = scnprintf(&buf[written], len - written, 133 "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", 134 s->min_sz_region, s->max_sz_region, 135 s->min_nr_accesses, s->max_nr_accesses, 136 s->min_age_region, s->max_age_region, 137 damos_action_to_dbgfs_scheme_action(s->action), 138 s->quota.ms, s->quota.sz, 139 s->quota.reset_interval, 140 s->quota.weight_sz, 141 s->quota.weight_nr_accesses, 142 s->quota.weight_age, 143 s->wmarks.metric, s->wmarks.interval, 144 s->wmarks.high, s->wmarks.mid, s->wmarks.low, 145 s->stat.nr_tried, s->stat.sz_tried, 146 s->stat.nr_applied, s->stat.sz_applied, 147 s->stat.qt_exceeds); 148 if (!rc) 149 return -ENOMEM; 150 151 written += rc; 152 } 153 return written; 154 } 155 156 static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf, 157 size_t count, loff_t *ppos) 158 { 159 struct damon_ctx *ctx = file->private_data; 160 char *kbuf; 161 ssize_t len; 162 163 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); 164 if (!kbuf) 165 return -ENOMEM; 166 167 mutex_lock(&ctx->kdamond_lock); 168 len = sprint_schemes(ctx, kbuf, count); 169 mutex_unlock(&ctx->kdamond_lock); 170 if (len < 0) 171 goto out; 172 len = simple_read_from_buffer(buf, count, ppos, kbuf, len); 173 174 out: 175 kfree(kbuf); 176 return len; 177 } 178 179 static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes) 180 { 181 ssize_t i; 182 183 for (i = 0; i < nr_schemes; i++) 184 kfree(schemes[i]); 185 kfree(schemes); 186 } 187 188 /* 189 * Return corresponding damos_action for the given dbgfs input for a scheme 190 * action if the input is valid, negative error code otherwise. 191 */ 192 static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action) 193 { 194 switch (dbgfs_action) { 195 case 0: 196 return DAMOS_WILLNEED; 197 case 1: 198 return DAMOS_COLD; 199 case 2: 200 return DAMOS_PAGEOUT; 201 case 3: 202 return DAMOS_HUGEPAGE; 203 case 4: 204 return DAMOS_NOHUGEPAGE; 205 case 5: 206 return DAMOS_STAT; 207 default: 208 return -EINVAL; 209 } 210 } 211 212 /* 213 * Converts a string into an array of struct damos pointers 214 * 215 * Returns an array of struct damos pointers that converted if the conversion 216 * success, or NULL otherwise. 217 */ 218 static struct damos **str_to_schemes(const char *str, ssize_t len, 219 ssize_t *nr_schemes) 220 { 221 struct damos *scheme, **schemes; 222 const int max_nr_schemes = 256; 223 int pos = 0, parsed, ret; 224 unsigned long min_sz, max_sz; 225 unsigned int min_nr_a, max_nr_a, min_age, max_age; 226 unsigned int action_input; 227 enum damos_action action; 228 229 schemes = kmalloc_array(max_nr_schemes, sizeof(scheme), 230 GFP_KERNEL); 231 if (!schemes) 232 return NULL; 233 234 *nr_schemes = 0; 235 while (pos < len && *nr_schemes < max_nr_schemes) { 236 struct damos_quota quota = {}; 237 struct damos_watermarks wmarks; 238 239 ret = sscanf(&str[pos], 240 "%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n", 241 &min_sz, &max_sz, &min_nr_a, &max_nr_a, 242 &min_age, &max_age, &action_input, "a.ms, 243 "a.sz, "a.reset_interval, 244 "a.weight_sz, "a.weight_nr_accesses, 245 "a.weight_age, &wmarks.metric, 246 &wmarks.interval, &wmarks.high, &wmarks.mid, 247 &wmarks.low, &parsed); 248 if (ret != 18) 249 break; 250 action = dbgfs_scheme_action_to_damos_action(action_input); 251 if ((int)action < 0) 252 goto fail; 253 254 if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age) 255 goto fail; 256 257 if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low || 258 wmarks.mid < wmarks.low) 259 goto fail; 260 261 pos += parsed; 262 scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a, 263 min_age, max_age, action, "a, &wmarks); 264 if (!scheme) 265 goto fail; 266 267 schemes[*nr_schemes] = scheme; 268 *nr_schemes += 1; 269 } 270 return schemes; 271 fail: 272 free_schemes_arr(schemes, *nr_schemes); 273 return NULL; 274 } 275 276 static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf, 277 size_t count, loff_t *ppos) 278 { 279 struct damon_ctx *ctx = file->private_data; 280 char *kbuf; 281 struct damos **schemes; 282 ssize_t nr_schemes = 0, ret; 283 284 kbuf = user_input_str(buf, count, ppos); 285 if (IS_ERR(kbuf)) 286 return PTR_ERR(kbuf); 287 288 schemes = str_to_schemes(kbuf, count, &nr_schemes); 289 if (!schemes) { 290 ret = -EINVAL; 291 goto out; 292 } 293 294 mutex_lock(&ctx->kdamond_lock); 295 if (ctx->kdamond) { 296 ret = -EBUSY; 297 goto unlock_out; 298 } 299 300 ret = damon_set_schemes(ctx, schemes, nr_schemes); 301 if (!ret) { 302 ret = count; 303 nr_schemes = 0; 304 } 305 306 unlock_out: 307 mutex_unlock(&ctx->kdamond_lock); 308 free_schemes_arr(schemes, nr_schemes); 309 out: 310 kfree(kbuf); 311 return ret; 312 } 313 314 static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len) 315 { 316 struct damon_target *t; 317 int id; 318 int written = 0; 319 int rc; 320 321 damon_for_each_target(t, ctx) { 322 if (damon_target_has_pid(ctx)) 323 /* Show pid numbers to debugfs users */ 324 id = pid_vnr(t->pid); 325 else 326 /* Show 42 for physical address space, just for fun */ 327 id = 42; 328 329 rc = scnprintf(&buf[written], len - written, "%d ", id); 330 if (!rc) 331 return -ENOMEM; 332 written += rc; 333 } 334 if (written) 335 written -= 1; 336 written += scnprintf(&buf[written], len - written, "\n"); 337 return written; 338 } 339 340 static ssize_t dbgfs_target_ids_read(struct file *file, 341 char __user *buf, size_t count, loff_t *ppos) 342 { 343 struct damon_ctx *ctx = file->private_data; 344 ssize_t len; 345 char ids_buf[320]; 346 347 mutex_lock(&ctx->kdamond_lock); 348 len = sprint_target_ids(ctx, ids_buf, 320); 349 mutex_unlock(&ctx->kdamond_lock); 350 if (len < 0) 351 return len; 352 353 return simple_read_from_buffer(buf, count, ppos, ids_buf, len); 354 } 355 356 /* 357 * Converts a string into an integers array 358 * 359 * Returns an array of integers array if the conversion success, or NULL 360 * otherwise. 361 */ 362 static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints) 363 { 364 int *array; 365 const int max_nr_ints = 32; 366 int nr; 367 int pos = 0, parsed, ret; 368 369 *nr_ints = 0; 370 array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL); 371 if (!array) 372 return NULL; 373 while (*nr_ints < max_nr_ints && pos < len) { 374 ret = sscanf(&str[pos], "%d%n", &nr, &parsed); 375 pos += parsed; 376 if (ret != 1) 377 break; 378 array[*nr_ints] = nr; 379 *nr_ints += 1; 380 } 381 382 return array; 383 } 384 385 static void dbgfs_put_pids(struct pid **pids, int nr_pids) 386 { 387 int i; 388 389 for (i = 0; i < nr_pids; i++) 390 put_pid(pids[i]); 391 } 392 393 /* 394 * Converts a string into an struct pid pointers array 395 * 396 * Returns an array of struct pid pointers if the conversion success, or NULL 397 * otherwise. 398 */ 399 static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids) 400 { 401 int *ints; 402 ssize_t nr_ints; 403 struct pid **pids; 404 405 *nr_pids = 0; 406 407 ints = str_to_ints(str, len, &nr_ints); 408 if (!ints) 409 return NULL; 410 411 pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL); 412 if (!pids) 413 goto out; 414 415 for (; *nr_pids < nr_ints; (*nr_pids)++) { 416 pids[*nr_pids] = find_get_pid(ints[*nr_pids]); 417 if (!pids[*nr_pids]) { 418 dbgfs_put_pids(pids, *nr_pids); 419 kfree(ints); 420 kfree(pids); 421 return NULL; 422 } 423 } 424 425 out: 426 kfree(ints); 427 return pids; 428 } 429 430 /* 431 * dbgfs_set_targets() - Set monitoring targets. 432 * @ctx: monitoring context 433 * @nr_targets: number of targets 434 * @pids: array of target pids (size is same to @nr_targets) 435 * 436 * This function should not be called while the kdamond is running. @pids is 437 * ignored if the context is not configured to have pid in each target. On 438 * failure, reference counts of all pids in @pids are decremented. 439 * 440 * Return: 0 on success, negative error code otherwise. 441 */ 442 static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets, 443 struct pid **pids) 444 { 445 ssize_t i; 446 struct damon_target *t, *next; 447 448 damon_for_each_target_safe(t, next, ctx) { 449 if (damon_target_has_pid(ctx)) 450 put_pid(t->pid); 451 damon_destroy_target(t); 452 } 453 454 for (i = 0; i < nr_targets; i++) { 455 t = damon_new_target(); 456 if (!t) { 457 damon_for_each_target_safe(t, next, ctx) 458 damon_destroy_target(t); 459 if (damon_target_has_pid(ctx)) 460 dbgfs_put_pids(pids, nr_targets); 461 return -ENOMEM; 462 } 463 if (damon_target_has_pid(ctx)) 464 t->pid = pids[i]; 465 damon_add_target(ctx, t); 466 } 467 468 return 0; 469 } 470 471 static ssize_t dbgfs_target_ids_write(struct file *file, 472 const char __user *buf, size_t count, loff_t *ppos) 473 { 474 struct damon_ctx *ctx = file->private_data; 475 bool id_is_pid = true; 476 char *kbuf; 477 struct pid **target_pids = NULL; 478 ssize_t nr_targets; 479 ssize_t ret; 480 481 kbuf = user_input_str(buf, count, ppos); 482 if (IS_ERR(kbuf)) 483 return PTR_ERR(kbuf); 484 485 if (!strncmp(kbuf, "paddr\n", count)) { 486 id_is_pid = false; 487 nr_targets = 1; 488 } 489 490 if (id_is_pid) { 491 target_pids = str_to_pids(kbuf, count, &nr_targets); 492 if (!target_pids) { 493 ret = -ENOMEM; 494 goto out; 495 } 496 } 497 498 mutex_lock(&ctx->kdamond_lock); 499 if (ctx->kdamond) { 500 if (id_is_pid) 501 dbgfs_put_pids(target_pids, nr_targets); 502 ret = -EBUSY; 503 goto unlock_out; 504 } 505 506 /* remove previously set targets */ 507 dbgfs_set_targets(ctx, 0, NULL); 508 if (!nr_targets) { 509 ret = count; 510 goto unlock_out; 511 } 512 513 /* Configure the context for the address space type */ 514 if (id_is_pid) 515 ret = damon_select_ops(ctx, DAMON_OPS_VADDR); 516 else 517 ret = damon_select_ops(ctx, DAMON_OPS_PADDR); 518 if (ret) 519 goto unlock_out; 520 521 ret = dbgfs_set_targets(ctx, nr_targets, target_pids); 522 if (!ret) 523 ret = count; 524 525 unlock_out: 526 mutex_unlock(&ctx->kdamond_lock); 527 kfree(target_pids); 528 out: 529 kfree(kbuf); 530 return ret; 531 } 532 533 static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len) 534 { 535 struct damon_target *t; 536 struct damon_region *r; 537 int target_idx = 0; 538 int written = 0; 539 int rc; 540 541 damon_for_each_target(t, c) { 542 damon_for_each_region(r, t) { 543 rc = scnprintf(&buf[written], len - written, 544 "%d %lu %lu\n", 545 target_idx, r->ar.start, r->ar.end); 546 if (!rc) 547 return -ENOMEM; 548 written += rc; 549 } 550 target_idx++; 551 } 552 return written; 553 } 554 555 static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf, 556 size_t count, loff_t *ppos) 557 { 558 struct damon_ctx *ctx = file->private_data; 559 char *kbuf; 560 ssize_t len; 561 562 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); 563 if (!kbuf) 564 return -ENOMEM; 565 566 mutex_lock(&ctx->kdamond_lock); 567 if (ctx->kdamond) { 568 mutex_unlock(&ctx->kdamond_lock); 569 len = -EBUSY; 570 goto out; 571 } 572 573 len = sprint_init_regions(ctx, kbuf, count); 574 mutex_unlock(&ctx->kdamond_lock); 575 if (len < 0) 576 goto out; 577 len = simple_read_from_buffer(buf, count, ppos, kbuf, len); 578 579 out: 580 kfree(kbuf); 581 return len; 582 } 583 584 static int add_init_region(struct damon_ctx *c, int target_idx, 585 struct damon_addr_range *ar) 586 { 587 struct damon_target *t; 588 struct damon_region *r, *prev; 589 unsigned long idx = 0; 590 int rc = -EINVAL; 591 592 if (ar->start >= ar->end) 593 return -EINVAL; 594 595 damon_for_each_target(t, c) { 596 if (idx++ == target_idx) { 597 r = damon_new_region(ar->start, ar->end); 598 if (!r) 599 return -ENOMEM; 600 damon_add_region(r, t); 601 if (damon_nr_regions(t) > 1) { 602 prev = damon_prev_region(r); 603 if (prev->ar.end > r->ar.start) { 604 damon_destroy_region(r, t); 605 return -EINVAL; 606 } 607 } 608 rc = 0; 609 } 610 } 611 return rc; 612 } 613 614 static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) 615 { 616 struct damon_target *t; 617 struct damon_region *r, *next; 618 int pos = 0, parsed, ret; 619 int target_idx; 620 struct damon_addr_range ar; 621 int err; 622 623 damon_for_each_target(t, c) { 624 damon_for_each_region_safe(r, next, t) 625 damon_destroy_region(r, t); 626 } 627 628 while (pos < len) { 629 ret = sscanf(&str[pos], "%d %lu %lu%n", 630 &target_idx, &ar.start, &ar.end, &parsed); 631 if (ret != 3) 632 break; 633 err = add_init_region(c, target_idx, &ar); 634 if (err) 635 goto fail; 636 pos += parsed; 637 } 638 639 return 0; 640 641 fail: 642 damon_for_each_target(t, c) { 643 damon_for_each_region_safe(r, next, t) 644 damon_destroy_region(r, t); 645 } 646 return err; 647 } 648 649 static ssize_t dbgfs_init_regions_write(struct file *file, 650 const char __user *buf, size_t count, 651 loff_t *ppos) 652 { 653 struct damon_ctx *ctx = file->private_data; 654 char *kbuf; 655 ssize_t ret = count; 656 int err; 657 658 kbuf = user_input_str(buf, count, ppos); 659 if (IS_ERR(kbuf)) 660 return PTR_ERR(kbuf); 661 662 mutex_lock(&ctx->kdamond_lock); 663 if (ctx->kdamond) { 664 ret = -EBUSY; 665 goto unlock_out; 666 } 667 668 err = set_init_regions(ctx, kbuf, ret); 669 if (err) 670 ret = err; 671 672 unlock_out: 673 mutex_unlock(&ctx->kdamond_lock); 674 kfree(kbuf); 675 return ret; 676 } 677 678 static ssize_t dbgfs_kdamond_pid_read(struct file *file, 679 char __user *buf, size_t count, loff_t *ppos) 680 { 681 struct damon_ctx *ctx = file->private_data; 682 char *kbuf; 683 ssize_t len; 684 685 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); 686 if (!kbuf) 687 return -ENOMEM; 688 689 mutex_lock(&ctx->kdamond_lock); 690 if (ctx->kdamond) 691 len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid); 692 else 693 len = scnprintf(kbuf, count, "none\n"); 694 mutex_unlock(&ctx->kdamond_lock); 695 if (!len) 696 goto out; 697 len = simple_read_from_buffer(buf, count, ppos, kbuf, len); 698 699 out: 700 kfree(kbuf); 701 return len; 702 } 703 704 static int damon_dbgfs_open(struct inode *inode, struct file *file) 705 { 706 file->private_data = inode->i_private; 707 708 return nonseekable_open(inode, file); 709 } 710 711 static const struct file_operations attrs_fops = { 712 .open = damon_dbgfs_open, 713 .read = dbgfs_attrs_read, 714 .write = dbgfs_attrs_write, 715 }; 716 717 static const struct file_operations schemes_fops = { 718 .open = damon_dbgfs_open, 719 .read = dbgfs_schemes_read, 720 .write = dbgfs_schemes_write, 721 }; 722 723 static const struct file_operations target_ids_fops = { 724 .open = damon_dbgfs_open, 725 .read = dbgfs_target_ids_read, 726 .write = dbgfs_target_ids_write, 727 }; 728 729 static const struct file_operations init_regions_fops = { 730 .open = damon_dbgfs_open, 731 .read = dbgfs_init_regions_read, 732 .write = dbgfs_init_regions_write, 733 }; 734 735 static const struct file_operations kdamond_pid_fops = { 736 .open = damon_dbgfs_open, 737 .read = dbgfs_kdamond_pid_read, 738 }; 739 740 static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx) 741 { 742 const char * const file_names[] = {"attrs", "schemes", "target_ids", 743 "init_regions", "kdamond_pid"}; 744 const struct file_operations *fops[] = {&attrs_fops, &schemes_fops, 745 &target_ids_fops, &init_regions_fops, &kdamond_pid_fops}; 746 int i; 747 748 for (i = 0; i < ARRAY_SIZE(file_names); i++) 749 debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]); 750 } 751 752 static void dbgfs_before_terminate(struct damon_ctx *ctx) 753 { 754 struct damon_target *t, *next; 755 756 if (!damon_target_has_pid(ctx)) 757 return; 758 759 mutex_lock(&ctx->kdamond_lock); 760 damon_for_each_target_safe(t, next, ctx) { 761 put_pid(t->pid); 762 damon_destroy_target(t); 763 } 764 mutex_unlock(&ctx->kdamond_lock); 765 } 766 767 static struct damon_ctx *dbgfs_new_ctx(void) 768 { 769 struct damon_ctx *ctx; 770 771 ctx = damon_new_ctx(); 772 if (!ctx) 773 return NULL; 774 775 if (damon_select_ops(ctx, DAMON_OPS_VADDR) && 776 damon_select_ops(ctx, DAMON_OPS_PADDR)) { 777 damon_destroy_ctx(ctx); 778 return NULL; 779 } 780 ctx->callback.before_terminate = dbgfs_before_terminate; 781 return ctx; 782 } 783 784 static void dbgfs_destroy_ctx(struct damon_ctx *ctx) 785 { 786 damon_destroy_ctx(ctx); 787 } 788 789 /* 790 * Make a context of @name and create a debugfs directory for it. 791 * 792 * This function should be called while holding damon_dbgfs_lock. 793 * 794 * Returns 0 on success, negative error code otherwise. 795 */ 796 static int dbgfs_mk_context(char *name) 797 { 798 struct dentry *root, **new_dirs, *new_dir; 799 struct damon_ctx **new_ctxs, *new_ctx; 800 801 if (damon_nr_running_ctxs()) 802 return -EBUSY; 803 804 new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) * 805 (dbgfs_nr_ctxs + 1), GFP_KERNEL); 806 if (!new_ctxs) 807 return -ENOMEM; 808 dbgfs_ctxs = new_ctxs; 809 810 new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) * 811 (dbgfs_nr_ctxs + 1), GFP_KERNEL); 812 if (!new_dirs) 813 return -ENOMEM; 814 dbgfs_dirs = new_dirs; 815 816 root = dbgfs_dirs[0]; 817 if (!root) 818 return -ENOENT; 819 820 new_dir = debugfs_create_dir(name, root); 821 dbgfs_dirs[dbgfs_nr_ctxs] = new_dir; 822 823 new_ctx = dbgfs_new_ctx(); 824 if (!new_ctx) { 825 debugfs_remove(new_dir); 826 dbgfs_dirs[dbgfs_nr_ctxs] = NULL; 827 return -ENOMEM; 828 } 829 830 dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx; 831 dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs], 832 dbgfs_ctxs[dbgfs_nr_ctxs]); 833 dbgfs_nr_ctxs++; 834 835 return 0; 836 } 837 838 static ssize_t dbgfs_mk_context_write(struct file *file, 839 const char __user *buf, size_t count, loff_t *ppos) 840 { 841 char *kbuf; 842 char *ctx_name; 843 ssize_t ret; 844 845 kbuf = user_input_str(buf, count, ppos); 846 if (IS_ERR(kbuf)) 847 return PTR_ERR(kbuf); 848 ctx_name = kmalloc(count + 1, GFP_KERNEL); 849 if (!ctx_name) { 850 kfree(kbuf); 851 return -ENOMEM; 852 } 853 854 /* Trim white space */ 855 if (sscanf(kbuf, "%s", ctx_name) != 1) { 856 ret = -EINVAL; 857 goto out; 858 } 859 860 mutex_lock(&damon_dbgfs_lock); 861 ret = dbgfs_mk_context(ctx_name); 862 if (!ret) 863 ret = count; 864 mutex_unlock(&damon_dbgfs_lock); 865 866 out: 867 kfree(kbuf); 868 kfree(ctx_name); 869 return ret; 870 } 871 872 /* 873 * Remove a context of @name and its debugfs directory. 874 * 875 * This function should be called while holding damon_dbgfs_lock. 876 * 877 * Return 0 on success, negative error code otherwise. 878 */ 879 static int dbgfs_rm_context(char *name) 880 { 881 struct dentry *root, *dir, **new_dirs; 882 struct damon_ctx **new_ctxs; 883 int i, j; 884 885 if (damon_nr_running_ctxs()) 886 return -EBUSY; 887 888 root = dbgfs_dirs[0]; 889 if (!root) 890 return -ENOENT; 891 892 dir = debugfs_lookup(name, root); 893 if (!dir) 894 return -ENOENT; 895 896 new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs), 897 GFP_KERNEL); 898 if (!new_dirs) 899 return -ENOMEM; 900 901 new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs), 902 GFP_KERNEL); 903 if (!new_ctxs) { 904 kfree(new_dirs); 905 return -ENOMEM; 906 } 907 908 for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) { 909 if (dbgfs_dirs[i] == dir) { 910 debugfs_remove(dbgfs_dirs[i]); 911 dbgfs_destroy_ctx(dbgfs_ctxs[i]); 912 continue; 913 } 914 new_dirs[j] = dbgfs_dirs[i]; 915 new_ctxs[j++] = dbgfs_ctxs[i]; 916 } 917 918 kfree(dbgfs_dirs); 919 kfree(dbgfs_ctxs); 920 921 dbgfs_dirs = new_dirs; 922 dbgfs_ctxs = new_ctxs; 923 dbgfs_nr_ctxs--; 924 925 return 0; 926 } 927 928 static ssize_t dbgfs_rm_context_write(struct file *file, 929 const char __user *buf, size_t count, loff_t *ppos) 930 { 931 char *kbuf; 932 ssize_t ret; 933 char *ctx_name; 934 935 kbuf = user_input_str(buf, count, ppos); 936 if (IS_ERR(kbuf)) 937 return PTR_ERR(kbuf); 938 ctx_name = kmalloc(count + 1, GFP_KERNEL); 939 if (!ctx_name) { 940 kfree(kbuf); 941 return -ENOMEM; 942 } 943 944 /* Trim white space */ 945 if (sscanf(kbuf, "%s", ctx_name) != 1) { 946 ret = -EINVAL; 947 goto out; 948 } 949 950 mutex_lock(&damon_dbgfs_lock); 951 ret = dbgfs_rm_context(ctx_name); 952 if (!ret) 953 ret = count; 954 mutex_unlock(&damon_dbgfs_lock); 955 956 out: 957 kfree(kbuf); 958 kfree(ctx_name); 959 return ret; 960 } 961 962 static ssize_t dbgfs_monitor_on_read(struct file *file, 963 char __user *buf, size_t count, loff_t *ppos) 964 { 965 char monitor_on_buf[5]; 966 bool monitor_on = damon_nr_running_ctxs() != 0; 967 int len; 968 969 len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n"); 970 971 return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len); 972 } 973 974 static ssize_t dbgfs_monitor_on_write(struct file *file, 975 const char __user *buf, size_t count, loff_t *ppos) 976 { 977 ssize_t ret; 978 char *kbuf; 979 980 kbuf = user_input_str(buf, count, ppos); 981 if (IS_ERR(kbuf)) 982 return PTR_ERR(kbuf); 983 984 /* Remove white space */ 985 if (sscanf(kbuf, "%s", kbuf) != 1) { 986 kfree(kbuf); 987 return -EINVAL; 988 } 989 990 mutex_lock(&damon_dbgfs_lock); 991 if (!strncmp(kbuf, "on", count)) { 992 int i; 993 994 for (i = 0; i < dbgfs_nr_ctxs; i++) { 995 if (damon_targets_empty(dbgfs_ctxs[i])) { 996 kfree(kbuf); 997 mutex_unlock(&damon_dbgfs_lock); 998 return -EINVAL; 999 } 1000 } 1001 ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true); 1002 } else if (!strncmp(kbuf, "off", count)) { 1003 ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs); 1004 } else { 1005 ret = -EINVAL; 1006 } 1007 mutex_unlock(&damon_dbgfs_lock); 1008 1009 if (!ret) 1010 ret = count; 1011 kfree(kbuf); 1012 return ret; 1013 } 1014 1015 static const struct file_operations mk_contexts_fops = { 1016 .write = dbgfs_mk_context_write, 1017 }; 1018 1019 static const struct file_operations rm_contexts_fops = { 1020 .write = dbgfs_rm_context_write, 1021 }; 1022 1023 static const struct file_operations monitor_on_fops = { 1024 .read = dbgfs_monitor_on_read, 1025 .write = dbgfs_monitor_on_write, 1026 }; 1027 1028 static int __init __damon_dbgfs_init(void) 1029 { 1030 struct dentry *dbgfs_root; 1031 const char * const file_names[] = {"mk_contexts", "rm_contexts", 1032 "monitor_on"}; 1033 const struct file_operations *fops[] = {&mk_contexts_fops, 1034 &rm_contexts_fops, &monitor_on_fops}; 1035 int i; 1036 1037 dbgfs_root = debugfs_create_dir("damon", NULL); 1038 1039 for (i = 0; i < ARRAY_SIZE(file_names); i++) 1040 debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL, 1041 fops[i]); 1042 dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]); 1043 1044 dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL); 1045 if (!dbgfs_dirs) { 1046 debugfs_remove(dbgfs_root); 1047 return -ENOMEM; 1048 } 1049 dbgfs_dirs[0] = dbgfs_root; 1050 1051 return 0; 1052 } 1053 1054 /* 1055 * Functions for the initialization 1056 */ 1057 1058 static int __init damon_dbgfs_init(void) 1059 { 1060 int rc = -ENOMEM; 1061 1062 mutex_lock(&damon_dbgfs_lock); 1063 dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL); 1064 if (!dbgfs_ctxs) 1065 goto out; 1066 dbgfs_ctxs[0] = dbgfs_new_ctx(); 1067 if (!dbgfs_ctxs[0]) { 1068 kfree(dbgfs_ctxs); 1069 goto out; 1070 } 1071 dbgfs_nr_ctxs = 1; 1072 1073 rc = __damon_dbgfs_init(); 1074 if (rc) { 1075 kfree(dbgfs_ctxs[0]); 1076 kfree(dbgfs_ctxs); 1077 pr_err("%s: dbgfs init failed\n", __func__); 1078 } 1079 1080 out: 1081 mutex_unlock(&damon_dbgfs_lock); 1082 return rc; 1083 } 1084 1085 module_init(damon_dbgfs_init); 1086 1087 #include "dbgfs-test.h" 1088