// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id so that others can find and use it later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}
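
/*
 * Example (illustrative sketch, not part of the original source): a
 * monitoring operations set is normally defined by an address-space specific
 * implementation and registered from its init code.  The callback names
 * below are hypothetical placeholders.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		.init = my_init,
 *		.update = my_update,
 *		.prepare_access_checks = my_prepare_access_checks,
 *		.check_accesses = my_check_accesses,
 *		.target_valid = my_target_valid,
 *		.cleanup = my_cleanup,
 *	};
 *
 *	static int __init my_ops_init(void)
 *	{
 *		return damon_register_ops(&my_ops);
 *	}
 */
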
/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in the given ranges.
 *
 * Return: 0 on success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}
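
/*
 * Example (illustrative sketch, not part of the original source): a caller
 * would typically build a scheme from fully initialized access pattern,
 * quota, and watermarks structures and hand the result to
 * damon_add_scheme().  The values below are arbitrary; an empty quota means
 * the scheme is not quota-limited.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = PAGE_SIZE,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = UINT_MAX,
 *		.min_age_region = 0,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *scheme;
 *
 *	scheme = damon_new_scheme(&pattern, DAMOS_STAT, &quota, &wmarks);
 *	if (scheme)
 *		damon_add_scheme(ctx, scheme);
 */
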
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_ops_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	unsigned int max_nr_accesses =
		attrs->aggr_interval / attrs->sample_interval;

	return accesses_bp * max_nr_accesses / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	unsigned int max_nr_accesses =
		attrs->aggr_interval / attrs->sample_interval;

	return nr_accesses * 10000 / max_nr_accesses;
}
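
/*
 * Worked example (not from the original source): with the default
 * sample_interval of 5ms and aggr_interval of 100ms set above,
 * max_nr_accesses is 100 / 5 = 20.  An access ratio of 5,000 bp (50%)
 * therefore converts to 5000 * 20 / 10000 = 10 nr_accesses, and converting
 * 10 nr_accesses back yields 10 * 10000 / 20 = 5,000 bp.  This round trip
 * is what damon_nr_accesses_for_new_attrs() below relies on when the
 * intervals change.
 */
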
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;
	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}
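
/*
 * Example (illustrative sketch, not part of the original source): a caller
 * that wants 10ms sampling and 200ms aggregation would set the attributes
 * like below before starting the kdamond.  The values are arbitrary; note
 * that all intervals are in microseconds.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 10 * 1000,
 *		.aggr_interval = 200 * 1000,
 *		.ops_update_interval = 60 * 1000 * 1000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	if (damon_set_attrs(ctx, &attrs))
 *		pr_warn("invalid monitoring attributes\n");
 */
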
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per each context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
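
/*
 * Example (illustrative sketch, not part of the original source): in-kernel
 * users such as DAMON_RECLAIM build a context, select an operations set, set
 * a target (and its monitoring regions), and then start/stop it as a
 * single-entry group.  Error handling is omitted for brevity.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *target = damon_new_target();
 *
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	damon_add_target(ctx, target);
 *	damon_start(&ctx, 1, true);
 *	...
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 */
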
/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/*
 * damon_check_reset_time_interval() - Check if a time interval has elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the next
 * check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->attrs.aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern.  To avoid applying the scheme action only to
 * already-handled regions, DAMON skips applying the action to the regions
 * that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * the DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_target *t, struct damon_region *r,
		struct damos_filter *filter)
{
	bool matched = false;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_target *t, struct damon_region *r,
		struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(t, r, filter))
			return true;
	}
	return false;
}
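
/*
 * Example (illustrative sketch, not part of the original source): an address
 * filter with matching == true excludes the regions inside its range from
 * the scheme action, while matching == false restricts the action to that
 * range.  The range bounds below are hypothetical placeholders.
 *
 *	struct damos_filter *filter;
 *
 *	filter = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
 *	if (filter) {
 *		filter->addr_range.start = addr_start;
 *		filter->addr_range.end = addr_end;
 *		damos_add_filter(scheme, filter);
 *	}
 */
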
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err)
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
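
/*
 * Worked example (not from the original source): suppose a scheme has so far
 * charged total_charged_sz of 100 MiB over total_charged_ns of 500 ms.  The
 * estimated throughput is 100 MiB * 1000000 / 500000000 ns, i.e. about
 * 0.2 MiB per millisecond.  With quota->ms of 10, the time quota alone
 * allows an effective size quota (esz) of about 2 MiB per charge window,
 * and a non-zero quota->sz smaller than that would cap it further.
 */
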
static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		if (!s->wmarks.activated)
			continue;

		damos_adjust_quota(c, s);
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and whose
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under dynamically changeable access patterns.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90 percent of the
			 * original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether it is time to check and apply the operations-related data
 * structures.
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_ops_update,
			ctx->attrs.ops_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the monitoring needs to stop.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}
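
/*
 * Worked example (not from the original source): with
 * DAMOS_WMARK_FREE_MEM_RATE, the metric is the free memory rate in
 * per-thousand.  Given high = 500, mid = 400 and low = 50, an inactive
 * scheme stays inactive while free memory is 40% or more, gets activated
 * once it drops below 40% (but stays at or above 5%), and gets deactivated
 * again when free memory rises above 50% or falls below 5%.  While the
 * scheme is inactive, the kdamond only re-checks the watermarks every
 * wmarks.interval microseconds.
 */
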
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(ctx->attrs.sample_interval);

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
			if (!list_empty(&ctx->schemes))
				kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (kdamond_need_update_operations(ctx)) {
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"