// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations set to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

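/*
 * Usage sketch (illustrative only, not part of the kernel documentation):
 * a monitoring operations provider fills a &struct damon_operations with its
 * callbacks and registers it once, and a context later selects it by id.
 * The 'my_*' callbacks below are hypothetical placeholders, and the id choice
 * is only for illustration; the id must not already be registered.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		.init = my_init,
 *		.update = my_update,
 *		.prepare_access_checks = my_prepare_access_checks,
 *		.check_accesses = my_check_accesses,
 *		.target_valid = my_target_valid,
 *		.cleanup = my_cleanup,
 *	};
 *
 *	err = damon_register_ops(&my_ops);	// once, e.g. at init time
 *	...
 *	err = damon_select_ops(ctx, DAMON_OPS_VADDR);	// per context
 */
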
/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target so that they fit in the given ranges.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

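/*
 * Usage sketch (illustrative only): a caller describes the address ranges of
 * interest with &struct damon_addr_range and lets damon_set_regions() create,
 * resize, or remove the target's regions so that they cover exactly those
 * ranges.  The addresses below are arbitrary; range boundaries are aligned to
 * DAMON_MIN_REGION by the function itself.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000, .end = 0x400000 },
 *		{ .start = 0x800000, .end = 0xa00000 },
 *	};
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 */
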
struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

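/*
 * Usage sketch (illustrative only): the helpers above can be combined to
 * build a scheme, for example one that pages out regions that showed no
 * access for a while.  The concrete numbers are arbitrary and only for
 * illustration.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = PAGE_SIZE,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 10,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {
 *		.ms = 10,
 *		.sz = 128 * 1024 * 1024,
 *		.reset_interval = 1000,
 *	};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *scheme;
 *
 *	scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, &quota, &wmarks);
 *	if (scheme)
 *		damon_add_scheme(ctx, scheme);
 */
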
static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_ops_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

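/*
 * Worked example (illustrative only), assuming damon_max_nr_accesses() is
 * roughly aggr_interval / sample_interval: with a 5000us sampling interval
 * and a 100000us aggregation interval, nr_accesses can be at most 20, so a
 * nr_accesses of 10 corresponds to 5000 bp (50%).  If the aggregation
 * interval is then doubled to 200000us, the maximum becomes 40 and the same
 * 5000 bp converts back to a nr_accesses of 20, preserving the access ratio
 * across the attribute change.
 */
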
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which an access to the region was found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For that reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;
	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

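/*
 * Usage sketch (illustrative only): all intervals are in microseconds and
 * must pass the sanity checks in damon_set_attrs().  The numbers below simply
 * restate the defaults set by damon_new_ctx(); 'scheme' stands for a scheme
 * built with damon_new_scheme().
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	err = damon_set_attrs(ctx, &attrs);
 *	if (!err)
 *		damon_set_schemes(ctx, &scheme, 1);
 */
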
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads created by another 'damon_start()'
 * call is currently running, this function does nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

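/*
 * Usage sketch (illustrative only): a typical in-kernel user builds a
 * context, selects an operations set, adds targets, and then starts and stops
 * the monitoring.  Error handling is omitted and the target setup depends on
 * the selected operations set.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *target = damon_new_target();
 *
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	damon_add_target(ctx, target);
 *	damon_start(&ctx, 1, true);
 *	...
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 */
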
/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to current time for next check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->attrs.aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If a quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern.  To avoid applying the scheme action only to the
 * already handled regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for that
 * reason.  If only the starting part of the region was charged previously,
 * this function splits the region into two so that the second one covers the
 * area that was not charged in the previous charge window, saves the second
 * region in *rp, and returns false so that the caller can apply the DAMON
 * action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}

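/*
 * Usage sketch (illustrative only): a DAMOS_FILTER_TYPE_ADDR filter with
 * 'matching' set to true excludes the given address range from the scheme
 * action, while 'matching' set to false excludes everything outside of the
 * range, effectively restricting the scheme to that range.  The addresses
 * below are arbitrary.
 *
 *	struct damos_filter *filter;
 *
 *	filter = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, false);
 *	if (filter) {
 *		filter->addr_range.start = 0x100000;
 *		filter->addr_range.end = 0x200000;
 *		damos_add_filter(scheme, filter);
 *	}
 */
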
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err)
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

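/*
 * Worked example (illustrative only) for damos_set_effective_quota(): if a
 * scheme charged 4 MiB of regions within 8 ms (8,000,000 ns) of apply time in
 * the previous windows, the measured throughput is
 * 4194304 * 1000000 / 8000000 = 524288 bytes per millisecond.  With a time
 * quota of quota->ms = 10, the effective size quota esz becomes 5 MiB,
 * further capped by quota->sz if that is smaller.
 */
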
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		if (!s->wmarks.activated)
			continue;

		damos_adjust_quota(c, s);
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

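/*
 * Example (illustrative only): two regions are merged by
 * damon_merge_regions_of() only if they are adjacent, their nr_accesses
 * differ by at most 'thres', and the merged size does not exceed 'sz_limit'.
 * The merged nr_accesses and age are size-weighted averages: merging an 8 KiB
 * region with nr_accesses 4 and an adjacent 4 KiB region with nr_accesses 1
 * yields (4 * 8K + 1 * 4K) / 12K = 3.
 */
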
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of regions is equal to or smaller than half of the
 * user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether it is time to check and apply the operations-related data
 * structures.
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_ops_update,
			ctx->attrs.ops_update_interval);
}

/*
 * Check whether the current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (kdamond_wait_activation(ctx))
			break;

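		/*
		 * One sampling iteration: let the operations set prepare the
		 * access checks, sleep for the sampling interval, and then
		 * read the results.  The aggregation and operations update
		 * steps below run only when their intervals have passed.
		 */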
		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(ctx->attrs.sample_interval);

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
			if (!list_empty(&ctx->schemes))
				kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (kdamond_need_update_operations(ctx)) {
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 * monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If the
 * values of @start and @end are zero, however, this function finds the biggest
 * 'System RAM' resource and sets the region to cover the resource.  In the
 * latter case, this function saves the start and end addresses of the resource
 * in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"