// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id, so that others can find and use it later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:	id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
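
/*
 * Example (illustrative sketch, not code that is compiled here): a monitoring
 * operations provider would typically register its operations set once at
 * init time, and a user would select it into a context afterwards.  The
 * 'my_ops' callbacks below are hypothetical.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		...
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return damon_register_ops(&my_ops);
 *	}
 *
 * A user of the registered set would then call
 * damon_select_ops(ctx, DAMON_OPS_VADDR) before starting the context.
 */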

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of a
 * monitoring target to fit in the given ranges.
 *
 * Return: 0 on success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
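
/*
 * Example (illustrative sketch): a caller that wants to monitor two address
 * ranges of a target could do something like the following.  The concrete
 * addresses are hypothetical.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000000, .end = 0x140000000, },
 *		{ .start = 0x200000000, .end = 0x210000000, },
 *	};
 *	int err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 *
 * Existing regions outside the two ranges are destroyed, intersecting
 * regions are resized, and holes inside each range are filled with new
 * regions.
 */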

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}
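
/*
 * Example (illustrative sketch): constructing a scheme that pages out regions
 * which were not accessed for a while.  All concrete numbers below are
 * hypothetical.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = DAMON_MIN_REGION,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 10,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {
 *		.ms = 10, .sz = 0, .reset_interval = 1000,
 *	};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, &quota, &wmarks);
 *	if (s)
 *		damon_add_scheme(ctx, s);
 */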

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  Hence, the real meaning of the two fields
 * depends on the current sampling interval and aggregation interval.  This
 * function updates ->nr_accesses and ->age of given damon_ctx's regions for
 * new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}
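
/*
 * Worked example (hypothetical numbers): with a 5 ms sampling interval and a
 * 100 ms aggregation interval, damon_max_nr_accesses() is 100 / 5 = 20, so a
 * nr_accesses of 10 means the region was found accessed in half of the
 * sampling attempts (5,000 bp).  If the attributes change to a 10 ms sampling
 * interval and a 200 ms aggregation interval (max nr_accesses again 20), the
 * converted nr_accesses stays 10, while an age of 4 aggregation intervals
 * becomes 4 * 100 / 200 = 2.
 */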

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;
	return 0;
}
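
/*
 * Example (illustrative sketch, hypothetical values): sampling every 5 ms,
 * aggregating every 100 ms, updating operations every minute, and keeping
 * the number of regions between 10 and 1000.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err = damon_set_attrs(ctx, &attrs);
 */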

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per each context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}
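
/*
 * Example (illustrative sketch): a typical lifecycle of an in-kernel DAMON
 * user.  Error handling is mostly elided, and 'target_pid' is hypothetical.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t = damon_new_target();
 *
 *	damon_select_ops(ctx, DAMON_OPS_VADDR);
 *	t->pid = target_pid;
 *	damon_add_target(ctx, t);
 *	damon_start(&ctx, 1, true);
 *	...
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 */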

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern.  To avoid applying the scheme action to only
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for that
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * the DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err)
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
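
/*
 * Worked example (hypothetical numbers): if the scheme has so far charged a
 * total_charged_sz of 4 MiB over a total_charged_ns of 2 ms, the estimated
 * throughput is 4 MiB * 1000000 / 2000000 = 2 MiB per millisecond.  With a
 * quota->ms of 10, the time quota alone allows an esz of 20 MiB; if quota->sz
 * is set to 8 MiB, the effective size quota becomes the smaller 8 MiB.
 */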

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		if (!s->wmarks.activated)
			continue;

		damos_adjust_quota(c, s);
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
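
/*
 * Worked example (hypothetical numbers): with a merge threshold of 2, an
 * 8 KiB region with a nr_accesses of 10 followed by an adjacent 4 KiB region
 * with a nr_accesses of 9 would be merged into one 12 KiB region with the
 * size-weighted average nr_accesses of (10 * 8 + 9 * 4) / 12 = 9, as long as
 * 12 KiB does not exceed the sz_limit.
 */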

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
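
/*
 * Worked example (hypothetical numbers): with DAMOS_WMARK_FREE_MEM_RATE and
 * high/mid/low watermarks of 500/400/200 permille, a measured free memory
 * rate of 600 (60%) or 150 (15%) deactivates the scheme and makes the
 * kdamond re-check after wmarks.interval micro-seconds, a rate of 450 keeps
 * an inactive scheme inactive, and a rate of 300 activates it.
 */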

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns a negative error code if it should return without activation */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, the new values that damon_set_attrs()
		 * sets are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
			if (!list_empty(&ctx->schemes))
				kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (ctx->passed_sample_intervals == next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 * monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}
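
/*
 * Example (illustrative sketch): a physical address space monitoring user
 * that has no specific range in mind could let DAMON pick the biggest
 * 'System RAM' resource.
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	if (!err)
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);
 */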

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("failed to create damon_region_cache\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"