// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
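
/*
 * Example (illustrative sketch, not part of this file): an operations set
 * provider registers itself once, typically at initialization time, and a
 * user of the core API later binds a monitoring context to it by id.  The
 * function names below are hypothetical; the in-tree providers live in
 * mm/damon/vaddr.c and mm/damon/paddr.c, and the ops ids come from
 * enum damon_ops_id in include/linux/damon.h.
 *
 *	static int __init example_ops_init(void)
 *	{
 *		static struct damon_operations ops = {
 *			.id = DAMON_OPS_VADDR,
 *			.init = example_init,
 *			.prepare_access_checks = example_prepare,
 *		};
 *
 *		return damon_register_ops(&ops);
 *	}
 *
 * A context would then select the registered set with
 * damon_select_ops(ctx, DAMON_OPS_VADDR) before being started.
 */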

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}
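
/*
 * Example (illustrative sketch, not part of this file): seeding a target
 * with one initial region for a hypothetical address range.  'start', 'end'
 * and 't' are placeholders; callers normally let damon_set_regions() below
 * do this, including the DAMON_MIN_REGION alignment.
 *
 *	struct damon_region *r;
 *
 *	r = damon_new_region(ALIGN_DOWN(start, DAMON_MIN_REGION),
 *			ALIGN(end, DAMON_MIN_REGION));
 *	if (!r)
 *		return -ENOMEM;
 *	damon_add_region(r, t);
 */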

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}
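
/*
 * Example (illustrative sketch, not part of this file): building a scheme
 * that only collects statistics (DAMOS_STAT) for 4K-64K regions accessed in
 * at least half of the sampling intervals, with no quota and with watermarks
 * disabled.  The numbers are arbitrary and 'ctx' is assumed to already exist.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 4096, .max_sz_region = 65536,
 *		.min_nr_accesses = 10, .max_nr_accesses = 20,
 *		.min_age_region = 0, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};	(ms == sz == 0 means no quota)
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_STAT, &quota, &wmarks);
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */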

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_ops_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	unsigned int max_nr_accesses =
		attrs->aggr_interval / attrs->sample_interval;

	return accesses_bp * max_nr_accesses / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	unsigned int max_nr_accesses =
		attrs->aggr_interval / attrs->sample_interval;

	return nr_accesses * 10000 / max_nr_accesses;
}
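
/*
 * Worked example (informative only): with the default attributes set by
 * damon_new_ctx() above (sample_interval 5 ms, aggr_interval 100 ms), a
 * region can be found accessed in at most 100 / 5 = 20 sampling intervals
 * per aggregation interval.  A region with nr_accesses == 15 therefore maps
 * to 15 * 10000 / 20 = 7500 bp (75%), and converting 7500 bp back gives
 * 7500 * 20 / 10000 == 15 again.  The helpers below use this round trip to
 * rescale existing results when the intervals change.
 */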

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region was found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  Hence, the real meaning of the two fields
 * depends on the current sampling interval and aggregation interval.  This
 * function updates ->nr_accesses and ->age of the given damon_ctx's regions
 * for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;
	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}
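
/*
 * Example (illustrative sketch, not part of this file): typical setup of a
 * monitoring context before starting it.  Error handling is trimmed,
 * 'attrs' would hold the caller's intervals and region limits, 'scheme' is
 * a pointer built as in the damon_new_scheme() sketch above, and
 * DAMON_OPS_PADDR is one of the ops ids from include/linux/damon.h.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t = damon_new_target();
 *
 *	damon_add_target(ctx, t);
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	damon_set_attrs(ctx, &attrs);
 *	damon_set_schemes(ctx, &scheme, 1);
 *
 * The context would then be started with damon_start(&ctx, 1, true).
 */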

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this group of contexts
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}
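
/*
 * Example (illustrative sketch, not part of this file): starting and later
 * stopping a single context.  With @exclusive set, neither another exclusive
 * nor a non-exclusive group can start while this one runs, so a second
 * caller would get -EBUSY until the kdamond has exited after damon_stop().
 *
 *	err = damon_start(&ctx, 1, true);
 *	if (err)
 *		return err;
 *	(monitoring now runs in the "kdamond.N" kernel thread)
 *	err = damon_stop(&ctx, 1);
 */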

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/*
 * damon_check_reset_time_interval() - Check if a time interval has elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the next
 * check.
 *
 * Return: true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->attrs.aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}
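
/*
 * Worked example (informative only): with the scheme pattern from the
 * damon_new_scheme() sketch above (4K-64K, 10-20 accesses, any age), a 16K
 * region with nr_accesses == 15 and age == 3 passes __damos_valid_target().
 * If the scheme additionally has a non-zero effective quota and the
 * operations set provides ->get_scheme_score(), the region must also score
 * at least quota->min_score to be acted on.
 */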

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern.  To avoid applying the scheme action only to
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for that
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * the DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err)
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}
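
/*
 * Worked example (informative only): suppose a scheme's effective quota
 * (quota->esz) is 1 MiB per charge window.  When damos_apply_scheme()
 * charges the window up to 1 MiB in the middle of a region, it records the
 * target and the end address in charge_target_from/charge_addr_from.  Once
 * a new charge window starts, damos_skip_charged_region() skips regions of
 * other targets and regions that end before charge_addr_from, and splits
 * the region straddling that address so the action resumes right after the
 * already-charged bytes.
 */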

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		if (!s->wmarks.activated)
			continue;

		damos_adjust_quota(c, s);
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
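
/*
 * Worked example (informative only): merging a 12 KiB region with
 * nr_accesses == 4 and an adjacent 4 KiB region with nr_accesses == 8
 * yields a 16 KiB region with nr_accesses == (4 * 12 + 8 * 4) / 16 == 5,
 * i.e. the access counts are size-weighted so the merge does not distort
 * the aggregate picture.  Ages are averaged the same way.
 */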

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of regions is equal to or smaller than half of the
 * user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}
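
/*
 * Worked example (informative only): with the default max_nr_regions of
 * 1000, kdamond_split_regions() does nothing while more than 500 regions
 * exist.  Below that, every sufficiently large region is split in two; if
 * the total number of regions did not change since the last call and is
 * still under 1000 / 3, regions are split in three instead, to probe
 * whether the middle of a region has a different access frequency.
 */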

/*
 * Check whether it is time to check and apply the operations-related data
 * structures.
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_ops_update,
			ctx->attrs.ops_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring needs to be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}
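
/*
 * Worked example (informative only): DAMOS_WMARK_FREE_MEM_RATE is measured
 * in per-thousand of total memory, so 4 GiB free out of 16 GiB reads as
 * 250.  With wmarks.high = 500, mid = 400 and low = 200, a scheme stays
 * inactive until free memory drops below 40%, then stays active until it
 * either recovers above 50% or falls below 20%, as implemented in
 * damos_wmark_wait_us() below.
 */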

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}
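
/*
 * Worked example (informative only): if a context has two schemes whose
 * watermarks ask for re-checks in 1 second and 5 seconds respectively,
 * kdamond_wait_activation() sleeps for the shorter 1 second and then
 * re-evaluates all watermarks, so each scheme is re-checked at least as
 * often as its own wmarks.interval requires.
 */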

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(ctx->attrs.sample_interval);

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
			if (!list_empty(&ctx->schemes))
				kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (kdamond_need_update_operations(ctx)) {
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 * monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If the
 * values of @start and @end are zero, however, this function finds the biggest
 * 'System RAM' resource and sets the region to cover the resource.  In the
 * latter case, this function saves the start and end addresses of the resource
 * in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"