1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Data Access Monitor 4 * 5 * Author: SeongJae Park <sjpark@amazon.de> 6 */ 7 8 #define pr_fmt(fmt) "damon: " fmt 9 10 #include <linux/damon.h> 11 #include <linux/delay.h> 12 #include <linux/kthread.h> 13 #include <linux/mm.h> 14 #include <linux/slab.h> 15 #include <linux/string.h> 16 17 #define CREATE_TRACE_POINTS 18 #include <trace/events/damon.h> 19 20 #ifdef CONFIG_DAMON_KUNIT_TEST 21 #undef DAMON_MIN_REGION 22 #define DAMON_MIN_REGION 1 23 #endif 24 25 static DEFINE_MUTEX(damon_lock); 26 static int nr_running_ctxs; 27 static bool running_exclusive_ctxs; 28 29 static DEFINE_MUTEX(damon_ops_lock); 30 static struct damon_operations damon_registered_ops[NR_DAMON_OPS]; 31 32 static struct kmem_cache *damon_region_cache __ro_after_init; 33 34 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */ 35 static bool __damon_is_registered_ops(enum damon_ops_id id) 36 { 37 struct damon_operations empty_ops = {}; 38 39 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops))) 40 return false; 41 return true; 42 } 43 44 /** 45 * damon_is_registered_ops() - Check if a given damon_operations is registered. 46 * @id: Id of the damon_operations to check if registered. 47 * 48 * Return: true if the ops is set, false otherwise. 49 */ 50 bool damon_is_registered_ops(enum damon_ops_id id) 51 { 52 bool registered; 53 54 if (id >= NR_DAMON_OPS) 55 return false; 56 mutex_lock(&damon_ops_lock); 57 registered = __damon_is_registered_ops(id); 58 mutex_unlock(&damon_ops_lock); 59 return registered; 60 } 61 62 /** 63 * damon_register_ops() - Register a monitoring operations set to DAMON. 64 * @ops: monitoring operations set to register. 65 * 66 * This function registers a monitoring operations set of valid &struct 67 * damon_operations->id so that others can find and use them later. 68 * 69 * Return: 0 on success, negative error code otherwise. 70 */ 71 int damon_register_ops(struct damon_operations *ops) 72 { 73 int err = 0; 74 75 if (ops->id >= NR_DAMON_OPS) 76 return -EINVAL; 77 mutex_lock(&damon_ops_lock); 78 /* Fail for already registered ops */ 79 if (__damon_is_registered_ops(ops->id)) { 80 err = -EINVAL; 81 goto out; 82 } 83 damon_registered_ops[ops->id] = *ops; 84 out: 85 mutex_unlock(&damon_ops_lock); 86 return err; 87 } 88 89 /** 90 * damon_select_ops() - Select a monitoring operations to use with the context. 91 * @ctx: monitoring context to use the operations. 92 * @id: id of the registered monitoring operations to select. 93 * 94 * This function finds registered monitoring operations set of @id and make 95 * @ctx to use it. 96 * 97 * Return: 0 on success, negative error code otherwise. 98 */ 99 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id) 100 { 101 int err = 0; 102 103 if (id >= NR_DAMON_OPS) 104 return -EINVAL; 105 106 mutex_lock(&damon_ops_lock); 107 if (!__damon_is_registered_ops(id)) 108 err = -EINVAL; 109 else 110 ctx->ops = damon_registered_ops[id]; 111 mutex_unlock(&damon_ops_lock); 112 return err; 113 } 114 115 /* 116 * Construct a damon_region struct 117 * 118 * Returns the pointer to the new struct if success, or NULL otherwise 119 */ 120 struct damon_region *damon_new_region(unsigned long start, unsigned long end) 121 { 122 struct damon_region *region; 123 124 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL); 125 if (!region) 126 return NULL; 127 128 region->ar.start = start; 129 region->ar.end = end; 130 region->nr_accesses = 0; 131 INIT_LIST_HEAD(®ion->list); 132 133 region->age = 0; 134 region->last_nr_accesses = 0; 135 136 return region; 137 } 138 139 void damon_add_region(struct damon_region *r, struct damon_target *t) 140 { 141 list_add_tail(&r->list, &t->regions_list); 142 t->nr_regions++; 143 } 144 145 static void damon_del_region(struct damon_region *r, struct damon_target *t) 146 { 147 list_del(&r->list); 148 t->nr_regions--; 149 } 150 151 static void damon_free_region(struct damon_region *r) 152 { 153 kmem_cache_free(damon_region_cache, r); 154 } 155 156 void damon_destroy_region(struct damon_region *r, struct damon_target *t) 157 { 158 damon_del_region(r, t); 159 damon_free_region(r); 160 } 161 162 /* 163 * Check whether a region is intersecting an address range 164 * 165 * Returns true if it is. 166 */ 167 static bool damon_intersect(struct damon_region *r, 168 struct damon_addr_range *re) 169 { 170 return !(r->ar.end <= re->start || re->end <= r->ar.start); 171 } 172 173 /* 174 * Fill holes in regions with new regions. 175 */ 176 static int damon_fill_regions_holes(struct damon_region *first, 177 struct damon_region *last, struct damon_target *t) 178 { 179 struct damon_region *r = first; 180 181 damon_for_each_region_from(r, t) { 182 struct damon_region *next, *newr; 183 184 if (r == last) 185 break; 186 next = damon_next_region(r); 187 if (r->ar.end != next->ar.start) { 188 newr = damon_new_region(r->ar.end, next->ar.start); 189 if (!newr) 190 return -ENOMEM; 191 damon_insert_region(newr, r, next, t); 192 } 193 } 194 return 0; 195 } 196 197 /* 198 * damon_set_regions() - Set regions of a target for given address ranges. 199 * @t: the given target. 200 * @ranges: array of new monitoring target ranges. 201 * @nr_ranges: length of @ranges. 202 * 203 * This function adds new regions to, or modify existing regions of a 204 * monitoring target to fit in specific ranges. 205 * 206 * Return: 0 if success, or negative error code otherwise. 207 */ 208 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, 209 unsigned int nr_ranges) 210 { 211 struct damon_region *r, *next; 212 unsigned int i; 213 int err; 214 215 /* Remove regions which are not in the new ranges */ 216 damon_for_each_region_safe(r, next, t) { 217 for (i = 0; i < nr_ranges; i++) { 218 if (damon_intersect(r, &ranges[i])) 219 break; 220 } 221 if (i == nr_ranges) 222 damon_destroy_region(r, t); 223 } 224 225 r = damon_first_region(t); 226 /* Add new regions or resize existing regions to fit in the ranges */ 227 for (i = 0; i < nr_ranges; i++) { 228 struct damon_region *first = NULL, *last, *newr; 229 struct damon_addr_range *range; 230 231 range = &ranges[i]; 232 /* Get the first/last regions intersecting with the range */ 233 damon_for_each_region_from(r, t) { 234 if (damon_intersect(r, range)) { 235 if (!first) 236 first = r; 237 last = r; 238 } 239 if (r->ar.start >= range->end) 240 break; 241 } 242 if (!first) { 243 /* no region intersects with this range */ 244 newr = damon_new_region( 245 ALIGN_DOWN(range->start, 246 DAMON_MIN_REGION), 247 ALIGN(range->end, DAMON_MIN_REGION)); 248 if (!newr) 249 return -ENOMEM; 250 damon_insert_region(newr, damon_prev_region(r), r, t); 251 } else { 252 /* resize intersecting regions to fit in this range */ 253 first->ar.start = ALIGN_DOWN(range->start, 254 DAMON_MIN_REGION); 255 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION); 256 257 /* fill possible holes in the range */ 258 err = damon_fill_regions_holes(first, last, t); 259 if (err) 260 return err; 261 } 262 } 263 return 0; 264 } 265 266 struct damos_filter *damos_new_filter(enum damos_filter_type type, 267 bool matching) 268 { 269 struct damos_filter *filter; 270 271 filter = kmalloc(sizeof(*filter), GFP_KERNEL); 272 if (!filter) 273 return NULL; 274 filter->type = type; 275 filter->matching = matching; 276 INIT_LIST_HEAD(&filter->list); 277 return filter; 278 } 279 280 void damos_add_filter(struct damos *s, struct damos_filter *f) 281 { 282 list_add_tail(&f->list, &s->filters); 283 } 284 285 static void damos_del_filter(struct damos_filter *f) 286 { 287 list_del(&f->list); 288 } 289 290 static void damos_free_filter(struct damos_filter *f) 291 { 292 kfree(f); 293 } 294 295 void damos_destroy_filter(struct damos_filter *f) 296 { 297 damos_del_filter(f); 298 damos_free_filter(f); 299 } 300 301 /* initialize private fields of damos_quota and return the pointer */ 302 static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota) 303 { 304 quota->total_charged_sz = 0; 305 quota->total_charged_ns = 0; 306 quota->esz = 0; 307 quota->charged_sz = 0; 308 quota->charged_from = 0; 309 quota->charge_target_from = NULL; 310 quota->charge_addr_from = 0; 311 return quota; 312 } 313 314 struct damos *damon_new_scheme(struct damos_access_pattern *pattern, 315 enum damos_action action, 316 unsigned long apply_interval_us, 317 struct damos_quota *quota, 318 struct damos_watermarks *wmarks) 319 { 320 struct damos *scheme; 321 322 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL); 323 if (!scheme) 324 return NULL; 325 scheme->pattern = *pattern; 326 scheme->action = action; 327 scheme->apply_interval_us = apply_interval_us; 328 /* 329 * next_apply_sis will be set when kdamond starts. While kdamond is 330 * running, it will also updated when it is added to the DAMON context, 331 * or damon_attrs are updated. 332 */ 333 scheme->next_apply_sis = 0; 334 INIT_LIST_HEAD(&scheme->filters); 335 scheme->stat = (struct damos_stat){}; 336 INIT_LIST_HEAD(&scheme->list); 337 338 scheme->quota = *(damos_quota_init_priv(quota)); 339 340 scheme->wmarks = *wmarks; 341 scheme->wmarks.activated = true; 342 343 return scheme; 344 } 345 346 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx) 347 { 348 unsigned long sample_interval = ctx->attrs.sample_interval ? 349 ctx->attrs.sample_interval : 1; 350 unsigned long apply_interval = s->apply_interval_us ? 351 s->apply_interval_us : ctx->attrs.aggr_interval; 352 353 s->next_apply_sis = ctx->passed_sample_intervals + 354 apply_interval / sample_interval; 355 } 356 357 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s) 358 { 359 list_add_tail(&s->list, &ctx->schemes); 360 damos_set_next_apply_sis(s, ctx); 361 } 362 363 static void damon_del_scheme(struct damos *s) 364 { 365 list_del(&s->list); 366 } 367 368 static void damon_free_scheme(struct damos *s) 369 { 370 kfree(s); 371 } 372 373 void damon_destroy_scheme(struct damos *s) 374 { 375 struct damos_filter *f, *next; 376 377 damos_for_each_filter_safe(f, next, s) 378 damos_destroy_filter(f); 379 damon_del_scheme(s); 380 damon_free_scheme(s); 381 } 382 383 /* 384 * Construct a damon_target struct 385 * 386 * Returns the pointer to the new struct if success, or NULL otherwise 387 */ 388 struct damon_target *damon_new_target(void) 389 { 390 struct damon_target *t; 391 392 t = kmalloc(sizeof(*t), GFP_KERNEL); 393 if (!t) 394 return NULL; 395 396 t->pid = NULL; 397 t->nr_regions = 0; 398 INIT_LIST_HEAD(&t->regions_list); 399 INIT_LIST_HEAD(&t->list); 400 401 return t; 402 } 403 404 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t) 405 { 406 list_add_tail(&t->list, &ctx->adaptive_targets); 407 } 408 409 bool damon_targets_empty(struct damon_ctx *ctx) 410 { 411 return list_empty(&ctx->adaptive_targets); 412 } 413 414 static void damon_del_target(struct damon_target *t) 415 { 416 list_del(&t->list); 417 } 418 419 void damon_free_target(struct damon_target *t) 420 { 421 struct damon_region *r, *next; 422 423 damon_for_each_region_safe(r, next, t) 424 damon_free_region(r); 425 kfree(t); 426 } 427 428 void damon_destroy_target(struct damon_target *t) 429 { 430 damon_del_target(t); 431 damon_free_target(t); 432 } 433 434 unsigned int damon_nr_regions(struct damon_target *t) 435 { 436 return t->nr_regions; 437 } 438 439 struct damon_ctx *damon_new_ctx(void) 440 { 441 struct damon_ctx *ctx; 442 443 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 444 if (!ctx) 445 return NULL; 446 447 init_completion(&ctx->kdamond_started); 448 449 ctx->attrs.sample_interval = 5 * 1000; 450 ctx->attrs.aggr_interval = 100 * 1000; 451 ctx->attrs.ops_update_interval = 60 * 1000 * 1000; 452 453 ctx->passed_sample_intervals = 0; 454 /* These will be set from kdamond_init_intervals_sis() */ 455 ctx->next_aggregation_sis = 0; 456 ctx->next_ops_update_sis = 0; 457 458 mutex_init(&ctx->kdamond_lock); 459 460 ctx->attrs.min_nr_regions = 10; 461 ctx->attrs.max_nr_regions = 1000; 462 463 INIT_LIST_HEAD(&ctx->adaptive_targets); 464 INIT_LIST_HEAD(&ctx->schemes); 465 466 return ctx; 467 } 468 469 static void damon_destroy_targets(struct damon_ctx *ctx) 470 { 471 struct damon_target *t, *next_t; 472 473 if (ctx->ops.cleanup) { 474 ctx->ops.cleanup(ctx); 475 return; 476 } 477 478 damon_for_each_target_safe(t, next_t, ctx) 479 damon_destroy_target(t); 480 } 481 482 void damon_destroy_ctx(struct damon_ctx *ctx) 483 { 484 struct damos *s, *next_s; 485 486 damon_destroy_targets(ctx); 487 488 damon_for_each_scheme_safe(s, next_s, ctx) 489 damon_destroy_scheme(s); 490 491 kfree(ctx); 492 } 493 494 static unsigned int damon_age_for_new_attrs(unsigned int age, 495 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) 496 { 497 return age * old_attrs->aggr_interval / new_attrs->aggr_interval; 498 } 499 500 /* convert access ratio in bp (per 10,000) to nr_accesses */ 501 static unsigned int damon_accesses_bp_to_nr_accesses( 502 unsigned int accesses_bp, struct damon_attrs *attrs) 503 { 504 return accesses_bp * damon_max_nr_accesses(attrs) / 10000; 505 } 506 507 /* convert nr_accesses to access ratio in bp (per 10,000) */ 508 static unsigned int damon_nr_accesses_to_accesses_bp( 509 unsigned int nr_accesses, struct damon_attrs *attrs) 510 { 511 return nr_accesses * 10000 / damon_max_nr_accesses(attrs); 512 } 513 514 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses, 515 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) 516 { 517 return damon_accesses_bp_to_nr_accesses( 518 damon_nr_accesses_to_accesses_bp( 519 nr_accesses, old_attrs), 520 new_attrs); 521 } 522 523 static void damon_update_monitoring_result(struct damon_region *r, 524 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) 525 { 526 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses, 527 old_attrs, new_attrs); 528 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); 529 } 530 531 /* 532 * region->nr_accesses is the number of sampling intervals in the last 533 * aggregation interval that access to the region has found, and region->age is 534 * the number of aggregation intervals that its access pattern has maintained. 535 * For the reason, the real meaning of the two fields depend on current 536 * sampling interval and aggregation interval. This function updates 537 * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs. 538 */ 539 static void damon_update_monitoring_results(struct damon_ctx *ctx, 540 struct damon_attrs *new_attrs) 541 { 542 struct damon_attrs *old_attrs = &ctx->attrs; 543 struct damon_target *t; 544 struct damon_region *r; 545 546 /* if any interval is zero, simply forgive conversion */ 547 if (!old_attrs->sample_interval || !old_attrs->aggr_interval || 548 !new_attrs->sample_interval || 549 !new_attrs->aggr_interval) 550 return; 551 552 damon_for_each_target(t, ctx) 553 damon_for_each_region(r, t) 554 damon_update_monitoring_result( 555 r, old_attrs, new_attrs); 556 } 557 558 /** 559 * damon_set_attrs() - Set attributes for the monitoring. 560 * @ctx: monitoring context 561 * @attrs: monitoring attributes 562 * 563 * This function should not be called while the kdamond is running. 564 * Every time interval is in micro-seconds. 565 * 566 * Return: 0 on success, negative error code otherwise. 567 */ 568 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) 569 { 570 unsigned long sample_interval = attrs->sample_interval ? 571 attrs->sample_interval : 1; 572 struct damos *s; 573 574 if (attrs->min_nr_regions < 3) 575 return -EINVAL; 576 if (attrs->min_nr_regions > attrs->max_nr_regions) 577 return -EINVAL; 578 if (attrs->sample_interval > attrs->aggr_interval) 579 return -EINVAL; 580 581 ctx->next_aggregation_sis = ctx->passed_sample_intervals + 582 attrs->aggr_interval / sample_interval; 583 ctx->next_ops_update_sis = ctx->passed_sample_intervals + 584 attrs->ops_update_interval / sample_interval; 585 586 damon_update_monitoring_results(ctx, attrs); 587 ctx->attrs = *attrs; 588 589 damon_for_each_scheme(s, ctx) 590 damos_set_next_apply_sis(s, ctx); 591 592 return 0; 593 } 594 595 /** 596 * damon_set_schemes() - Set data access monitoring based operation schemes. 597 * @ctx: monitoring context 598 * @schemes: array of the schemes 599 * @nr_schemes: number of entries in @schemes 600 * 601 * This function should not be called while the kdamond of the context is 602 * running. 603 */ 604 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, 605 ssize_t nr_schemes) 606 { 607 struct damos *s, *next; 608 ssize_t i; 609 610 damon_for_each_scheme_safe(s, next, ctx) 611 damon_destroy_scheme(s); 612 for (i = 0; i < nr_schemes; i++) 613 damon_add_scheme(ctx, schemes[i]); 614 } 615 616 /** 617 * damon_nr_running_ctxs() - Return number of currently running contexts. 618 */ 619 int damon_nr_running_ctxs(void) 620 { 621 int nr_ctxs; 622 623 mutex_lock(&damon_lock); 624 nr_ctxs = nr_running_ctxs; 625 mutex_unlock(&damon_lock); 626 627 return nr_ctxs; 628 } 629 630 /* Returns the size upper limit for each monitoring region */ 631 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) 632 { 633 struct damon_target *t; 634 struct damon_region *r; 635 unsigned long sz = 0; 636 637 damon_for_each_target(t, ctx) { 638 damon_for_each_region(r, t) 639 sz += damon_sz_region(r); 640 } 641 642 if (ctx->attrs.min_nr_regions) 643 sz /= ctx->attrs.min_nr_regions; 644 if (sz < DAMON_MIN_REGION) 645 sz = DAMON_MIN_REGION; 646 647 return sz; 648 } 649 650 static int kdamond_fn(void *data); 651 652 /* 653 * __damon_start() - Starts monitoring with given context. 654 * @ctx: monitoring context 655 * 656 * This function should be called while damon_lock is hold. 657 * 658 * Return: 0 on success, negative error code otherwise. 659 */ 660 static int __damon_start(struct damon_ctx *ctx) 661 { 662 int err = -EBUSY; 663 664 mutex_lock(&ctx->kdamond_lock); 665 if (!ctx->kdamond) { 666 err = 0; 667 reinit_completion(&ctx->kdamond_started); 668 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", 669 nr_running_ctxs); 670 if (IS_ERR(ctx->kdamond)) { 671 err = PTR_ERR(ctx->kdamond); 672 ctx->kdamond = NULL; 673 } else { 674 wait_for_completion(&ctx->kdamond_started); 675 } 676 } 677 mutex_unlock(&ctx->kdamond_lock); 678 679 return err; 680 } 681 682 /** 683 * damon_start() - Starts the monitorings for a given group of contexts. 684 * @ctxs: an array of the pointers for contexts to start monitoring 685 * @nr_ctxs: size of @ctxs 686 * @exclusive: exclusiveness of this contexts group 687 * 688 * This function starts a group of monitoring threads for a group of monitoring 689 * contexts. One thread per each context is created and run in parallel. The 690 * caller should handle synchronization between the threads by itself. If 691 * @exclusive is true and a group of threads that created by other 692 * 'damon_start()' call is currently running, this function does nothing but 693 * returns -EBUSY. 694 * 695 * Return: 0 on success, negative error code otherwise. 696 */ 697 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) 698 { 699 int i; 700 int err = 0; 701 702 mutex_lock(&damon_lock); 703 if ((exclusive && nr_running_ctxs) || 704 (!exclusive && running_exclusive_ctxs)) { 705 mutex_unlock(&damon_lock); 706 return -EBUSY; 707 } 708 709 for (i = 0; i < nr_ctxs; i++) { 710 err = __damon_start(ctxs[i]); 711 if (err) 712 break; 713 nr_running_ctxs++; 714 } 715 if (exclusive && nr_running_ctxs) 716 running_exclusive_ctxs = true; 717 mutex_unlock(&damon_lock); 718 719 return err; 720 } 721 722 /* 723 * __damon_stop() - Stops monitoring of a given context. 724 * @ctx: monitoring context 725 * 726 * Return: 0 on success, negative error code otherwise. 727 */ 728 static int __damon_stop(struct damon_ctx *ctx) 729 { 730 struct task_struct *tsk; 731 732 mutex_lock(&ctx->kdamond_lock); 733 tsk = ctx->kdamond; 734 if (tsk) { 735 get_task_struct(tsk); 736 mutex_unlock(&ctx->kdamond_lock); 737 kthread_stop_put(tsk); 738 return 0; 739 } 740 mutex_unlock(&ctx->kdamond_lock); 741 742 return -EPERM; 743 } 744 745 /** 746 * damon_stop() - Stops the monitorings for a given group of contexts. 747 * @ctxs: an array of the pointers for contexts to stop monitoring 748 * @nr_ctxs: size of @ctxs 749 * 750 * Return: 0 on success, negative error code otherwise. 751 */ 752 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) 753 { 754 int i, err = 0; 755 756 for (i = 0; i < nr_ctxs; i++) { 757 /* nr_running_ctxs is decremented in kdamond_fn */ 758 err = __damon_stop(ctxs[i]); 759 if (err) 760 break; 761 } 762 return err; 763 } 764 765 /* 766 * Reset the aggregated monitoring results ('nr_accesses' of each region). 767 */ 768 static void kdamond_reset_aggregated(struct damon_ctx *c) 769 { 770 struct damon_target *t; 771 unsigned int ti = 0; /* target's index */ 772 773 damon_for_each_target(t, c) { 774 struct damon_region *r; 775 776 damon_for_each_region(r, t) { 777 trace_damon_aggregated(t, ti, r, damon_nr_regions(t)); 778 r->last_nr_accesses = r->nr_accesses; 779 r->nr_accesses = 0; 780 } 781 ti++; 782 } 783 } 784 785 static void damon_split_region_at(struct damon_target *t, 786 struct damon_region *r, unsigned long sz_r); 787 788 static bool __damos_valid_target(struct damon_region *r, struct damos *s) 789 { 790 unsigned long sz; 791 792 sz = damon_sz_region(r); 793 return s->pattern.min_sz_region <= sz && 794 sz <= s->pattern.max_sz_region && 795 s->pattern.min_nr_accesses <= r->nr_accesses && 796 r->nr_accesses <= s->pattern.max_nr_accesses && 797 s->pattern.min_age_region <= r->age && 798 r->age <= s->pattern.max_age_region; 799 } 800 801 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, 802 struct damon_region *r, struct damos *s) 803 { 804 bool ret = __damos_valid_target(r, s); 805 806 if (!ret || !s->quota.esz || !c->ops.get_scheme_score) 807 return ret; 808 809 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; 810 } 811 812 /* 813 * damos_skip_charged_region() - Check if the given region or starting part of 814 * it is already charged for the DAMOS quota. 815 * @t: The target of the region. 816 * @rp: The pointer to the region. 817 * @s: The scheme to be applied. 818 * 819 * If a quota of a scheme has exceeded in a quota charge window, the scheme's 820 * action would applied to only a part of the target access pattern fulfilling 821 * regions. To avoid applying the scheme action to only already applied 822 * regions, DAMON skips applying the scheme action to the regions that charged 823 * in the previous charge window. 824 * 825 * This function checks if a given region should be skipped or not for the 826 * reason. If only the starting part of the region has previously charged, 827 * this function splits the region into two so that the second one covers the 828 * area that not charged in the previous charge widnow and saves the second 829 * region in *rp and returns false, so that the caller can apply DAMON action 830 * to the second one. 831 * 832 * Return: true if the region should be entirely skipped, false otherwise. 833 */ 834 static bool damos_skip_charged_region(struct damon_target *t, 835 struct damon_region **rp, struct damos *s) 836 { 837 struct damon_region *r = *rp; 838 struct damos_quota *quota = &s->quota; 839 unsigned long sz_to_skip; 840 841 /* Skip previously charged regions */ 842 if (quota->charge_target_from) { 843 if (t != quota->charge_target_from) 844 return true; 845 if (r == damon_last_region(t)) { 846 quota->charge_target_from = NULL; 847 quota->charge_addr_from = 0; 848 return true; 849 } 850 if (quota->charge_addr_from && 851 r->ar.end <= quota->charge_addr_from) 852 return true; 853 854 if (quota->charge_addr_from && r->ar.start < 855 quota->charge_addr_from) { 856 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - 857 r->ar.start, DAMON_MIN_REGION); 858 if (!sz_to_skip) { 859 if (damon_sz_region(r) <= DAMON_MIN_REGION) 860 return true; 861 sz_to_skip = DAMON_MIN_REGION; 862 } 863 damon_split_region_at(t, r, sz_to_skip); 864 r = damon_next_region(r); 865 *rp = r; 866 } 867 quota->charge_target_from = NULL; 868 quota->charge_addr_from = 0; 869 } 870 return false; 871 } 872 873 static void damos_update_stat(struct damos *s, 874 unsigned long sz_tried, unsigned long sz_applied) 875 { 876 s->stat.nr_tried++; 877 s->stat.sz_tried += sz_tried; 878 if (sz_applied) 879 s->stat.nr_applied++; 880 s->stat.sz_applied += sz_applied; 881 } 882 883 static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, 884 struct damon_region *r, struct damos_filter *filter) 885 { 886 bool matched = false; 887 struct damon_target *ti; 888 int target_idx = 0; 889 unsigned long start, end; 890 891 switch (filter->type) { 892 case DAMOS_FILTER_TYPE_TARGET: 893 damon_for_each_target(ti, ctx) { 894 if (ti == t) 895 break; 896 target_idx++; 897 } 898 matched = target_idx == filter->target_idx; 899 break; 900 case DAMOS_FILTER_TYPE_ADDR: 901 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION); 902 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION); 903 904 /* inside the range */ 905 if (start <= r->ar.start && r->ar.end <= end) { 906 matched = true; 907 break; 908 } 909 /* outside of the range */ 910 if (r->ar.end <= start || end <= r->ar.start) { 911 matched = false; 912 break; 913 } 914 /* start before the range and overlap */ 915 if (r->ar.start < start) { 916 damon_split_region_at(t, r, start - r->ar.start); 917 matched = false; 918 break; 919 } 920 /* start inside the range */ 921 damon_split_region_at(t, r, end - r->ar.start); 922 matched = true; 923 break; 924 default: 925 return false; 926 } 927 928 return matched == filter->matching; 929 } 930 931 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, 932 struct damon_region *r, struct damos *s) 933 { 934 struct damos_filter *filter; 935 936 damos_for_each_filter(filter, s) { 937 if (__damos_filter_out(ctx, t, r, filter)) 938 return true; 939 } 940 return false; 941 } 942 943 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, 944 struct damon_region *r, struct damos *s) 945 { 946 struct damos_quota *quota = &s->quota; 947 unsigned long sz = damon_sz_region(r); 948 struct timespec64 begin, end; 949 unsigned long sz_applied = 0; 950 int err = 0; 951 952 if (c->ops.apply_scheme) { 953 if (quota->esz && quota->charged_sz + sz > quota->esz) { 954 sz = ALIGN_DOWN(quota->esz - quota->charged_sz, 955 DAMON_MIN_REGION); 956 if (!sz) 957 goto update_stat; 958 damon_split_region_at(t, r, sz); 959 } 960 if (damos_filter_out(c, t, r, s)) 961 return; 962 ktime_get_coarse_ts64(&begin); 963 if (c->callback.before_damos_apply) 964 err = c->callback.before_damos_apply(c, t, r, s); 965 if (!err) 966 sz_applied = c->ops.apply_scheme(c, t, r, s); 967 ktime_get_coarse_ts64(&end); 968 quota->total_charged_ns += timespec64_to_ns(&end) - 969 timespec64_to_ns(&begin); 970 quota->charged_sz += sz; 971 if (quota->esz && quota->charged_sz >= quota->esz) { 972 quota->charge_target_from = t; 973 quota->charge_addr_from = r->ar.end + 1; 974 } 975 } 976 if (s->action != DAMOS_STAT) 977 r->age = 0; 978 979 update_stat: 980 damos_update_stat(s, sz, sz_applied); 981 } 982 983 static void damon_do_apply_schemes(struct damon_ctx *c, 984 struct damon_target *t, 985 struct damon_region *r) 986 { 987 struct damos *s; 988 989 damon_for_each_scheme(s, c) { 990 struct damos_quota *quota = &s->quota; 991 992 if (c->passed_sample_intervals < s->next_apply_sis) 993 continue; 994 995 if (!s->wmarks.activated) 996 continue; 997 998 /* Check the quota */ 999 if (quota->esz && quota->charged_sz >= quota->esz) 1000 continue; 1001 1002 if (damos_skip_charged_region(t, &r, s)) 1003 continue; 1004 1005 if (!damos_valid_target(c, t, r, s)) 1006 continue; 1007 1008 damos_apply_scheme(c, t, r, s); 1009 } 1010 } 1011 1012 /* Shouldn't be called if quota->ms and quota->sz are zero */ 1013 static void damos_set_effective_quota(struct damos_quota *quota) 1014 { 1015 unsigned long throughput; 1016 unsigned long esz; 1017 1018 if (!quota->ms) { 1019 quota->esz = quota->sz; 1020 return; 1021 } 1022 1023 if (quota->total_charged_ns) 1024 throughput = quota->total_charged_sz * 1000000 / 1025 quota->total_charged_ns; 1026 else 1027 throughput = PAGE_SIZE * 1024; 1028 esz = throughput * quota->ms; 1029 1030 if (quota->sz && quota->sz < esz) 1031 esz = quota->sz; 1032 quota->esz = esz; 1033 } 1034 1035 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) 1036 { 1037 struct damos_quota *quota = &s->quota; 1038 struct damon_target *t; 1039 struct damon_region *r; 1040 unsigned long cumulated_sz; 1041 unsigned int score, max_score = 0; 1042 1043 if (!quota->ms && !quota->sz) 1044 return; 1045 1046 /* New charge window starts */ 1047 if (time_after_eq(jiffies, quota->charged_from + 1048 msecs_to_jiffies(quota->reset_interval))) { 1049 if (quota->esz && quota->charged_sz >= quota->esz) 1050 s->stat.qt_exceeds++; 1051 quota->total_charged_sz += quota->charged_sz; 1052 quota->charged_from = jiffies; 1053 quota->charged_sz = 0; 1054 damos_set_effective_quota(quota); 1055 } 1056 1057 if (!c->ops.get_scheme_score) 1058 return; 1059 1060 /* Fill up the score histogram */ 1061 memset(quota->histogram, 0, sizeof(quota->histogram)); 1062 damon_for_each_target(t, c) { 1063 damon_for_each_region(r, t) { 1064 if (!__damos_valid_target(r, s)) 1065 continue; 1066 score = c->ops.get_scheme_score(c, t, r, s); 1067 quota->histogram[score] += damon_sz_region(r); 1068 if (score > max_score) 1069 max_score = score; 1070 } 1071 } 1072 1073 /* Set the min score limit */ 1074 for (cumulated_sz = 0, score = max_score; ; score--) { 1075 cumulated_sz += quota->histogram[score]; 1076 if (cumulated_sz >= quota->esz || !score) 1077 break; 1078 } 1079 quota->min_score = score; 1080 } 1081 1082 static void kdamond_apply_schemes(struct damon_ctx *c) 1083 { 1084 struct damon_target *t; 1085 struct damon_region *r, *next_r; 1086 struct damos *s; 1087 unsigned long sample_interval = c->attrs.sample_interval ? 1088 c->attrs.sample_interval : 1; 1089 bool has_schemes_to_apply = false; 1090 1091 damon_for_each_scheme(s, c) { 1092 if (c->passed_sample_intervals < s->next_apply_sis) 1093 continue; 1094 1095 if (!s->wmarks.activated) 1096 continue; 1097 1098 has_schemes_to_apply = true; 1099 1100 damos_adjust_quota(c, s); 1101 } 1102 1103 if (!has_schemes_to_apply) 1104 return; 1105 1106 damon_for_each_target(t, c) { 1107 damon_for_each_region_safe(r, next_r, t) 1108 damon_do_apply_schemes(c, t, r); 1109 } 1110 1111 damon_for_each_scheme(s, c) { 1112 if (c->passed_sample_intervals < s->next_apply_sis) 1113 continue; 1114 s->next_apply_sis = c->passed_sample_intervals + 1115 (s->apply_interval_us ? s->apply_interval_us : 1116 c->attrs.aggr_interval) / sample_interval; 1117 } 1118 } 1119 1120 /* 1121 * Merge two adjacent regions into one region 1122 */ 1123 static void damon_merge_two_regions(struct damon_target *t, 1124 struct damon_region *l, struct damon_region *r) 1125 { 1126 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r); 1127 1128 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / 1129 (sz_l + sz_r); 1130 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); 1131 l->ar.end = r->ar.end; 1132 damon_destroy_region(r, t); 1133 } 1134 1135 /* 1136 * Merge adjacent regions having similar access frequencies 1137 * 1138 * t target affected by this merge operation 1139 * thres '->nr_accesses' diff threshold for the merge 1140 * sz_limit size upper limit of each region 1141 */ 1142 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, 1143 unsigned long sz_limit) 1144 { 1145 struct damon_region *r, *prev = NULL, *next; 1146 1147 damon_for_each_region_safe(r, next, t) { 1148 if (abs(r->nr_accesses - r->last_nr_accesses) > thres) 1149 r->age = 0; 1150 else 1151 r->age++; 1152 1153 if (prev && prev->ar.end == r->ar.start && 1154 abs(prev->nr_accesses - r->nr_accesses) <= thres && 1155 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit) 1156 damon_merge_two_regions(t, prev, r); 1157 else 1158 prev = r; 1159 } 1160 } 1161 1162 /* 1163 * Merge adjacent regions having similar access frequencies 1164 * 1165 * threshold '->nr_accesses' diff threshold for the merge 1166 * sz_limit size upper limit of each region 1167 * 1168 * This function merges monitoring target regions which are adjacent and their 1169 * access frequencies are similar. This is for minimizing the monitoring 1170 * overhead under the dynamically changeable access pattern. If a merge was 1171 * unnecessarily made, later 'kdamond_split_regions()' will revert it. 1172 * 1173 * The total number of regions could be higher than the user-defined limit, 1174 * max_nr_regions for some cases. For example, the user can update 1175 * max_nr_regions to a number that lower than the current number of regions 1176 * while DAMON is running. For such a case, repeat merging until the limit is 1177 * met while increasing @threshold up to possible maximum level. 1178 */ 1179 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, 1180 unsigned long sz_limit) 1181 { 1182 struct damon_target *t; 1183 unsigned int nr_regions; 1184 unsigned int max_thres; 1185 1186 max_thres = c->attrs.aggr_interval / 1187 (c->attrs.sample_interval ? c->attrs.sample_interval : 1); 1188 do { 1189 nr_regions = 0; 1190 damon_for_each_target(t, c) { 1191 damon_merge_regions_of(t, threshold, sz_limit); 1192 nr_regions += damon_nr_regions(t); 1193 } 1194 threshold = max(1, threshold * 2); 1195 } while (nr_regions > c->attrs.max_nr_regions && 1196 threshold / 2 < max_thres); 1197 } 1198 1199 /* 1200 * Split a region in two 1201 * 1202 * r the region to be split 1203 * sz_r size of the first sub-region that will be made 1204 */ 1205 static void damon_split_region_at(struct damon_target *t, 1206 struct damon_region *r, unsigned long sz_r) 1207 { 1208 struct damon_region *new; 1209 1210 new = damon_new_region(r->ar.start + sz_r, r->ar.end); 1211 if (!new) 1212 return; 1213 1214 r->ar.end = new->ar.start; 1215 1216 new->age = r->age; 1217 new->last_nr_accesses = r->last_nr_accesses; 1218 new->nr_accesses = r->nr_accesses; 1219 1220 damon_insert_region(new, r, damon_next_region(r), t); 1221 } 1222 1223 /* Split every region in the given target into 'nr_subs' regions */ 1224 static void damon_split_regions_of(struct damon_target *t, int nr_subs) 1225 { 1226 struct damon_region *r, *next; 1227 unsigned long sz_region, sz_sub = 0; 1228 int i; 1229 1230 damon_for_each_region_safe(r, next, t) { 1231 sz_region = damon_sz_region(r); 1232 1233 for (i = 0; i < nr_subs - 1 && 1234 sz_region > 2 * DAMON_MIN_REGION; i++) { 1235 /* 1236 * Randomly select size of left sub-region to be at 1237 * least 10 percent and at most 90% of original region 1238 */ 1239 sz_sub = ALIGN_DOWN(damon_rand(1, 10) * 1240 sz_region / 10, DAMON_MIN_REGION); 1241 /* Do not allow blank region */ 1242 if (sz_sub == 0 || sz_sub >= sz_region) 1243 continue; 1244 1245 damon_split_region_at(t, r, sz_sub); 1246 sz_region = sz_sub; 1247 } 1248 } 1249 } 1250 1251 /* 1252 * Split every target region into randomly-sized small regions 1253 * 1254 * This function splits every target region into random-sized small regions if 1255 * current total number of the regions is equal or smaller than half of the 1256 * user-specified maximum number of regions. This is for maximizing the 1257 * monitoring accuracy under the dynamically changeable access patterns. If a 1258 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert 1259 * it. 1260 */ 1261 static void kdamond_split_regions(struct damon_ctx *ctx) 1262 { 1263 struct damon_target *t; 1264 unsigned int nr_regions = 0; 1265 static unsigned int last_nr_regions; 1266 int nr_subregions = 2; 1267 1268 damon_for_each_target(t, ctx) 1269 nr_regions += damon_nr_regions(t); 1270 1271 if (nr_regions > ctx->attrs.max_nr_regions / 2) 1272 return; 1273 1274 /* Maybe the middle of the region has different access frequency */ 1275 if (last_nr_regions == nr_regions && 1276 nr_regions < ctx->attrs.max_nr_regions / 3) 1277 nr_subregions = 3; 1278 1279 damon_for_each_target(t, ctx) 1280 damon_split_regions_of(t, nr_subregions); 1281 1282 last_nr_regions = nr_regions; 1283 } 1284 1285 /* 1286 * Check whether current monitoring should be stopped 1287 * 1288 * The monitoring is stopped when either the user requested to stop, or all 1289 * monitoring targets are invalid. 1290 * 1291 * Returns true if need to stop current monitoring. 1292 */ 1293 static bool kdamond_need_stop(struct damon_ctx *ctx) 1294 { 1295 struct damon_target *t; 1296 1297 if (kthread_should_stop()) 1298 return true; 1299 1300 if (!ctx->ops.target_valid) 1301 return false; 1302 1303 damon_for_each_target(t, ctx) { 1304 if (ctx->ops.target_valid(t)) 1305 return false; 1306 } 1307 1308 return true; 1309 } 1310 1311 static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric) 1312 { 1313 struct sysinfo i; 1314 1315 switch (metric) { 1316 case DAMOS_WMARK_FREE_MEM_RATE: 1317 si_meminfo(&i); 1318 return i.freeram * 1000 / i.totalram; 1319 default: 1320 break; 1321 } 1322 return -EINVAL; 1323 } 1324 1325 /* 1326 * Returns zero if the scheme is active. Else, returns time to wait for next 1327 * watermark check in micro-seconds. 1328 */ 1329 static unsigned long damos_wmark_wait_us(struct damos *scheme) 1330 { 1331 unsigned long metric; 1332 1333 if (scheme->wmarks.metric == DAMOS_WMARK_NONE) 1334 return 0; 1335 1336 metric = damos_wmark_metric_value(scheme->wmarks.metric); 1337 /* higher than high watermark or lower than low watermark */ 1338 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) { 1339 if (scheme->wmarks.activated) 1340 pr_debug("deactivate a scheme (%d) for %s wmark\n", 1341 scheme->action, 1342 metric > scheme->wmarks.high ? 1343 "high" : "low"); 1344 scheme->wmarks.activated = false; 1345 return scheme->wmarks.interval; 1346 } 1347 1348 /* inactive and higher than middle watermark */ 1349 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) && 1350 !scheme->wmarks.activated) 1351 return scheme->wmarks.interval; 1352 1353 if (!scheme->wmarks.activated) 1354 pr_debug("activate a scheme (%d)\n", scheme->action); 1355 scheme->wmarks.activated = true; 1356 return 0; 1357 } 1358 1359 static void kdamond_usleep(unsigned long usecs) 1360 { 1361 /* See Documentation/timers/timers-howto.rst for the thresholds */ 1362 if (usecs > 20 * USEC_PER_MSEC) 1363 schedule_timeout_idle(usecs_to_jiffies(usecs)); 1364 else 1365 usleep_idle_range(usecs, usecs + 1); 1366 } 1367 1368 /* Returns negative error code if it's not activated but should return */ 1369 static int kdamond_wait_activation(struct damon_ctx *ctx) 1370 { 1371 struct damos *s; 1372 unsigned long wait_time; 1373 unsigned long min_wait_time = 0; 1374 bool init_wait_time = false; 1375 1376 while (!kdamond_need_stop(ctx)) { 1377 damon_for_each_scheme(s, ctx) { 1378 wait_time = damos_wmark_wait_us(s); 1379 if (!init_wait_time || wait_time < min_wait_time) { 1380 init_wait_time = true; 1381 min_wait_time = wait_time; 1382 } 1383 } 1384 if (!min_wait_time) 1385 return 0; 1386 1387 kdamond_usleep(min_wait_time); 1388 1389 if (ctx->callback.after_wmarks_check && 1390 ctx->callback.after_wmarks_check(ctx)) 1391 break; 1392 } 1393 return -EBUSY; 1394 } 1395 1396 static void kdamond_init_intervals_sis(struct damon_ctx *ctx) 1397 { 1398 unsigned long sample_interval = ctx->attrs.sample_interval ? 1399 ctx->attrs.sample_interval : 1; 1400 unsigned long apply_interval; 1401 struct damos *scheme; 1402 1403 ctx->passed_sample_intervals = 0; 1404 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; 1405 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / 1406 sample_interval; 1407 1408 damon_for_each_scheme(scheme, ctx) { 1409 apply_interval = scheme->apply_interval_us ? 1410 scheme->apply_interval_us : ctx->attrs.aggr_interval; 1411 scheme->next_apply_sis = apply_interval / sample_interval; 1412 } 1413 } 1414 1415 /* 1416 * The monitoring daemon that runs as a kernel thread 1417 */ 1418 static int kdamond_fn(void *data) 1419 { 1420 struct damon_ctx *ctx = data; 1421 struct damon_target *t; 1422 struct damon_region *r, *next; 1423 unsigned int max_nr_accesses = 0; 1424 unsigned long sz_limit = 0; 1425 1426 pr_debug("kdamond (%d) starts\n", current->pid); 1427 1428 complete(&ctx->kdamond_started); 1429 kdamond_init_intervals_sis(ctx); 1430 1431 if (ctx->ops.init) 1432 ctx->ops.init(ctx); 1433 if (ctx->callback.before_start && ctx->callback.before_start(ctx)) 1434 goto done; 1435 1436 sz_limit = damon_region_sz_limit(ctx); 1437 1438 while (!kdamond_need_stop(ctx)) { 1439 /* 1440 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could 1441 * be changed from after_wmarks_check() or after_aggregation() 1442 * callbacks. Read the values here, and use those for this 1443 * iteration. That is, damon_set_attrs() updated new values 1444 * are respected from next iteration. 1445 */ 1446 unsigned long next_aggregation_sis = ctx->next_aggregation_sis; 1447 unsigned long next_ops_update_sis = ctx->next_ops_update_sis; 1448 unsigned long sample_interval = ctx->attrs.sample_interval; 1449 1450 if (kdamond_wait_activation(ctx)) 1451 break; 1452 1453 if (ctx->ops.prepare_access_checks) 1454 ctx->ops.prepare_access_checks(ctx); 1455 if (ctx->callback.after_sampling && 1456 ctx->callback.after_sampling(ctx)) 1457 break; 1458 1459 kdamond_usleep(sample_interval); 1460 ctx->passed_sample_intervals++; 1461 1462 if (ctx->ops.check_accesses) 1463 max_nr_accesses = ctx->ops.check_accesses(ctx); 1464 1465 if (ctx->passed_sample_intervals >= next_aggregation_sis) { 1466 kdamond_merge_regions(ctx, 1467 max_nr_accesses / 10, 1468 sz_limit); 1469 if (ctx->callback.after_aggregation && 1470 ctx->callback.after_aggregation(ctx)) 1471 break; 1472 } 1473 1474 /* 1475 * do kdamond_apply_schemes() after kdamond_merge_regions() if 1476 * possible, to reduce overhead 1477 */ 1478 if (!list_empty(&ctx->schemes)) 1479 kdamond_apply_schemes(ctx); 1480 1481 sample_interval = ctx->attrs.sample_interval ? 1482 ctx->attrs.sample_interval : 1; 1483 if (ctx->passed_sample_intervals >= next_aggregation_sis) { 1484 ctx->next_aggregation_sis = next_aggregation_sis + 1485 ctx->attrs.aggr_interval / sample_interval; 1486 1487 kdamond_reset_aggregated(ctx); 1488 kdamond_split_regions(ctx); 1489 if (ctx->ops.reset_aggregated) 1490 ctx->ops.reset_aggregated(ctx); 1491 } 1492 1493 if (ctx->passed_sample_intervals >= next_ops_update_sis) { 1494 ctx->next_ops_update_sis = next_ops_update_sis + 1495 ctx->attrs.ops_update_interval / 1496 sample_interval; 1497 if (ctx->ops.update) 1498 ctx->ops.update(ctx); 1499 sz_limit = damon_region_sz_limit(ctx); 1500 } 1501 } 1502 done: 1503 damon_for_each_target(t, ctx) { 1504 damon_for_each_region_safe(r, next, t) 1505 damon_destroy_region(r, t); 1506 } 1507 1508 if (ctx->callback.before_terminate) 1509 ctx->callback.before_terminate(ctx); 1510 if (ctx->ops.cleanup) 1511 ctx->ops.cleanup(ctx); 1512 1513 pr_debug("kdamond (%d) finishes\n", current->pid); 1514 mutex_lock(&ctx->kdamond_lock); 1515 ctx->kdamond = NULL; 1516 mutex_unlock(&ctx->kdamond_lock); 1517 1518 mutex_lock(&damon_lock); 1519 nr_running_ctxs--; 1520 if (!nr_running_ctxs && running_exclusive_ctxs) 1521 running_exclusive_ctxs = false; 1522 mutex_unlock(&damon_lock); 1523 1524 return 0; 1525 } 1526 1527 /* 1528 * struct damon_system_ram_region - System RAM resource address region of 1529 * [@start, @end). 1530 * @start: Start address of the region (inclusive). 1531 * @end: End address of the region (exclusive). 1532 */ 1533 struct damon_system_ram_region { 1534 unsigned long start; 1535 unsigned long end; 1536 }; 1537 1538 static int walk_system_ram(struct resource *res, void *arg) 1539 { 1540 struct damon_system_ram_region *a = arg; 1541 1542 if (a->end - a->start < resource_size(res)) { 1543 a->start = res->start; 1544 a->end = res->end; 1545 } 1546 return 0; 1547 } 1548 1549 /* 1550 * Find biggest 'System RAM' resource and store its start and end address in 1551 * @start and @end, respectively. If no System RAM is found, returns false. 1552 */ 1553 static bool damon_find_biggest_system_ram(unsigned long *start, 1554 unsigned long *end) 1555 1556 { 1557 struct damon_system_ram_region arg = {}; 1558 1559 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram); 1560 if (arg.end <= arg.start) 1561 return false; 1562 1563 *start = arg.start; 1564 *end = arg.end; 1565 return true; 1566 } 1567 1568 /** 1569 * damon_set_region_biggest_system_ram_default() - Set the region of the given 1570 * monitoring target as requested, or biggest 'System RAM'. 1571 * @t: The monitoring target to set the region. 1572 * @start: The pointer to the start address of the region. 1573 * @end: The pointer to the end address of the region. 1574 * 1575 * This function sets the region of @t as requested by @start and @end. If the 1576 * values of @start and @end are zero, however, this function finds the biggest 1577 * 'System RAM' resource and sets the region to cover the resource. In the 1578 * latter case, this function saves the start and end addresses of the resource 1579 * in @start and @end, respectively. 1580 * 1581 * Return: 0 on success, negative error code otherwise. 1582 */ 1583 int damon_set_region_biggest_system_ram_default(struct damon_target *t, 1584 unsigned long *start, unsigned long *end) 1585 { 1586 struct damon_addr_range addr_range; 1587 1588 if (*start > *end) 1589 return -EINVAL; 1590 1591 if (!*start && !*end && 1592 !damon_find_biggest_system_ram(start, end)) 1593 return -EINVAL; 1594 1595 addr_range.start = *start; 1596 addr_range.end = *end; 1597 return damon_set_regions(t, &addr_range, 1); 1598 } 1599 1600 static int __init damon_init(void) 1601 { 1602 damon_region_cache = KMEM_CACHE(damon_region, 0); 1603 if (unlikely(!damon_region_cache)) { 1604 pr_err("creating damon_region_cache fails\n"); 1605 return -ENOMEM; 1606 } 1607 1608 return 0; 1609 } 1610 1611 subsys_initcall(damon_init); 1612 1613 #include "core-test.h" 1614