// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

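/*
 * Illustrative sketch (not part of this file): an operations provider would
 * typically register its set from its own init code, e.g.:
 *
 *	static struct damon_operations ops = {
 *		.id = DAMON_OPS_VADDR,	(hypothetical id; any valid id works)
 *		(... callbacks ...)
 *	};
 *
 *	return damon_register_ops(&ops);
 *
 * A second registration for the same id fails with -EINVAL.
 */
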
/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

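/*
 * Illustrative sketch (not part of this file): callers usually pair
 * damon_new_region() with damon_add_region() to give a target its initial
 * region, e.g.:
 *
 *	struct damon_region *r = damon_new_region(start, end);
 *
 *	if (r)
 *		damon_add_region(r, t);
 *
 * damon_destroy_region() undoes both steps at once.
 */
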
/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in the given ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
		}
	}
	return 0;
}

struct damos *damon_new_scheme(
		unsigned long min_sz_region, unsigned long max_sz_region,
		unsigned int min_nr_accesses, unsigned int max_nr_accesses,
		unsigned int min_age_region, unsigned int max_age_region,
		enum damos_action action, struct damos_quota *quota,
		struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->min_sz_region = min_sz_region;
	scheme->max_sz_region = max_sz_region;
	scheme->min_nr_accesses = min_nr_accesses;
	scheme->max_nr_accesses = max_nr_accesses;
	scheme->min_age_region = min_age_region;
	scheme->max_age_region = max_age_region;
	scheme->action = action;
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota.ms = quota->ms;
	scheme->quota.sz = quota->sz;
	scheme->quota.reset_interval = quota->reset_interval;
	scheme->quota.weight_sz = quota->weight_sz;
	scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
	scheme->quota.weight_age = quota->weight_age;
	scheme->quota.total_charged_sz = 0;
	scheme->quota.total_charged_ns = 0;
	scheme->quota.esz = 0;
	scheme->quota.charged_sz = 0;
	scheme->quota.charged_from = 0;
	scheme->quota.charge_target_from = NULL;
	scheme->quota.charge_addr_from = 0;

	scheme->wmarks.metric = wmarks->metric;
	scheme->wmarks.interval = wmarks->interval;
	scheme->wmarks.high = wmarks->high;
	scheme->wmarks.mid = wmarks->mid;
	scheme->wmarks.low = wmarks->low;
	scheme->wmarks.activated = true;

	return scheme;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	damon_del_scheme(s);
	damon_free_scheme(s);
}

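/*
 * Illustrative sketch (not part of this file): a caller builds a scheme by
 * filling quota and watermarks first, then passing them to damon_new_scheme().
 * The values below are made-up examples.
 *
 *	struct damos_quota quota = { .ms = 10, .reset_interval = 1000,
 *			.weight_sz = 1, .weight_nr_accesses = 1,
 *			.weight_age = 1 };
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s = damon_new_scheme(0, ULONG_MAX, 0, UINT_MAX,
 *			0, UINT_MAX, DAMOS_STAT, &quota, &wmarks);
 *
 *	if (s)
 *		damon_add_scheme(ctx, s);
 */
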
/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->sample_interval = 5 * 1000;
	ctx->aggr_interval = 100 * 1000;
	ctx->ops_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_ops_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @ops_upd_int:	time interval between monitoring operations updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		unsigned long aggr_int, unsigned long ops_upd_int,
		unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	if (min_nr_reg < 3)
		return -EINVAL;
	if (min_nr_reg > max_nr_reg)
		return -EINVAL;

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->ops_update_interval = ops_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}

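/*
 * Illustrative sketch (not part of this file): a minimal context setup that
 * simply re-applies the defaults set by damon_new_ctx().  Intervals are in
 * microseconds, as noted above; the error path label is hypothetical.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx || damon_set_attrs(ctx, 5000, 100000, 60000000, 10, 1000))
 *		goto fail;
 */
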
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
	return 0;
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads created by another 'damon_start()'
 * call is currently running, this function does nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

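/*
 * Illustrative sketch (not part of this file): starting and later stopping a
 * single, exclusively running context.  'ctx' is assumed to be fully
 * configured with an operations set and at least one target.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *
 *	err = damon_start(ctxs, 1, true);
 *	(... monitoring runs ...)
 *	err = damon_stop(ctxs, 1);
 */
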
/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to current time for next check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}

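/*
 * Note on units (illustrative): intervals are kept in microseconds while
 * timespec64 deltas are in nanoseconds, hence the 'interval * 1000' above.
 * With the default aggr_interval of 100000 us, aggregation triggers once at
 * least 100,000,000 ns have passed since 'last_aggregation', and the baseline
 * is then reset to the current time.
 */
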
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = r->ar.end - r->ar.start;
	return s->min_sz_region <= sz && sz <= s->max_sz_region &&
		s->min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->max_nr_accesses &&
		s->min_age_region <= r->age && r->age <= s->max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long sz = r->ar.end - r->ar.start;
		struct timespec64 begin, end;
		unsigned long sz_applied = 0;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		/* Skip previously charged regions */
		if (quota->charge_target_from) {
			if (t != quota->charge_target_from)
				continue;
			if (r == damon_last_region(t)) {
				quota->charge_target_from = NULL;
				quota->charge_addr_from = 0;
				continue;
			}
			if (quota->charge_addr_from &&
					r->ar.end <= quota->charge_addr_from)
				continue;

			if (quota->charge_addr_from && r->ar.start <
					quota->charge_addr_from) {
				sz = ALIGN_DOWN(quota->charge_addr_from -
						r->ar.start, DAMON_MIN_REGION);
				if (!sz) {
					if (r->ar.end - r->ar.start <=
							DAMON_MIN_REGION)
						continue;
					sz = DAMON_MIN_REGION;
				}
				damon_split_region_at(t, r, sz);
				r = damon_next_region(r);
				sz = r->ar.end - r->ar.start;
			}
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
		}

		if (!damos_valid_target(c, t, r, s))
			continue;

		/* Apply the scheme */
		if (c->ops.apply_scheme) {
			if (quota->esz &&
					quota->charged_sz + sz > quota->esz) {
				sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
						DAMON_MIN_REGION);
				if (!sz)
					goto update_stat;
				damon_split_region_at(t, r, sz);
			}
			ktime_get_coarse_ts64(&begin);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
			ktime_get_coarse_ts64(&end);
			quota->total_charged_ns += timespec64_to_ns(&end) -
				timespec64_to_ns(&begin);
			quota->charged_sz += sz;
			if (quota->esz && quota->charged_sz >= quota->esz) {
				quota->charge_target_from = t;
				quota->charge_addr_from = r->ar.end + 1;
			}
		}
		if (s->action != DAMOS_STAT)
			r->age = 0;

update_stat:
		s->stat.nr_tried++;
		s->stat.sz_tried += sz;
		if (sz_applied)
			s->stat.nr_applied++;
		s->stat.sz_applied += sz_applied;
	}
}

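/*
 * Worked example for the effective quota (esz) computation below
 * (illustrative numbers): if 100 MiB were charged while spending a total of
 * 50 ms (50,000,000 ns) in apply_scheme(), the throughput is
 * 100 MiB * 1000000 / 50000000, i.e. 2 MiB per millisecond.  With
 * quota->ms == 10 this yields esz == 20 MiB, further capped by quota->sz when
 * that is smaller.  Before anything has been charged, a conservative
 * PAGE_SIZE * 1024 bytes per millisecond is assumed instead.
 */
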
/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long cumulated_sz;
		unsigned int score, max_score = 0;

		if (!s->wmarks.activated)
			continue;

		if (!quota->ms && !quota->sz)
			continue;

		/* New charge window starts */
		if (time_after_eq(jiffies, quota->charged_from +
					msecs_to_jiffies(
						quota->reset_interval))) {
			if (quota->esz && quota->charged_sz >= quota->esz)
				s->stat.qt_exceeds++;
			quota->total_charged_sz += quota->charged_sz;
			quota->charged_from = jiffies;
			quota->charged_sz = 0;
			damos_set_effective_quota(quota);
		}

		if (!c->ops.get_scheme_score)
			continue;

		/* Fill up the score histogram */
		memset(quota->histogram, 0, sizeof(quota->histogram));
		damon_for_each_target(t, c) {
			damon_for_each_region(r, t) {
				if (!__damos_valid_target(r, s))
					continue;
				score = c->ops.get_scheme_score(
						c, t, r, s);
				quota->histogram[score] +=
					r->ar.end - r->ar.start;
				if (score > max_score)
					max_score = score;
			}
		}

		/* Set the min score limit */
		for (cumulated_sz = 0, score = max_score; ; score--) {
			cumulated_sz += quota->histogram[score];
			if (cumulated_sz >= quota->esz || !score)
				break;
		}
		quota->min_score = score;
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

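/*
 * Note on the prioritization above (illustrative): the histogram maps each
 * possible score to the total size of currently valid regions having that
 * score.  Walking from the highest observed score downward and accumulating
 * those sizes until the effective quota (esz) is covered yields 'min_score';
 * damos_valid_target() then lets only regions scoring at least that value
 * consume the quota, so the highest-priority regions get applied first.
 */
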
static inline unsigned long sz_damon_region(struct damon_region *r)
{
	return r->ar.end - r->ar.start;
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

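/*
 * Note on the split heuristic above (illustrative numbers): with the default
 * max_nr_regions of 1000, splitting runs only while the total number of
 * regions is at most 500, normally into two sub-regions per region.  If the
 * region count did not change since the last split and is still below
 * 1000 / 3, three-way splits are tried instead, on the assumption that the
 * middle of the regions may have a different access frequency.
 */
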
/*
 * Check whether it is time to check and apply the operations-related data
 * structures.
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_ops_update,
			ctx->ops_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if need to stop current monitoring.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

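/*
 * Illustrative example for the watermark checks below: with
 * DAMOS_WMARK_FREE_MEM_RATE, the metric is the free memory rate in
 * per-thousand of total memory.  Given high == 500, mid == 400 and low == 200,
 * a scheme is deactivated while more than 50% or less than 20% of memory is
 * free, stays inactive while 40-50% is free, and (re)activates once the free
 * memory rate falls below 40% while remaining above 20%.
 */
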
/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;
	bool done = false;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		done = true;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx) && !done) {
		if (kdamond_wait_activation(ctx)) {
			done = true;
			continue;
		}

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx)) {
			done = true;
			continue;
		}

		kdamond_usleep(ctx->sample_interval);

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx)) {
				done = true;
				continue;
			}
			kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (kdamond_need_update_operations(ctx)) {
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

#include "core-test.h"