// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
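 * (KYBER_OTHER has no latency target, so its latency is never measured or
 * used to adjust queue depths.)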
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * ctx maps to hctx in the same way that kcq maps to khd: we use
 * request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure that operations on rq_list and kcq_map are atomic.
	 * It also protects the requests on rq_list during merging.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, enforced by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(unsigned int op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}

static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
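	 * (A latency_timeout of zero means that no measurement window is open
	 * yet; the max() with 1 below keeps a freshly armed deadline non-zero.)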
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}

static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
				   depth);
	}
}

static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target.
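		 * Since p99 is a bucket index, the scaling factor is
		 * (p99 + 1) / (1 << KYBER_LATENCY_SHIFT).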
		 * E.g., if the p99 is 3/4 of the target, then we throttle down
		 * to 3/4 of the current depth, and if the p99 is 2x the target,
		 * then we double the depth.
		 */
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}

static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags->sb.shift;

	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
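			/* Unwind the kcq_map bitmaps initialized so far. */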
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	kyber_depth_updated(hctx);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}

static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
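	 *
	 * For example (the exact numbers depend on the tag sbitmap layout):
	 * with 64-bit sbitmap words (a shift of 6), kyber_depth_updated()
	 * computes async_depth = 64 * 75 / 100 = 48, so an async request may
	 * claim at most 48 of the 64 tags covered by each word.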
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
			    unsigned int nr_segs)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		trace_block_rq_insert(rq);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
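	/* The kcq list for this domain is now empty; clear its bit in kcq_map. */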
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the wait
		 * queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
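	 * (That is why the list_first_entry() following kyber_flush_busy_kcqs()
	 * below cannot see an empty list.)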
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start = kyber_##name##_rqs_start,				\
	.next = kyber_##name##_rqs_next,				\
	.stop = kyber_##name##_rqs_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");
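
/*
 * Usage sketch (the device name and target value below are examples, not
 * defaults): kyber is selected per queue via sysfs, and the read/write
 * latency targets are exposed in nanoseconds by the read_lat_nsec and
 * write_lat_nsec attributes defined above.
 *
 *   echo kyber > /sys/block/nvme0n1/queue/scheduler
 *   echo 20000000 > /sys/block/nvme0n1/queue/iosched/write_lat_nsec   # 20 ms
 */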