/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Header file for the BFQ I/O scheduler: data structures and
 * prototypes of interface functions among BFQ components.
 */
#ifndef _BFQ_H
#define _BFQ_H

#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>

#include "blk-cgroup-rwstat.h"

#define BFQ_IOPRIO_CLASSES	3
#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)

#define BFQ_MIN_WEIGHT			1
#define BFQ_MAX_WEIGHT			1000
#define BFQ_WEIGHT_CONVERSION_COEFF	10

#define BFQ_DEFAULT_QUEUE_IOPRIO	4

#define BFQ_WEIGHT_LEGACY_DFL	100
#define BFQ_DEFAULT_GRP_IOPRIO	0
#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE

#define MAX_BFQQ_NAME_LENGTH 16

/*
 * Soft real-time applications are far more latency-sensitive than
 * interactive ones. Over-raise the weight of the former to privilege
 * them against the latter.
 */
#define BFQ_SOFTRT_WEIGHT_FACTOR	100

struct bfq_entity;
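/*
 * Illustrative sketch of the ioprio -> weight conversion that
 * BFQ_WEIGHT_CONVERSION_COEFF enables; the real helper is the
 * bfq_ioprio_to_weight() prototype declared later in this header, and
 * this copy assumes the usual linear in-tree mapping. Lower ioprio
 * values mean higher priority: e.g., ioprio 0 maps to weight 80, the
 * default ioprio 4 to weight 40, ioprio 7 to weight 10.
 */
static inline unsigned short bfq_ioprio_to_weight_sketch(int ioprio)
{
	return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}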
/**
 * struct bfq_service_tree - per ioprio_class service tree.
 *
 * Each service tree represents a B-WF2Q+ scheduler on its own. Each
 * ioprio_class has its own independent scheduler, and so its own
 * bfq_service_tree. All the fields are protected by the queue lock
 * of the containing bfqd.
 */
struct bfq_service_tree {
	/* tree for active entities (i.e., those backlogged) */
	struct rb_root active;
	/* tree for idle entities (i.e., not backlogged, with V < F_i) */
	struct rb_root idle;

	/* idle entity with minimum F_i */
	struct bfq_entity *first_idle;
	/* idle entity with maximum F_i */
	struct bfq_entity *last_idle;

	/* scheduler virtual time */
	u64 vtime;
	/* scheduler weight sum; active and idle entities contribute to it */
	unsigned long wsum;
};

/**
 * struct bfq_sched_data - multi-class scheduler.
 *
 * bfq_sched_data is the basic scheduler queue. It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
 * intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher-priority queues are served before all the
 * requests from lower-priority queues; among requests of the same
 * queue, requests are served according to B-WF2Q+.
 *
 * The schedule is implemented by the service trees, plus the field
 * @next_in_service, which points to the entity on the active trees
 * that will be served next, if 1) no changes in the schedule occur
 * before the current in-service entity expires, 2) the in-service
 * queue becomes idle when it expires, and 3) if the entity pointed to
 * by in_service_entity is not a queue, then the in-service child
 * entity of the entity pointed to by in_service_entity becomes idle
 * on expiration. This peculiar definition allows for the following
 * optimization, not yet exploited: while a given entity is still in
 * service, we already know which is the best candidate for next
 * service among the other active entities in the same parent
 * entity. We can then quickly compare the timestamps of the
 * in-service entity with those of such a best candidate.
 *
 * All fields are protected by the lock of the containing bfqd.
 */
struct bfq_sched_data {
	/* entity in service */
	struct bfq_entity *in_service_entity;
	/* head-of-line entity (see comments above) */
	struct bfq_entity *next_in_service;
	/* array of service trees, one per ioprio_class */
	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
	/* last time CLASS_IDLE was served */
	unsigned long bfq_class_idle_last_service;
};

/**
 * struct bfq_weight_counter - counter of the number of all active queues
 *                             with a given weight.
 */
struct bfq_weight_counter {
	unsigned int weight; /* weight of the queues this counter refers to */
	unsigned int num_active; /* nr of active queues with this weight */
	/*
	 * Weights tree member (see bfq_data's @queue_weights_tree)
	 */
	struct rb_node weights_node;
};
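/*
 * Illustrative sketch (not the in-tree lookup) of how the strict
 * RT > BE > IDLE ordering described above maps onto @service_tree:
 * scan the per-class trees in priority order and pick the first one
 * with backlogged (active) entities.
 */
static inline struct bfq_service_tree *
bfq_first_backlogged_tree_sketch(struct bfq_sched_data *sd)
{
	int i;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		if (!RB_EMPTY_ROOT(&sd->service_tree[i].active))
			return &sd->service_tree[i];

	return NULL; /* no backlogged entity in any class */
}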
/**
 * struct bfq_entity - schedulable entity.
 *
 * A bfq_entity is used to represent either a bfq_queue (leaf node in the
 * cgroup hierarchy) or a bfq_group in the upper-level scheduler. Each
 * entity belongs to the sched_data of the parent group in the cgroup
 * hierarchy. Non-leaf entities also have their own sched_data, stored
 * in @my_sched_data.
 *
 * Each entity stores its priority values independently; this would
 * allow different weights on different devices, but this
 * functionality is not yet exported to userspace. Priorities and
 * weights are updated lazily, first storing the new values into the
 * new_* fields, then setting the @prio_changed flag. As soon as
 * there is a transition in the entity state that allows the priority
 * update to take place, the effective and the requested priority
 * values are synchronized.
 *
 * Unless cgroups are used, the weight value is calculated from the
 * ioprio to export the same interface as CFQ. When dealing with
 * "well-behaved" queues (i.e., queues that do not spend too much
 * time consuming their budget and have true sequential behavior, and
 * when there are no external factors breaking anticipation), the
 * relative weights at each level of the cgroups hierarchy should be
 * guaranteed. All the fields are protected by the queue lock of the
 * containing bfqd.
 */
struct bfq_entity {
	/* service_tree member */
	struct rb_node rb_node;

	/*
	 * Flag, true if the entity is on a tree (either the active or
	 * the idle one of its service_tree) or is in service.
	 */
	bool on_st_or_in_serv;

	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
	u64 start, finish;

	/* tree the entity is enqueued into; %NULL if not on a tree */
	struct rb_root *tree;

	/*
	 * minimum start time of the (active) subtree rooted at this
	 * entity; used for O(log N) lookups into active trees
	 */
	u64 min_start;

	/* amount of service received during the last service slot */
	int service;

	/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
	int budget;

	/* Number of requests allocated in the subtree of this entity */
	int allocated;

	/*
	 * device weight; if non-zero, it overrides the default weight of
	 * bfq_group_data
	 */
	int dev_weight;
	/* weight of the queue */
	int weight;
	/* next weight if a change is in progress */
	int new_weight;

	/* original weight, used to implement weight boosting */
	int orig_weight;

	/* parent entity, for hierarchical scheduling */
	struct bfq_entity *parent;

	/*
	 * For non-leaf nodes in the hierarchy, the associated
	 * scheduler queue, %NULL on leaf nodes.
	 */
	struct bfq_sched_data *my_sched_data;
	/* the scheduler queue this entity belongs to */
	struct bfq_sched_data *sched_data;

	/* flag, set to request a weight, ioprio or ioprio_class change */
	int prio_changed;

	/* flag, set if the entity is counted in groups_with_pending_reqs */
	bool in_groups_with_pending_reqs;

	/* last child queue of entity created (for non-leaf entities) */
	struct bfq_queue *last_bfqq_created;
};

struct bfq_group;
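/*
 * Illustrative sketch (not the in-tree code) of the B-WF2Q+ finish
 * timestamp documented above for @budget: F_i = S_i + budget/weight,
 * with timestamps expressed in the abstract sectors/weight unit.
 */
static inline u64 bfq_finish_timestamp_sketch(const struct bfq_entity *entity)
{
	/* weight is always >= BFQ_MIN_WEIGHT, i.e., non-zero */
	return entity->start + entity->budget / entity->weight;
}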
/**
 * struct bfq_ttime - per process thinktime stats.
 */
struct bfq_ttime {
	/* completion time of the last request */
	u64 last_end_request;

	/* total process thinktime */
	u64 ttime_total;
	/* number of thinktime samples */
	unsigned long ttime_samples;
	/* average process thinktime */
	u64 ttime_mean;
};
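/*
 * Illustrative sketch of how a decaying think-time average can be
 * maintained in the fields above: an exponential decay with weight
 * 7/8, in the spirit of the update code in bfq-iosched.c (which
 * additionally clamps @elapsed_ns before use). div_u64() comes from
 * <linux/math64.h>.
 */
static inline void bfq_update_ttime_sketch(struct bfq_ttime *ttime,
					   u64 elapsed_ns)
{
	ttime->ttime_samples = (7 * ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7 * ttime->ttime_total + 256 * elapsed_ns) / 8;
	/* ttime_samples is >= 32 after the update above, never zero */
	ttime->ttime_mean = div_u64(ttime->ttime_total, ttime->ttime_samples);
}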
/**
 * struct bfq_queue - leaf schedulable entity.
 *
 * A bfq_queue is a leaf request queue; it can be associated with one
 * or more io_contexts, if it is async or shared between cooperating
 * processes. @cgroup holds a reference to the cgroup, to be sure that it
 * does not disappear while a bfqq still references it (mostly to avoid
 * races between request issuing and task migration followed by cgroup
 * destruction).
 * All the fields are protected by the queue lock of the containing bfqd.
 */
struct bfq_queue {
	/* reference counter */
	int ref;
	/* counter of references from other queues for delayed stable merge */
	int stable_ref;
	/* parent bfq_data */
	struct bfq_data *bfqd;

	/* current ioprio and ioprio class */
	unsigned short ioprio, ioprio_class;
	/* next ioprio and ioprio class if a change is in progress */
	unsigned short new_ioprio, new_ioprio_class;

	/* last total-service-time sample, see bfq_update_inject_limit() */
	u64 last_serv_time_ns;
	/* limit for request injection */
	unsigned int inject_limit;
	/* last time the inject limit has been decreased, in jiffies */
	unsigned long decrease_time_jif;

	/*
	 * Shared bfq_queue if queue is cooperating with one or more
	 * other queues.
	 */
	struct bfq_queue *new_bfqq;
	/* request-position tree member (see bfq_group's @rq_pos_tree) */
	struct rb_node pos_node;
	/* request-position tree root (see bfq_group's @rq_pos_tree) */
	struct rb_root *pos_root;

	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* number of sync and async requests queued */
	int queued[2];
	/* number of pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* entity representing this queue in the scheduler */
	struct bfq_entity entity;

	/* pointer to the weight counter associated with this entity */
	struct bfq_weight_counter *weight_counter;

	/* maximum budget allowed from the feedback mechanism */
	int max_budget;
	/* budget expiration (in jiffies) */
	unsigned long budget_timeout;

	/* number of requests on the dispatch list or inside the driver */
	int dispatched;

	/* status flags */
	unsigned long flags;

	/* node for active/idle bfqq list inside parent bfqd */
	struct list_head bfqq_list;

	/* associated @bfq_ttime struct */
	struct bfq_ttime ttime;

	/* when bfqq started to do I/O within the last observation window */
	u64 io_start_time;
	/* how long bfqq has remained empty during the last observ. window */
	u64 tot_idle_time;

	/*
	 * bit vector: a 1 for each seeky request in history (see the
	 * classification sketch after this struct)
	 */
	u32 seek_history;

	/* node for the device's burst list */
	struct hlist_node burst_list_node;

	/* position of the last request enqueued */
	sector_t last_request_pos;

	/*
	 * Number of consecutive pairs of request completion and
	 * arrival, such that the queue becomes idle after the
	 * completion, but the next request arrives within an idle
	 * time slice; used only if the queue's IO_bound flag has been
	 * cleared.
	 */
	unsigned int requests_within_timer;

	/* pid of the process owning the queue, used for logging purposes */
	pid_t pid;

	/*
	 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
	 * if the queue is shared.
	 */
	struct bfq_io_cq *bic;

	/* current maximum weight-raising time for this queue */
	unsigned long wr_cur_max_time;
	/*
	 * Minimum time instant such that, only if a new request is
	 * enqueued after this time instant in an idle @bfq_queue with
	 * no outstanding requests, then the task associated with the
	 * queue is deemed soft real-time (see the comments on the
	 * function bfq_bfqq_softrt_next_start())
	 */
	unsigned long soft_rt_next_start;
	/*
	 * Start time of the current weight-raising period if
	 * the @bfq_queue is being weight-raised, otherwise
	 * finish time of the last weight-raising period.
	 */
	unsigned long last_wr_start_finish;
	/* factor by which the weight of this queue is multiplied */
	unsigned int wr_coeff;
	/*
	 * Time of the last transition of the @bfq_queue from idle to
	 * backlogged.
	 */
	unsigned long last_idle_bklogged;
	/*
	 * Cumulative service received from the @bfq_queue since the
	 * last transition from idle to backlogged.
	 */
	unsigned long service_from_backlogged;
	/*
	 * Cumulative service received from the @bfq_queue since its
	 * last transition to weight-raised state.
	 */
	unsigned long service_from_wr;

	/*
	 * Value of wr start time when switching to soft rt
	 */
	unsigned long wr_start_at_switch_to_srt;

	unsigned long split_time; /* time of last split */

	unsigned long first_IO_time; /* time of first I/O for this queue */
	unsigned long creation_time; /* when this queue was created */

	/*
	 * Pointer to the waker queue for this queue, i.e., to the
	 * queue Q such that this queue happens to get new I/O right
	 * after some I/O request of Q is completed. For details, see
	 * the comments on the choice of the queue for injection in
	 * bfq_select_queue().
	 */
	struct bfq_queue *waker_bfqq;
	/* pointer to the curr. tentative waker queue, see bfq_check_waker() */
	struct bfq_queue *tentative_waker_bfqq;
	/* number of times the same tentative waker has been detected */
	unsigned int num_waker_detections;
	/* time when we started considering this waker */
	u64 waker_detection_started;

	/* node for woken_list, see below */
	struct hlist_node woken_list_node;
	/*
	 * Head of the list of the woken queues for this queue, i.e.,
	 * of the list of the queues for which this queue is a waker
	 * queue. This list is used to reset the waker_bfqq pointer in
	 * the woken queues when this queue exits.
	 */
	struct hlist_head woken_list;
};
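/*
 * Classification sketch for @seek_history above: one bit is shifted in
 * per request (1 = seeky), and the queue is deemed mostly seeky when
 * enough bits are set. Both the helper and the threshold are
 * illustrative assumptions, not the in-tree definitions; hweight32()
 * comes from <linux/bitops.h>.
 */
#define BFQQ_SEEKY_THRESH_SKETCH	19
static inline bool bfq_bfqq_mostly_seeky_sketch(struct bfq_queue *bfqq)
{
	return hweight32(bfqq->seek_history) > BFQQ_SEEKY_THRESH_SKETCH;
}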
/**
 * struct bfq_io_cq - per (request_queue, io_context) structure.
 */
struct bfq_io_cq {
	/* associated io_cq structure */
	struct io_cq icq; /* must be the first member */
	/* array of two process queues, the sync and the async */
	struct bfq_queue *bfqq[2];
	/* per (request_queue, blkcg) ioprio */
	int ioprio;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
	/*
	 * Snapshot of the has_short_ttime flag before merging; taken
	 * to remember its value while the queue is merged, so as to
	 * be able to restore it in case of split.
	 */
	bool saved_has_short_ttime;
	/*
	 * Same purpose as the previous two fields for the I/O bound
	 * classification of a queue.
	 */
	bool saved_IO_bound;

	u64 saved_io_start_time;
	u64 saved_tot_idle_time;

	/*
	 * Same purpose as the previous fields for the value of the
	 * field keeping the queue's belonging to a large burst
	 */
	bool saved_in_large_burst;
	/*
	 * True if the queue belonged to a burst list before its merge
	 * with another cooperating queue.
	 */
	bool was_in_burst_list;

	/*
	 * Save the weight when a merge occurs, to be able
	 * to restore it in case of split. If the weight is not
	 * correctly resumed when the queue is recycled,
	 * then the weight of the recycled queue could differ
	 * from the weight of the original queue.
	 */
	unsigned int saved_weight;

	/*
	 * Similar to previous fields: save wr information (see the
	 * save-state sketch after this struct).
	 */
	unsigned long saved_wr_coeff;
	unsigned long saved_last_wr_start_finish;
	unsigned long saved_service_from_wr;
	unsigned long saved_wr_start_at_switch_to_srt;
	unsigned int saved_wr_cur_max_time;
	struct bfq_ttime saved_ttime;

	/* Save also injection state */
	u64 saved_last_serv_time_ns;
	unsigned int saved_inject_limit;
	unsigned long saved_decrease_time_jif;

	/* candidate queue for a stable merge (due to close creation time) */
	struct bfq_queue *stable_merge_bfqq;

	bool stably_merged;	/* non-splittable if true */
	unsigned int requests;	/* Number of requests this process has in flight */
};
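/*
 * Save-state sketch for the saved_* fields above (a hypothetical
 * helper, mirroring in spirit what the merge path in bfq-iosched.c
 * does): snapshot the weight-raising state of @bfqq into @bic when the
 * queue gets merged, so that it can be restored if the queue is later
 * split back out.
 */
static inline void bfq_save_wr_state_sketch(struct bfq_io_cq *bic,
					    struct bfq_queue *bfqq)
{
	bic->saved_weight = bfqq->entity.orig_weight;
	bic->saved_wr_coeff = bfqq->wr_coeff;
	bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
	bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
	bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
	bic->saved_ttime = bfqq->ttime;
}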
/**
 * struct bfq_data - per-device data structure.
 *
 * All the fields are protected by @lock.
 */
struct bfq_data {
	/* device request queue */
	struct request_queue *queue;
	/* dispatch queue */
	struct list_head dispatch;

	/* root bfq_group for the device */
	struct bfq_group *root_group;

	/*
	 * rbtree of weight counters of @bfq_queues, sorted by
	 * weight. Used to keep track of whether all @bfq_queues have
	 * the same weight. The tree contains one counter for each
	 * distinct weight associated with some active and not
	 * weight-raised @bfq_queue (see the comments on the functions
	 * bfq_weights_tree_[add|remove] for further details).
	 */
	struct rb_root_cached queue_weights_tree;

	/*
	 * Number of groups with at least one process that
	 * has at least one request waiting for completion. Note that
	 * this also accounts for requests already dispatched, but not
	 * yet completed. Therefore this number of groups may be
	 * larger than the number of active groups, as a group is
	 * considered active only if its corresponding entity has
	 * queues with at least one request queued. This
	 * number is used to decide whether a scenario is symmetric.
	 * For a detailed explanation see the comments on the
	 * computation of the variable asymmetric_scenario in the
	 * function bfq_better_to_idle().
	 *
	 * However, it is hard to compute this number exactly, for
	 * groups with multiple processes. Consider a group
	 * that is inactive, i.e., that has no process with
	 * pending I/O inside BFQ queues. Then suppose that
	 * num_groups_with_pending_reqs is still accounting for this
	 * group, because the group has processes with some
	 * I/O request still in flight. num_groups_with_pending_reqs
	 * should be decremented when the in-flight request of the
	 * last process is finally completed (assuming that
	 * nothing else has changed for the group in the meantime, in
	 * terms of composition of the group and active/inactive state
	 * of child groups and processes). To accomplish this, an
	 * additional pending-request counter must be added to
	 * entities, and must be updated correctly. To avoid this
	 * additional field and operations, we resort to the following
	 * tradeoff between simplicity and accuracy: for an inactive
	 * group that is still counted in num_groups_with_pending_reqs,
	 * we decrement num_groups_with_pending_reqs when the first
	 * process of the group remains with no request waiting for
	 * completion.
	 *
	 * Even this simpler decrement strategy requires a little
	 * carefulness: to avoid multiple decrements, we flag a group,
	 * more precisely an entity representing a group, as still
	 * counted in num_groups_with_pending_reqs when it becomes
	 * inactive. Then, when the first queue of the
	 * entity remains with no request waiting for completion,
	 * num_groups_with_pending_reqs is decremented, and this flag
	 * is reset. After this flag is reset for the entity,
	 * num_groups_with_pending_reqs won't be decremented any
	 * longer in case a new queue of the entity remains
	 * with no request waiting for completion.
	 */
	unsigned int num_groups_with_pending_reqs;

	/*
	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
	 * requests (including the queue in service, even if it is
	 * idling); see the bfq_tot_busy_queues() sketch after this
	 * struct.
	 */
	unsigned int busy_queues[3];
	/* number of weight-raised busy @bfq_queues */
	int wr_busy_queues;
	/* number of queued requests */
	int queued;
	/* number of requests dispatched and waiting for completion */
	int rq_in_driver;

	/* true if the device is non-rotational and performs queueing */
	bool nonrot_with_queueing;

	/*
	 * Maximum number of requests in driver in the last
	 * @hw_tag_samples completed requests.
	 */
	int max_rq_in_driver;
	/* number of samples used to calculate hw_tag */
	int hw_tag_samples;
	/* flag set to one if the driver is showing queueing behavior */
	int hw_tag;
	/* number of budgets assigned */
	int budgets_assigned;

	/*
	 * Timer set when idling (waiting) for the next request from
	 * the queue in service.
	 */
	struct hrtimer idle_slice_timer;

	/* bfq_queue in service */
	struct bfq_queue *in_service_queue;

	/* on-disk position of the last served request */
	sector_t last_position;

	/* position of the last served request for the in-service queue */
	sector_t in_serv_last_pos;

	/* time of last request completion (ns) */
	u64 last_completion;

	/* bfqq owning the last completed rq */
	struct bfq_queue *last_completed_rq_bfqq;

	/* last bfqq created, among those in the root group */
	struct bfq_queue *last_bfqq_created;

	/* time of last transition from empty to non-empty (ns) */
	u64 last_empty_occupied_ns;

	/*
	 * Flag set to activate the sampling of the total service time
	 * of a just-arrived first I/O request (see
	 * bfq_update_inject_limit()). This will cause the setting of
	 * waited_rq when the request is finally dispatched.
	 */
	bool wait_dispatch;
	/*
	 * If set, then bfq_update_inject_limit() is invoked when
	 * waited_rq is eventually completed.
	 */
	struct request *waited_rq;
	/*
	 * True if some request has been injected during the last service hole.
	 */
	bool rqs_injected;

	/* time of first rq dispatch in current observation interval (ns) */
	u64 first_dispatch;
	/* time of last rq dispatch in current observation interval (ns) */
	u64 last_dispatch;

	/* beginning of the last budget */
	ktime_t last_budget_start;
	/* beginning of the last idle slice */
	ktime_t last_idling_start;
	unsigned long last_idling_start_jiffies;

	/* number of samples in current observation interval */
	int peak_rate_samples;
	/* num of samples of seq dispatches in current observation interval */
	u32 sequential_samples;
	/* total num of sectors transferred in current observation interval */
	u64 tot_sectors_dispatched;
	/* max rq size seen during current observation interval (sectors) */
	u32 last_rq_max_size;
	/* time elapsed from first dispatch in current observ. interval (us) */
	u64 delta_from_first;
	/*
	 * Current estimate of the device peak rate, measured in
	 * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
	 * BFQ_RATE_SHIFT is performed to increase precision in
	 * fixed-point calculations.
	 */
	u32 peak_rate;
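	/*
	 * For illustration (an assumption about the fixed-point
	 * convention above, with BFQ_RATE_SHIFT defined in the
	 * scheduler implementation): a rate in sectors/sec would be
	 * recovered from @peak_rate as
	 *
	 *	rate_sec = ((u64)peak_rate * USEC_PER_SEC)
	 *			>> BFQ_RATE_SHIFT;
	 */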
	/* maximum budget allotted to a bfq_queue before rescheduling */
	int bfq_max_budget;

	/* list of all the bfq_queues active on the device */
	struct list_head active_list;
	/* list of all the bfq_queues idle on the device */
	struct list_head idle_list;

	/*
	 * Timeout for async/sync requests; when it fires, requests
	 * are served in fifo order.
	 */
	u64 bfq_fifo_expire[2];
	/* weight of backward seeks wrt forward ones */
	unsigned int bfq_back_penalty;
	/* maximum allowed backward seek */
	unsigned int bfq_back_max;
	/* maximum idling time */
	u32 bfq_slice_idle;

	/* user-configured max budget value (0 for auto-tuning) */
	int bfq_user_max_budget;
	/*
	 * Timeout for bfq_queues to consume their budget; used to
	 * prevent seeky queues from imposing long latencies on
	 * sequential or quasi-sequential ones (this also implies that
	 * seeky queues cannot receive guarantees in the service
	 * domain; after a timeout they are charged for the time they
	 * have been in service, to preserve fairness among them, but
	 * without service-domain guarantees).
	 */
	unsigned int bfq_timeout;

	/*
	 * Force device idling whenever needed to provide accurate
	 * service guarantees, without caring about throughput
	 * issues. CAVEAT: this may even increase latencies, in case
	 * of useless idling for processes that have stopped doing I/O.
	 */
	bool strict_guarantees;

	/*
	 * Last time at which a queue entered the current burst of
	 * queues being activated shortly after each other; for more
	 * details about this and the following parameters related to
	 * a burst of activations, see the comments on the function
	 * bfq_handle_burst.
	 */
	unsigned long last_ins_in_burst;
	/*
	 * Reference time interval used to decide whether a queue has
	 * been activated shortly after @last_ins_in_burst.
	 */
	unsigned long bfq_burst_interval;
	/* number of queues in the current burst of queue activations */
	int burst_size;

	/* common parent entity for the queues in the burst */
	struct bfq_entity *burst_parent_entity;
	/*
	 * Maximum burst size above which the current queue-activation
	 * burst is deemed as 'large'.
	 */
	unsigned long bfq_large_burst_thresh;
	/* true if a large queue-activation burst is in progress */
	bool large_burst;
	/*
	 * Head of the burst list (as for the above fields, more
	 * details in the comments on the function bfq_handle_burst).
	 */
	struct hlist_head burst_list;

	/* if set to true, low-latency heuristics are enabled */
	bool low_latency;
	/*
	 * Maximum factor by which the weight of a weight-raised queue
	 * is multiplied.
	 */
	unsigned int bfq_wr_coeff;
	/* maximum duration of a weight-raising period (jiffies) */
	unsigned int bfq_wr_max_time;

	/* Maximum weight-raising duration for soft real-time processes */
	unsigned int bfq_wr_rt_max_time;
	/*
	 * Minimum idle period after which weight-raising may be
	 * reactivated for a queue (in jiffies).
	 */
	unsigned int bfq_wr_min_idle_time;
	/*
	 * Minimum period between request arrivals after which
	 * weight-raising may be reactivated for an already busy async
	 * queue (in jiffies).
	 */
	unsigned long bfq_wr_min_inter_arr_async;

	/* max service rate for a soft real-time queue, in sectors/sec */
	unsigned int bfq_wr_max_softrt_rate;
	/*
	 * Cached value of the product ref_rate*ref_wr_duration, used
	 * for computing the maximum duration of weight raising
	 * automatically.
	 */
	u64 rate_dur_prod;

	/* fallback dummy bfqq for extreme OOM conditions */
	struct bfq_queue oom_bfqq;

	spinlock_t lock;

	/*
	 * bic associated with the task issuing current bio for
	 * merging. This and the next field are used as a support to
	 * be able to perform the bic lookup, needed by bio-merge
	 * functions, before the scheduler lock is taken, and thus
	 * avoid taking the request-queue lock while the scheduler
	 * lock is being held.
	 */
	struct bfq_io_cq *bio_bic;
	/* bfqq associated with the task issuing current bio for merging */
	struct bfq_queue *bio_bfqq;

	/*
	 * Depth limits used in bfq_limit_depth (see comments on the
	 * function)
	 */
	unsigned int word_depths[2][2];
	unsigned int full_depth_shift;
};
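/*
 * Sketch of the aggregation that the bfq_tot_busy_queues() interface,
 * declared later in this header, performs on the per-class counters
 * above (assuming a plain sum over the three classes; the in-tree
 * definition lives in the scheduler implementation):
 */
static inline unsigned int bfq_tot_busy_queues_sketch(struct bfq_data *bfqd)
{
	return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
	       bfqd->busy_queues[2];
}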
enum bfqq_state_flags {
	BFQQF_just_created = 0,	/* queue just allocated */
	BFQQF_busy,		/* has requests or is in service */
	BFQQF_wait_request,	/* waiting for a request */
	BFQQF_non_blocking_wait_rq, /*
				     * waiting for a request
				     * without idling the device
				     */
	BFQQF_fifo_expire,	/* FIFO checked in this slice */
	BFQQF_has_short_ttime,	/* queue has a short think time */
	BFQQF_sync,		/* synchronous queue */
	BFQQF_IO_bound,		/*
				 * bfqq has timed out at least once
				 * having consumed at most 2/10 of
				 * its budget
				 */
	BFQQF_in_large_burst,	/*
				 * bfqq activated in a large burst,
				 * see comments to bfq_handle_burst.
				 */
	BFQQF_softrt_update,	/*
				 * may need softrt-next-start
				 * update
				 */
	BFQQF_coop,		/* bfqq is shared */
	BFQQF_split_coop,	/* shared bfqq will be split */
};

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq);			\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq);			\
int bfq_bfqq_##name(const struct bfq_queue *bfqq);

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
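/*
 * For reference, each BFQ_BFQQ_FNS(name) line above expands to three
 * declarations; e.g., BFQ_BFQQ_FNS(busy) yields:
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq);
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq);
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq);
 *
 * The definitions live in bfq-iosched.c and, by convention, set,
 * clear and test bit BFQQF_busy in bfqq->flags.
 */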
/* Expiration reasons. */
enum bfqq_expiration {
	BFQQE_TOO_IDLE = 0,	/*
				 * queue has been idling for
				 * too long
				 */
	BFQQE_BUDGET_TIMEOUT,	/* budget took too long to be used */
	BFQQE_BUDGET_EXHAUSTED,	/* budget consumed */
	BFQQE_NO_MORE_REQUESTS,	/* the queue has no more requests */
	BFQQE_PREEMPTED		/* preemption in progress */
};

struct bfq_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct bfqg_stats {
	/* basic stats */
	struct blkg_rwstat		bytes;
	struct blkg_rwstat		ios;
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct bfq_stat			time;
	/* sum of number of ios queued across all samples */
	struct bfq_stat			avg_queue_size_sum;
	/* count of samples taken for average */
	struct bfq_stat			avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct bfq_stat			dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct bfq_stat			group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct bfq_stat			idle_time;
	/* total time with empty current active q with other requests queued */
	struct bfq_stat			empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	u64				start_group_wait_time;
	u64				start_idle_time;
	u64				start_empty_time;
	uint16_t			flags;
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
};

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
 *
 * @pd: policy data this structure embeds; must be the first member
 * @weight: weight of the bfq_group
 */
struct bfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data pd;

	unsigned int weight;
};

/**
 * struct bfq_group - per (device, cgroup) data structure.
 * @entity: schedulable entity to insert into the parent group sched_data.
 * @sched_data: own sched_data, to contain child entities (they may be
 *              both bfq_queues and bfq_groups).
 * @bfqd: the bfq_data for the device this group acts upon.
 * @async_bfqq: array of async queues for all the tasks belonging to
 *              the group, one queue per ioprio value per ioprio_class,
 *              except for the idle class, which has only one queue
 *              (see the indexing sketch after these definitions).
 * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
 * @my_entity: pointer to @entity, %NULL for the toplevel group; used
 *             to avoid too many special cases during group creation/
 *             migration.
 * @stats: stats for this bfqg.
 * @active_entities: number of active entities belonging to the group;
 *                   unused for the root group. Used to know whether there
 *                   are groups with more than one active @bfq_entity
 *                   (see the comments on the function
 *                   bfq_bfqq_may_idle()).
 * @rq_pos_tree: rbtree sorted by next_request position, used when
 *               determining if two or more queues have interleaving
 *               requests (see bfq_find_close_cooperator()).
 *
 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
 * there is a set of bfq_groups, each one collecting the lower-level
 * entities belonging to the group that are acting on the same device.
 *
 * Locking works as follows:
 * o @bfqd is protected by the queue lock, RCU is used to access it
 *   from the readers.
 * o All the other fields are protected by the @bfqd queue lock.
 */
struct bfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
	char blkg_path[128];

	/* reference counter (see comments in bfq_bic_update_cgroup) */
	int ref;
	/* Is bfq_group still online? */
	bool online;

	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	struct bfq_data *bfqd;

	struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
	struct bfq_queue *async_idle_bfqq;

	struct bfq_entity *my_entity;

	int active_entities;
	int num_queues_with_pending_reqs;

	struct rb_root rq_pos_tree;

	struct bfqg_stats stats;
};

#else
struct bfq_group {
	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
	struct bfq_queue *async_idle_bfqq;

	struct rb_root rq_pos_tree;
};
#endif
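/*
 * Indexing sketch for the async-queue arrays above (an illustrative
 * helper, not the in-tree one): one slot per ioprio for the RT and BE
 * classes, and a single shared slot for the idle class, matching the
 * layout described in the kernel-doc above.
 */
static inline struct bfq_queue **
bfq_async_slot_sketch(struct bfq_group *bfqg, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &bfqg->async_bfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &bfqg->async_bfqq[1][ioprio];
	default:
		return &bfqg->async_idle_bfqq;
	}
}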
/* --------------- main algorithm interface ----------------- */

#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })

extern const int bfq_timeout;

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_queue *bfqq);
void bfq_weights_tree_remove(struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		     bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
void bfq_put_cooperator(struct bfq_queue *bfqq);
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_schedule_dispatch(struct bfq_data *bfqd);
void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);

/* ------------ end of main algorithm interface -------------- */

/* ---------------- cgroups-support interface ---------------- */

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_CGROUP_DEBUG
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
#endif

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
void bfqg_and_blkg_put(struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
extern struct cftype bfq_blkcg_legacy_files[];
extern struct cftype bfq_blkg_files[];
extern struct blkcg_policy blkcg_policy_bfq;
#endif

/* ------------- end of cgroups-support interface ------------- */

/* - interface of the internal hierarchical B-WF2Q+ scheduler - */

#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* both the next loops stop at one of the child entities of the root group */
#define for_each_entity(entity)	\
	for (; entity ; entity = entity->parent)

/*
 * For each iteration, compute the parent in advance, so as to be safe
 * if the entity is deallocated during the iteration. Such a
 * deallocation may happen as a consequence of a bfq_put_queue that
 * frees the bfq_queue containing the entity.
 */
#define for_each_entity_safe(entity, parent) \
	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)

#else /* CONFIG_BFQ_GROUP_IOSCHED */
/*
 * The next two macros are fake loops when cgroups support is not
 * enabled. In fact, in such a case, there is only one level to go up
 * (to reach the root group).
 */
#define for_each_entity(entity)	\
	for (; entity ; entity = NULL)

#define for_each_entity_safe(entity, parent) \
	for (parent = NULL; entity ; entity = parent)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
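/*
 * Usage sketch for the iterators above (an illustrative helper, not
 * in-tree code): count how many levels separate an entity from the
 * root group. With cgroups enabled this walks the parent chain; in
 * the flat case it visits the entity alone.
 */
static inline int bfq_entity_depth_sketch(struct bfq_entity *entity)
{
	int depth = 0;

	for_each_entity(entity)
		depth++;

	return depth;
}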
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
void bfq_put_idle_entity(struct bfq_service_tree *st,
			 struct bfq_entity *entity);
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too);
void bfq_bfqq_served(struct bfq_queue *bfqq, int served);
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms);
bool __bfq_deactivate_entity(struct bfq_entity *entity,
			     bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		      bool expiration);
void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration);
void bfq_add_bfqq_busy(struct bfq_queue *bfqq);
void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq);
void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq);

/* --------------- end of interface of B-WF2Q+ ---------------- */

/* Logging facilities. */
static inline void bfq_bfqq_name(struct bfq_queue *bfqq, char *str, int len)
{
	char type = bfq_bfqq_sync(bfqq) ? 'S' : 'A';

	if (bfqq->pid != -1)
		snprintf(str, len, "bfq%d%c", bfqq->pid, type);
	else
		snprintf(str, len, "bfqSHARED-%c", type);
}

#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	char pid_str[MAX_BFQQ_NAME_LENGTH];				\
	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
		break;							\
	bfq_bfqq_name((bfqq), pid_str, MAX_BFQQ_NAME_LENGTH);		\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
			&bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css,	\
			"%s " fmt, pid_str, ##args);			\
} while (0)

#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
		&bfqg_to_blkg(bfqg)->blkcg->css, fmt, ##args);		\
} while (0)

#else /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	char pid_str[MAX_BFQQ_NAME_LENGTH];				\
	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
		break;							\
	bfq_bfqq_name((bfqq), pid_str, MAX_BFQQ_NAME_LENGTH);		\
	blk_add_trace_msg((bfqd)->queue, "%s " fmt, pid_str, ##args);	\
} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {} while (0)

#endif /* CONFIG_BFQ_GROUP_IOSCHED */
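/*
 * Illustrative call-site usage of the logging macros above (the
 * message and sector value are hypothetical examples); messages show
 * up in blktrace only when note messages are enabled on the device:
 *
 *	bfq_log_bfqq(bfqd, bfqq, "dispatch rq at sector %llu",
 *		     (unsigned long long)blk_rq_pos(rq));
 */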
#define bfq_log(bfqd, fmt, args...) \
	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)

#endif /* _BFQ_H */