/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Header file for the BFQ I/O scheduler: data structures and
 * prototypes of interface functions among BFQ components.
 */
#ifndef _BFQ_H
#define _BFQ_H

#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>

#define BFQ_IOPRIO_CLASSES	3
#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)

#define BFQ_MIN_WEIGHT			1
#define BFQ_MAX_WEIGHT			1000
#define BFQ_WEIGHT_CONVERSION_COEFF	10

#define BFQ_DEFAULT_QUEUE_IOPRIO	4

#define BFQ_WEIGHT_LEGACY_DFL	100
#define BFQ_DEFAULT_GRP_IOPRIO	0
#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE

#define MAX_PID_STR_LENGTH 12

/*
 * Soft real-time applications are far more latency sensitive than
 * interactive ones. Over-raise the weight of the former to privilege
 * them over the latter.
 */
#define BFQ_SOFTRT_WEIGHT_FACTOR	100

struct bfq_entity;

/**
 * struct bfq_service_tree - per ioprio_class service tree.
 *
 * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
 * ioprio_class has its own independent scheduler, and so its own
 * bfq_service_tree.  All the fields are protected by the queue lock
 * of the containing bfqd.
 */
struct bfq_service_tree {
	/* tree for active entities (i.e., those backlogged) */
	struct rb_root active;
	/* tree for idle entities (i.e., not backlogged, with V < F_i) */
	struct rb_root idle;

	/* idle entity with minimum F_i */
	struct bfq_entity *first_idle;
	/* idle entity with maximum F_i */
	struct bfq_entity *last_idle;

	/* scheduler virtual time */
	u64 vtime;
	/* scheduler weight sum; active and idle entities contribute to it */
	unsigned long wsum;
};

/**
 * struct bfq_sched_data - multi-class scheduler.
 *
 * bfq_sched_data is the basic scheduler queue.  It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
 * intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher-priority classes are served before all the
 * requests from lower-priority classes; among requests of the same
 * class, requests are served according to B-WF2Q+.
 *
 * The schedule is implemented by the service trees, plus the field
 * @next_in_service, which points to the entity on the active trees
 * that will be served next, if 1) no changes in the schedule occur
 * before the current in-service entity is expired, 2) the in-service
 * queue becomes idle when it expires, and 3) if the entity pointed to
 * by in_service_entity is not a queue, then the in-service child
 * entity of the entity pointed to by in_service_entity becomes idle
 * on expiration. This peculiar definition allows for the following
 * optimization, not yet exploited: while a given entity is still in
 * service, we already know which is the best candidate for next
 * service among the other active entities in the same parent
 * entity. We can then quickly compare the timestamps of the
 * in-service entity with those of such a best candidate.
 *
 * All fields are protected by the lock of the containing bfqd.
 */
struct bfq_sched_data {
	/* entity in service */
	struct bfq_entity *in_service_entity;
	/* head-of-line entity (see comments above) */
	struct bfq_entity *next_in_service;
	/* array of service trees, one per ioprio_class */
	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
	/* last time CLASS_IDLE was served */
	unsigned long bfq_class_idle_last_service;
};
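
/*
 * Illustrative sketch, not the scheduler's actual lookup code: the
 * class-priority rule documented above amounts to scanning the
 * service trees in array order (RT first) and stopping at the first
 * one with active entities. Helper name is made up for this example.
 */
static inline struct bfq_service_tree *
bfq_example_first_busy_class(struct bfq_sched_data *sd)
{
	int i;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		if (!RB_EMPTY_ROOT(&sd->service_tree[i].active))
			return &sd->service_tree[i];
	return NULL;	/* no backlogged entity in any class */
}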

/**
 * struct bfq_weight_counter - counter of the number of active queues
 *                             with a given weight.
 */
struct bfq_weight_counter {
	unsigned int weight; /* weight of the queues this counter refers to */
	unsigned int num_active; /* nr of active queues with this weight */
	/*
	 * Weights tree member (see bfq_data's @queue_weights_tree)
	 */
	struct rb_node weights_node;
};
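
/*
 * Illustrative use of the weights tree (a sketch, not the kernel's
 * actual symmetry check): one counter exists per distinct weight, so
 * all active queues share the same weight iff the tree holds at most
 * one node. Helper name is made up for this example.
 */
static inline bool bfq_example_single_weight(struct rb_root_cached *root)
{
	struct rb_node *first = rb_first_cached(root);

	return !first || !rb_next(first);
}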

/**
 * struct bfq_entity - schedulable entity.
 *
 * A bfq_entity is used to represent either a bfq_queue (leaf node in the
 * cgroup hierarchy) or a bfq_group in the upper-level scheduler.  Each
 * entity belongs to the sched_data of the parent group in the cgroup
 * hierarchy.  Non-leaf entities also have their own sched_data, stored
 * in @my_sched_data.
 *
 * Each entity stores its priority values independently; this would
 * allow different weights on different devices, but this
 * functionality is not yet exported to userspace.  Priorities and
 * weights are updated lazily, first storing the new values into the
 * new_* fields, then setting the @prio_changed flag.  As soon as
 * there is a transition in the entity state that allows the priority
 * update to take place, the effective and the requested priority
 * values are synchronized.
 *
 * Unless cgroups are used, the weight value is calculated from the
 * ioprio to export the same interface as CFQ.  When dealing with
 * "well-behaved" queues (i.e., queues that do not spend too much
 * time consuming their budget and have true sequential behavior, and
 * when there are no external factors breaking anticipation), the
 * relative weights at each level of the cgroups hierarchy should be
 * guaranteed.  All the fields are protected by the queue lock of the
 * containing bfqd.
 */
struct bfq_entity {
	/* service_tree member */
	struct rb_node rb_node;

	/*
	 * Flag, true if the entity is on a tree (either the active or
	 * the idle one of its service_tree) or is in service.
	 */
	bool on_st;

	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
	u64 start, finish;

	/* tree the entity is enqueued into; %NULL if not on a tree */
	struct rb_root *tree;

	/*
	 * minimum start time of the (active) subtree rooted at this
	 * entity; used for O(log N) lookups into active trees
	 */
	u64 min_start;

	/* amount of service received during the last service slot */
	int service;

	/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
	int budget;

	/* weight of the queue */
	int weight;
	/* next weight if a change is in progress */
	int new_weight;

	/* original weight, used to implement weight boosting */
	int orig_weight;

	/* parent entity, for hierarchical scheduling */
	struct bfq_entity *parent;

	/*
	 * For non-leaf nodes in the hierarchy, the associated
	 * scheduler queue, %NULL on leaf nodes.
	 */
	struct bfq_sched_data *my_sched_data;
	/* the scheduler queue this entity belongs to */
	struct bfq_sched_data *sched_data;

	/* flag, set to request a weight, ioprio or ioprio_class change */
	int prio_changed;

	/* flag, set if the entity is counted in groups_with_pending_reqs */
	bool in_groups_with_pending_reqs;
};
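
/*
 * Illustrative helper, not a kernel function: the B-WF2Q+ finish
 * timestamp of an entity, computed from the formula documented above
 * for @budget (F_i = S_i + budget / weight, in [sectors/weight]
 * units). The in-tree computation, in bfq-wf2q.c, additionally scales
 * the numerator by a fixed-point shift for precision. Helper name is
 * made up for this example.
 */
static inline u64 bfq_example_finish_time(const struct bfq_entity *entity)
{
	return entity->start + div_u64((u64)entity->budget, entity->weight);
}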

struct bfq_group;

/**
 * struct bfq_ttime - per process thinktime stats.
 */
struct bfq_ttime {
	/* completion time of the last request */
	u64 last_end_request;

	/* total process thinktime */
	u64 ttime_total;
	/* number of thinktime samples */
	unsigned long ttime_samples;
	/* average process thinktime */
	u64 ttime_mean;
};
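
/*
 * Sanity-check sketch of the relation among the fields above:
 * @ttime_mean tracks @ttime_total / @ttime_samples (the scheduler
 * keeps it updated incrementally in bfq-iosched.c). Helper name is
 * made up for this example.
 */
static inline u64 bfq_example_ttime_mean(const struct bfq_ttime *ttime)
{
	if (!ttime->ttime_samples)
		return 0;	/* no samples yet, mean undefined */
	return div64_ul(ttime->ttime_total, ttime->ttime_samples);
}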

/**
 * struct bfq_queue - leaf schedulable entity.
 *
 * A bfq_queue is a leaf request queue; it can be associated with one
 * or more io_contexts if it is async or shared between cooperating
 * processes. @cgroup holds a reference to the cgroup, to be sure that it
 * does not disappear while a bfqq still references it (mostly to avoid
 * races between request issuing and task migration followed by cgroup
 * destruction).
 * All the fields are protected by the queue lock of the containing bfqd.
 */
struct bfq_queue {
	/* reference counter */
	int ref;
	/* parent bfq_data */
	struct bfq_data *bfqd;

	/* current ioprio and ioprio class */
	unsigned short ioprio, ioprio_class;
	/* next ioprio and ioprio class if a change is in progress */
	unsigned short new_ioprio, new_ioprio_class;

	/* last total-service-time sample, see bfq_update_inject_limit() */
	u64 last_serv_time_ns;
	/* limit for request injection */
	unsigned int inject_limit;
	/* last time the inject limit has been decreased, in jiffies */
	unsigned long decrease_time_jif;

	/*
	 * Shared bfq_queue if queue is cooperating with one or more
	 * other queues.
	 */
	struct bfq_queue *new_bfqq;
	/* request-position tree member (see bfq_group's @rq_pos_tree) */
	struct rb_node pos_node;
	/* request-position tree root (see bfq_group's @rq_pos_tree) */
	struct rb_root *pos_root;

	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* number of sync and async requests queued */
	int queued[2];
	/* number of requests currently allocated */
	int allocated;
	/* number of pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* entity representing this queue in the scheduler */
	struct bfq_entity entity;

	/* pointer to the weight counter associated with this entity */
	struct bfq_weight_counter *weight_counter;

	/* maximum budget allowed from the feedback mechanism */
	int max_budget;
	/* budget expiration (in jiffies) */
	unsigned long budget_timeout;

	/* number of requests on the dispatch list or inside driver */
	int dispatched;

	/* status flags */
	unsigned long flags;

	/* node for active/idle bfqq list inside parent bfqd */
	struct list_head bfqq_list;

	/* associated @bfq_ttime struct */
	struct bfq_ttime ttime;

	/* bit vector: a 1 for each seeky request in history */
	u32 seek_history;

	/* node for the device's burst list */
	struct hlist_node burst_list_node;

	/* position of the last request enqueued */
	sector_t last_request_pos;

	/*
	 * Number of consecutive pairs of request completion and
	 * arrival, such that the queue becomes idle after the
	 * completion, but the next request arrives within an idle
	 * time slice; used only if the queue's IO_bound flag has been
	 * cleared.
	 */
	unsigned int requests_within_timer;

	/* pid of the process owning the queue, used for logging purposes */
	pid_t pid;

	/*
	 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
	 * if the queue is shared.
	 */
	struct bfq_io_cq *bic;

	/* current maximum weight-raising time for this queue */
	unsigned long wr_cur_max_time;
	/*
	 * Minimum time instant such that, only if a new request is
	 * enqueued after this time instant in an idle @bfq_queue with
	 * no outstanding requests, then the task associated with the
	 * queue is deemed as soft real-time (see the comments on
	 * the function bfq_bfqq_softrt_next_start()).
	 */
	unsigned long soft_rt_next_start;
	/*
	 * Start time of the current weight-raising period if
	 * the @bfq_queue is being weight-raised, otherwise
	 * finish time of the last weight-raising period.
	 */
	unsigned long last_wr_start_finish;
	/* factor by which the weight of this queue is multiplied */
	unsigned int wr_coeff;
	/*
	 * Time of the last transition of the @bfq_queue from idle to
	 * backlogged.
	 */
	unsigned long last_idle_bklogged;
	/*
	 * Cumulative service received from the @bfq_queue since the
	 * last transition from idle to backlogged.
	 */
	unsigned long service_from_backlogged;
	/*
	 * Cumulative service received from the @bfq_queue since its
	 * last transition to weight-raised state.
	 */
	unsigned long service_from_wr;

	/*
	 * Value of wr start time when switching to soft rt
	 */
	unsigned long wr_start_at_switch_to_srt;

	unsigned long split_time; /* time of last split */

	unsigned long first_IO_time; /* time of first I/O for this queue */

	/* max service rate measured so far */
	u32 max_service_rate;
};

/**
 * struct bfq_io_cq - per (request_queue, io_context) structure.
 */
struct bfq_io_cq {
	/* associated io_cq structure */
	struct io_cq icq; /* must be the first member */
	/* array of two process queues, the sync and the async */
	struct bfq_queue *bfqq[2];
	/* per (request_queue, blkcg) ioprio */
	int ioprio;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
	/*
	 * Snapshot of the has_short_ttime flag before merging; taken
	 * to remember its value while the queue is merged, so as to
	 * be able to restore it in case of split.
	 */
	bool saved_has_short_ttime;
	/*
	 * Same purpose as the previous field for the I/O bound
	 * classification of a queue.
	 */
	bool saved_IO_bound;

	/*
	 * Same purpose as the previous fields for the flag recording
	 * whether the queue belongs to a large burst.
	 */
	bool saved_in_large_burst;
	/*
	 * True if the queue belonged to a burst list before its merge
	 * with another cooperating queue.
	 */
	bool was_in_burst_list;

	/*
	 * Save the weight when a merge occurs, to be able
	 * to restore it in case of split. If the weight is not
	 * correctly resumed when the queue is recycled,
	 * then the weight of the recycled queue could differ
	 * from the weight of the original queue.
	 */
	unsigned int saved_weight;

	/*
	 * Similar to previous fields: save wr information.
	 */
	unsigned long saved_wr_coeff;
	unsigned long saved_last_wr_start_finish;
	unsigned long saved_wr_start_at_switch_to_srt;
	unsigned int saved_wr_cur_max_time;
	struct bfq_ttime saved_ttime;
};
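
/*
 * The sync/async pairing above means a lookup just indexes @bfqq with
 * a boolean; a minimal sketch of such an accessor follows (the real
 * bic_to_bfqq(), declared later in this header, lives in
 * bfq-iosched.c). Helper name is made up for this example.
 */
static inline struct bfq_queue *
bfq_example_bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}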

/**
 * struct bfq_data - per-device data structure.
 *
 * All the fields are protected by @lock.
 */
struct bfq_data {
	/* device request queue */
	struct request_queue *queue;
	/* dispatch queue */
	struct list_head dispatch;

	/* root bfq_group for the device */
	struct bfq_group *root_group;

	/*
	 * rbtree of weight counters of @bfq_queues, sorted by
	 * weight. Used to keep track of whether all @bfq_queues have
	 * the same weight. The tree contains one counter for each
	 * distinct weight associated with some active and not
	 * weight-raised @bfq_queue (see the comments to the functions
	 * bfq_weights_tree_[add|remove] for further details).
	 */
	struct rb_root_cached queue_weights_tree;

	/*
	 * Number of groups with at least one descendant process that
	 * has at least one request waiting for completion. Note that
	 * this also accounts for requests already dispatched, but not
	 * yet completed. Therefore this number of groups may differ
	 * from (be larger than) the number of active groups, as a
	 * group is considered active only if its corresponding entity
	 * has descendant queues with at least one request queued. This
	 * number is used to decide whether a scenario is symmetric.
	 * For a detailed explanation see comments on the computation
	 * of the variable asymmetric_scenario in the function
	 * bfq_better_to_idle().
	 *
	 * However, it is hard to compute this number exactly, for
	 * groups with multiple descendant processes. Consider a group
	 * that is inactive, i.e., that has no descendant process with
	 * pending I/O inside BFQ queues. Then suppose that
	 * num_groups_with_pending_reqs is still accounting for this
	 * group, because the group has descendant processes with some
	 * I/O request still in flight. num_groups_with_pending_reqs
	 * should be decremented when the in-flight request of the
	 * last descendant process is finally completed (assuming that
	 * nothing else has changed for the group in the meantime, in
	 * terms of composition of the group and active/inactive state
	 * of child groups and processes). To accomplish this, an
	 * additional pending-request counter would have to be added
	 * to entities, and updated correctly. To avoid this additional
	 * field and the operations on it, we resort to the following
	 * tradeoff between simplicity and accuracy: for an inactive
	 * group that is still counted in num_groups_with_pending_reqs,
	 * we decrement num_groups_with_pending_reqs when the first
	 * descendant process of the group remains with no request
	 * waiting for completion.
	 *
	 * Even this simpler decrement strategy requires a little
	 * carefulness: to avoid multiple decrements, we flag a group,
	 * more precisely an entity representing a group, as still
	 * counted in num_groups_with_pending_reqs when it becomes
	 * inactive. Then, when the first descendant queue of the
	 * entity remains with no request waiting for completion,
	 * num_groups_with_pending_reqs is decremented, and this flag
	 * is reset. After this flag is reset for the entity,
	 * num_groups_with_pending_reqs won't be decremented any
	 * longer in case a new descendant queue of the entity remains
	 * with no request waiting for completion.
	 */
	unsigned int num_groups_with_pending_reqs;

	/*
	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
	 * requests (including the queue in service, even if it is
	 * idling).
	 */
	unsigned int busy_queues[3];
	/* number of weight-raised busy @bfq_queues */
	int wr_busy_queues;
	/* number of queued requests */
	int queued;
	/* number of requests dispatched and waiting for completion */
	int rq_in_driver;

	/* true if the device is non-rotational and performs queueing */
	bool nonrot_with_queueing;

	/*
	 * Maximum number of requests in driver in the last
	 * @hw_tag_samples completed requests.
	 */
	int max_rq_in_driver;
	/* number of samples used to calculate hw_tag */
	int hw_tag_samples;
	/* flag set to one if the driver is showing queueing behavior */
	int hw_tag;

	/* number of budgets assigned */
	int budgets_assigned;

	/*
	 * Timer set when idling (waiting) for the next request from
	 * the queue in service.
	 */
	struct hrtimer idle_slice_timer;

	/* bfq_queue in service */
	struct bfq_queue *in_service_queue;

	/* on-disk position of the last served request */
	sector_t last_position;

	/* position of the last served request for the in-service queue */
	sector_t in_serv_last_pos;

	/* time of last request completion (ns) */
	u64 last_completion;

	/* time of last transition from empty to non-empty (ns) */
	u64 last_empty_occupied_ns;

	/*
	 * Flag set to activate the sampling of the total service time
	 * of a just-arrived first I/O request (see
	 * bfq_update_inject_limit()). This will cause the setting of
	 * waited_rq when the request is finally dispatched.
	 */
	bool wait_dispatch;
	/*
	 * If set, then bfq_update_inject_limit() is invoked when
	 * waited_rq is eventually completed.
	 */
	struct request *waited_rq;
	/*
	 * True if some request has been injected during the last service hole.
	 */
	bool rqs_injected;

	/* time of first rq dispatch in current observation interval (ns) */
	u64 first_dispatch;
	/* time of last rq dispatch in current observation interval (ns) */
	u64 last_dispatch;

	/* beginning of the last budget */
	ktime_t last_budget_start;
	/* beginning of the last idle slice */
	ktime_t last_idling_start;
	unsigned long last_idling_start_jiffies;

	/* number of samples in current observation interval */
	int peak_rate_samples;
	/* num of samples of seq dispatches in current observation interval */
	u32 sequential_samples;
	/* total num of sectors transferred in current observation interval */
	u64 tot_sectors_dispatched;
	/* max rq size seen during current observation interval (sectors) */
	u32 last_rq_max_size;
	/* time elapsed from first dispatch in current observ. interval (us) */
	u64 delta_from_first;
	/*
	 * Current estimate of the device peak rate, measured in
	 * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
	 * BFQ_RATE_SHIFT is performed to increase precision in
	 * fixed-point calculations.
	 */
	u32 peak_rate;
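	/*
	 * Worked example of the fixed-point convention above
	 * (assuming BFQ_RATE_SHIFT = 16, its value in bfq-iosched.c
	 * at this revision): a device sustaining 100 sectors/usec
	 * would be stored as 100 << 16 = 6553600, and converted back
	 * with peak_rate >> BFQ_RATE_SHIFT.
	 */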

	/* maximum budget allotted to a bfq_queue before rescheduling */
	int bfq_max_budget;

	/* list of all the bfq_queues active on the device */
	struct list_head active_list;
	/* list of all the bfq_queues idle on the device */
	struct list_head idle_list;

	/*
	 * Timeout for async/sync requests; when it fires, requests
	 * are served in fifo order.
	 */
	u64 bfq_fifo_expire[2];
	/* weight of backward seeks wrt forward ones */
	unsigned int bfq_back_penalty;
	/* maximum allowed backward seek */
	unsigned int bfq_back_max;
	/* maximum idling time */
	u32 bfq_slice_idle;

	/* user-configured max budget value (0 for auto-tuning) */
	int bfq_user_max_budget;
	/*
	 * Timeout for bfq_queues to consume their budget; used to
	 * prevent seeky queues from imposing long latencies on
	 * sequential or quasi-sequential ones (this also implies that
	 * seeky queues cannot receive guarantees in the service
	 * domain; after a timeout they are charged for the time they
	 * have been in service, to preserve fairness among them, but
	 * without service-domain guarantees).
	 */
	unsigned int bfq_timeout;

	/*
	 * Number of consecutive requests that must be issued within
	 * the idle time slice to re-enable idling for a queue that
	 * was marked as non-I/O-bound (see the definition of the
	 * IO_bound flag for further details).
	 */
	unsigned int bfq_requests_within_timer;

	/*
	 * Force device idling whenever needed to provide accurate
	 * service guarantees, without caring about throughput
	 * issues. CAVEAT: this may even increase latencies, in case
	 * of useless idling for processes that have stopped doing I/O.
	 */
	bool strict_guarantees;

	/*
	 * Last time at which a queue entered the current burst of
	 * queues being activated shortly after each other; for more
	 * details about this and the following parameters related to
	 * a burst of activations, see the comments on the function
	 * bfq_handle_burst.
	 */
	unsigned long last_ins_in_burst;
	/*
	 * Reference time interval used to decide whether a queue has
	 * been activated shortly after @last_ins_in_burst.
	 */
	unsigned long bfq_burst_interval;
	/* number of queues in the current burst of queue activations */
	int burst_size;

	/* common parent entity for the queues in the burst */
	struct bfq_entity *burst_parent_entity;
	/*
	 * Maximum burst size above which the current queue-activation
	 * burst is deemed as 'large'.
	 */
	unsigned long bfq_large_burst_thresh;
	/* true if a large queue-activation burst is in progress */
	bool large_burst;
	/*
	 * Head of the burst list (as for the above fields, more
	 * details in the comments on the function bfq_handle_burst).
	 */
	struct hlist_head burst_list;

	/* if set to true, low-latency heuristics are enabled */
	bool low_latency;
	/*
	 * Maximum factor by which the weight of a weight-raised queue
	 * is multiplied.
	 */
	unsigned int bfq_wr_coeff;
	/* maximum duration of a weight-raising period (jiffies) */
	unsigned int bfq_wr_max_time;

	/* Maximum weight-raising duration for soft real-time processes */
	unsigned int bfq_wr_rt_max_time;
	/*
	 * Minimum idle period after which weight-raising may be
	 * reactivated for a queue (in jiffies).
	 */
	unsigned int bfq_wr_min_idle_time;
	/*
	 * Minimum period between request arrivals after which
	 * weight-raising may be reactivated for an already busy async
	 * queue (in jiffies).
	 */
	unsigned long bfq_wr_min_inter_arr_async;

	/* Max service-rate for a soft real-time queue, in sectors/sec */
	unsigned int bfq_wr_max_softrt_rate;
	/*
	 * Cached value of the product ref_rate*ref_wr_duration, used
	 * for computing the maximum duration of weight raising
	 * automatically.
	 */
	u64 rate_dur_prod;

	/* fallback dummy bfqq for extreme OOM conditions */
	struct bfq_queue oom_bfqq;

	spinlock_t lock;

	/*
	 * bic associated with the task issuing current bio for
	 * merging. This and the next field are used to support the
	 * bic lookup, needed by bio-merge functions, before the
	 * scheduler lock is taken, and thus to avoid taking the
	 * request-queue lock while the scheduler lock is being held.
	 */
	struct bfq_io_cq *bio_bic;
	/* bfqq associated with the task issuing current bio for merging */
	struct bfq_queue *bio_bfqq;

	/*
	 * Depth limits used in bfq_limit_depth (see comments on the
	 * function)
	 */
	unsigned int word_depths[2][2];
};

enum bfqq_state_flags {
	BFQQF_just_created = 0,	/* queue just allocated */
	BFQQF_busy,		/* has requests or is in service */
	BFQQF_wait_request,	/* waiting for a request */
	BFQQF_non_blocking_wait_rq, /*
				     * waiting for a request
				     * without idling the device
				     */
	BFQQF_fifo_expire,	/* FIFO checked in this slice */
	BFQQF_has_short_ttime,	/* queue has a short think time */
	BFQQF_sync,		/* synchronous queue */
	BFQQF_IO_bound,		/*
				 * bfqq has timed-out at least once
				 * having consumed at most 2/10 of
				 * its budget
				 */
	BFQQF_in_large_burst,	/*
				 * bfqq activated in a large burst,
				 * see comments to bfq_handle_burst.
				 */
	BFQQF_softrt_update,	/*
				 * may need softrt-next-start
				 * update
				 */
	BFQQF_coop,		/* bfqq is shared */
	BFQQF_split_coop	/* shared bfqq will be split */
};

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq);			\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq);			\
int bfq_bfqq_##name(const struct bfq_queue *bfqq);

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
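
/*
 * For reference, each BFQ_BFQQ_FNS(name) invocation above expands to
 * three prototypes; e.g. BFQ_BFQQ_FNS(busy) declares:
 *
 *   void bfq_mark_bfqq_busy(struct bfq_queue *bfqq);
 *   void bfq_clear_bfqq_busy(struct bfq_queue *bfqq);
 *   int bfq_bfqq_busy(const struct bfq_queue *bfqq);
 *
 * The bodies, generated by a companion macro in bfq-iosched.c, set,
 * clear and test the corresponding BFQQF_busy bit in bfqq->flags.
 */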

/* Expiration reasons. */
enum bfqq_expiration {
	BFQQE_TOO_IDLE = 0,		/*
					 * queue has been idling for
					 * too long
					 */
	BFQQE_BUDGET_TIMEOUT,	/* budget took too long to be used */
	BFQQE_BUDGET_EXHAUSTED,	/* budget consumed */
	BFQQE_NO_MORE_REQUESTS,	/* the queue has no more requests */
	BFQQE_PREEMPTED		/* preemption in progress */
};

struct bfqg_stats {
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	u64				start_group_wait_time;
	u64				start_idle_time;
	u64				start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
};

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
 *
 * @pd: &blkcg_policy_data that this structure inherits
 * @weight: weight of the bfq_group
 */
struct bfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data pd;

	unsigned int weight;
};

/**
 * struct bfq_group - per (device, cgroup) data structure.
 * @entity: schedulable entity to insert into the parent group sched_data.
 * @sched_data: own sched_data, to contain child entities (they may be
 *              both bfq_queues and bfq_groups).
 * @bfqd: the bfq_data for the device this group acts upon.
 * @async_bfqq: array of async queues for all the tasks belonging to
 *              the group, one queue per ioprio value per ioprio_class,
 *              except for the idle class that has only one queue.
 * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
 * @my_entity: pointer to @entity, %NULL for the toplevel group; used
 *             to avoid too many special cases during group creation/
 *             migration.
 * @stats: stats for this bfqg.
 * @active_entities: number of active entities belonging to the group;
 *                   unused for the root group. Used to know whether there
 *                   are groups with more than one active @bfq_entity
 *                   (see the comments to the function
 *                   bfq_better_to_idle()).
 * @rq_pos_tree: rbtree sorted by next_request position, used when
 *               determining if two or more queues have interleaving
 *               requests (see bfq_find_close_cooperator()).
 *
 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
 * there is a set of bfq_groups, each one collecting the lower-level
 * entities belonging to the group that are acting on the same device.
 *
 * Locking works as follows:
 *    o @bfqd is protected by the queue lock, RCU is used to access it
 *      from the readers.
 *    o All the other fields are protected by the @bfqd queue lock.
 */
struct bfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
	char blkg_path[128];

	/* reference counter (see comments in bfq_bic_update_cgroup) */
	int ref;

	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	void *bfqd;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct bfq_entity *my_entity;

	int active_entities;

	struct rb_root rq_pos_tree;

	struct bfqg_stats stats;
};

#else
struct bfq_group {
	struct bfq_sched_data sched_data;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct rb_root rq_pos_tree;
};
#endif

struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);

/* --------------- main algorithm interface ----------------- */

#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
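
/*
 * Illustrative use of the initializer above (a sketch; the helper
 * name is made up): reset every per-class service tree of a
 * bfq_sched_data, the way the scheduler seeds the root group's trees
 * at init time.
 */
static inline void bfq_example_init_service_trees(struct bfq_sched_data *sd)
{
	int i;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		sd->service_tree[i] = BFQ_SERVICE_TREE_INIT;
}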

extern const int bfq_timeout;

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  struct rb_root_cached *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
			       struct bfq_queue *bfqq,
			       struct rb_root_cached *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
			     struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		     bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfq_schedule_dispatch(struct bfq_data *bfqd);
void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);

/* ------------ end of main algorithm interface -------------- */

/* ---------------- cgroups-support interface ---------------- */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg);

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg);
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
void bfqg_and_blkg_put(struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
extern struct cftype bfq_blkcg_legacy_files[];
extern struct cftype bfq_blkg_files[];
extern struct blkcg_policy blkcg_policy_bfq;
#endif

/* ------------- end of cgroups-support interface ------------- */

/* - interface of the internal hierarchical B-WF2Q+ scheduler - */

#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* both the next loops stop at one of the child entities of the root group */
#define for_each_entity(entity)	\
	for (; entity ; entity = entity->parent)

/*
 * For each iteration, compute parent in advance, so as to be safe if
 * entity is deallocated during the iteration. Such a deallocation may
 * happen as a consequence of a bfq_put_queue that frees the bfq_queue
 * containing entity.
 */
#define for_each_entity_safe(entity, parent) \
	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)

#else /* CONFIG_BFQ_GROUP_IOSCHED */
/*
 * The next two macros are fake loops when cgroups support is not
 * enabled. In fact, in such a case, there is only one level to go up
 * (to reach the root group).
 */
#define for_each_entity(entity)	\
	for (; entity ; entity = NULL)

#define for_each_entity_safe(entity, parent) \
	for (parent = NULL; entity ; entity = parent)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
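
/*
 * Illustrative walk (a sketch; helper name is made up): climb from a
 * queue's entity towards the root group with for_each_entity(), e.g.
 * to count how deep the queue sits in the cgroup hierarchy. With
 * CONFIG_BFQ_GROUP_IOSCHED disabled this always returns 1.
 */
static inline unsigned int bfq_example_entity_depth(struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	unsigned int depth = 0;

	for_each_entity(entity)
		depth++;
	return depth;
}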

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
void bfq_put_idle_entity(struct bfq_service_tree *st,
			 struct bfq_entity *entity);
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too);
void bfq_bfqq_served(struct bfq_queue *bfqq, int served);
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms);
bool __bfq_deactivate_entity(struct bfq_entity *entity,
			     bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		      bool expiration);
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration);
void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);

/* --------------- end of interface of B-WF2Q+ ---------------- */

/* Logging facilities. */
static inline void bfq_pid_to_str(int pid, char *str, int len)
{
	if (pid != -1)
		snprintf(str, len, "%d", pid);
	else
		snprintf(str, len, "SHARED-");
}

#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	char pid_str[MAX_PID_STR_LENGTH];				\
	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
			bfqg_to_blkg(bfqq_group(bfqq))->blkcg,		\
			"bfq%s%c " fmt, pid_str,			\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args);	\
} while (0)

#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
		bfqg_to_blkg(bfqg)->blkcg, fmt, ##args);		\
} while (0)

#else /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {			\
	char pid_str[MAX_PID_STR_LENGTH];				\
	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
	blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str,	\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
			##args);					\
} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)		do {} while (0)

#endif /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log(bfqd, fmt, args...) \
	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
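
/*
 * Example call (hypothetical message text): this emits a "bfq ..."
 * line into the device's blktrace stream, visible e.g. through the
 * blktrace/blkparse tools:
 *
 *   bfq_log(bfqd, "dispatched rq from pid %d", bfqq->pid);
 */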

#endif /* _BFQ_H */