/*
 * Header file for the BFQ I/O scheduler: data structures and
 * prototypes of interface functions among BFQ components.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 */
#ifndef _BFQ_H
#define _BFQ_H

#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>

#define BFQ_IOPRIO_CLASSES	3
#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)

#define BFQ_MIN_WEIGHT			1
#define BFQ_MAX_WEIGHT			1000
#define BFQ_WEIGHT_CONVERSION_COEFF	10

#define BFQ_DEFAULT_QUEUE_IOPRIO	4

#define BFQ_WEIGHT_LEGACY_DFL	100
#define BFQ_DEFAULT_GRP_IOPRIO	0
#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE

/*
 * Soft real-time applications are far more latency-sensitive than
 * interactive ones. Over-raise the weight of the former to privilege
 * them against the latter.
 */
#define BFQ_SOFTRT_WEIGHT_FACTOR	100

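/*
 * Illustrative sketch (not part of this header's API): how the factor
 * above would compose with the weight-raising coefficient when a queue
 * is detected as soft real-time. The helper below is hypothetical; the
 * actual weight-raising logic lives in bfq-iosched.c.
 */
static inline unsigned int
bfq_example_softrt_wr_coeff(unsigned int base_wr_coeff)
{
	/* soft rt queues get an over-raised coefficient */
	return base_wr_coeff * BFQ_SOFTRT_WEIGHT_FACTOR;
}
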
struct bfq_entity;

/**
 * struct bfq_service_tree - per ioprio_class service tree.
 *
 * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
 * ioprio_class has its own independent scheduler, and so its own
 * bfq_service_tree.  All the fields are protected by the queue lock
 * of the containing bfqd.
 */
struct bfq_service_tree {
	/* tree for active entities (i.e., those backlogged) */
	struct rb_root active;
	/* tree for idle entities (i.e., not backlogged, with V < F_i) */
	struct rb_root idle;

	/* idle entity with minimum F_i */
	struct bfq_entity *first_idle;
	/* idle entity with maximum F_i */
	struct bfq_entity *last_idle;

	/* scheduler virtual time */
	u64 vtime;
	/* scheduler weight sum; active and idle entities contribute to it */
	unsigned long wsum;
};

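/*
 * Illustrative sketch: how the scheduler virtual time above advances.
 * In B-WF2Q+, V grows by the amount of service delivered divided by
 * the total weight of the entities, so one unit of virtual time
 * corresponds to one "fair share" of service. This hypothetical helper
 * only restates that relation; the real update in bfq-wf2q.c uses
 * fixed-point scaling for precision.
 */
static inline void bfq_example_advance_vtime(struct bfq_service_tree *st,
					     unsigned long served)
{
	if (st->wsum > 0)
		st->vtime += served / st->wsum;	/* V += served / wsum */
}
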
/**
 * struct bfq_sched_data - multi-class scheduler.
 *
 * bfq_sched_data is the basic scheduler queue.  It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
 * intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher priority classes are served before all the
 * requests from lower priority classes; among requests of the same
 * class, requests are served according to B-WF2Q+.
 *
 * The schedule is implemented by the service trees, plus the field
 * @next_in_service, which points to the entity on the active trees
 * that will be served next, if 1) no change in the schedule occurs
 * before the current in-service entity is expired, 2) the in-service
 * queue becomes idle when it expires, and 3) if the entity pointed by
 * in_service_entity is not a queue, then the in-service child entity
 * of the entity pointed by in_service_entity becomes idle on
 * expiration. This peculiar definition allows for the following
 * optimization, not yet exploited: while a given entity is still in
 * service, we already know which is the best candidate for next
 * service among the other active entities in the same parent
 * entity. We can then quickly compare the timestamps of the
 * in-service entity with those of such best candidate.
 *
 * All fields are protected by the lock of the containing bfqd.
 */
struct bfq_sched_data {
	/* entity in service */
	struct bfq_entity *in_service_entity;
	/* head-of-line entity (see comments above) */
	struct bfq_entity *next_in_service;
	/* array of service trees, one per ioprio_class */
	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
	/* last time CLASS_IDLE was served */
	unsigned long bfq_class_idle_last_service;
};

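/*
 * Illustrative sketch: the priority order among the three
 * ioprio_classes described above. A lookup for the next entity to
 * serve conceptually scans the per-class service trees from RT down
 * to IDLE and stops at the first class with active entities. This
 * hypothetical helper returns only the index of that class; the real
 * lookup (bfq_lookup_next_entity, in bfq-wf2q.c) also extracts the
 * entity itself.
 */
static inline int bfq_example_first_backlogged_class(struct bfq_sched_data *sd)
{
	int i;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		if (!RB_EMPTY_ROOT(&sd->service_tree[i].active))
			return i;	/* RT=0 is scanned before BE and IDLE */
	return -1;			/* no backlogged entity */
}
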
/**
 * struct bfq_weight_counter - counter of the number of all active queues
 *                             with a given weight.
 */
struct bfq_weight_counter {
	unsigned int weight; /* weight of the queues this counter refers to */
	unsigned int num_active; /* nr of active queues with this weight */
	/*
	 * Weights tree member (see bfq_data's @queue_weights_tree)
	 */
	struct rb_node weights_node;
};

/**
 * struct bfq_entity - schedulable entity.
 *
 * A bfq_entity is used to represent either a bfq_queue (leaf node in the
 * cgroup hierarchy) or a bfq_group in the upper-level scheduler.  Each
 * entity belongs to the sched_data of the parent group in the cgroup
 * hierarchy.  Non-leaf entities also have their own sched_data, stored
 * in @my_sched_data.
 *
 * Each entity stores its priority values independently; this would
 * allow different weights on different devices, but this
 * functionality is not yet exported to userspace.  Priorities and
 * weights are updated lazily, first storing the new values into the
 * new_* fields, then setting the @prio_changed flag.  As soon as
 * there is a transition in the entity state that allows the priority
 * update to take place, the effective and the requested priority
 * values are synchronized.
 *
 * Unless cgroups are used, the weight value is calculated from the
 * ioprio to export the same interface as CFQ.  When dealing with
 * "well-behaved" queues (i.e., queues that do not spend too much
 * time consuming their budget and have true sequential behavior, and
 * when there are no external factors breaking anticipation) the
 * relative weights at each level of the cgroups hierarchy should be
 * guaranteed.  All the fields are protected by the queue lock of the
 * containing bfqd.
 */
struct bfq_entity {
	/* service_tree member */
	struct rb_node rb_node;

	/*
	 * Flag, true if the entity is on a tree (either the active or
	 * the idle one of its service_tree) or is in service.
	 */
	bool on_st;

	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
	u64 start, finish;

	/* tree the entity is enqueued into; %NULL if not on a tree */
	struct rb_root *tree;

	/*
	 * minimum start time of the (active) subtree rooted at this
	 * entity; used for O(log N) lookups into active trees
	 */
	u64 min_start;

	/* amount of service received during the last service slot */
	int service;

	/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
	int budget;

	/* weight of the queue */
	int weight;
	/* next weight if a change is in progress */
	int new_weight;

	/* original weight, used to implement weight boosting */
	int orig_weight;

	/* parent entity, for hierarchical scheduling */
	struct bfq_entity *parent;

	/*
	 * For non-leaf nodes in the hierarchy, the associated
	 * scheduler queue, %NULL on leaf nodes.
	 */
	struct bfq_sched_data *my_sched_data;
	/* the scheduler queue this entity belongs to */
	struct bfq_sched_data *sched_data;

	/* flag, set to request a weight, ioprio or ioprio_class change */
	int prio_changed;

	/* flag, set if the entity is counted in groups_with_pending_reqs */
	bool in_groups_with_pending_reqs;
};

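/*
 * Illustrative sketch: how the B-WF2Q+ timestamps above relate. The
 * finish timestamp of a budget is the start timestamp plus the budget
 * scaled by the weight, as stated in the @budget comment
 * (F_i = S_i + budget/weight). The real computation, bfq_calc_finish()
 * in bfq-wf2q.c, additionally left-shifts the service by a fixed-point
 * factor; this hypothetical helper keeps only the basic relation.
 */
static inline u64 bfq_example_calc_finish(struct bfq_entity *entity,
					  unsigned long service)
{
	return entity->start + service / (unsigned long)entity->weight;
}
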
struct bfq_group;

/**
 * struct bfq_ttime - per process thinktime stats.
 */
struct bfq_ttime {
	/* completion time of the last request */
	u64 last_end_request;

	/* total process thinktime */
	u64 ttime_total;
	/* number of thinktime samples */
	unsigned long ttime_samples;
	/* average process thinktime */
	u64 ttime_mean;
};

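/*
 * Illustrative sketch: the relation among the bfq_ttime fields above.
 * The mean thinktime is the accumulated total divided by the number of
 * samples (div64_ul() comes from <linux/math64.h>). The real code
 * keeps exponentially decayed versions of both quantities, so this
 * hypothetical helper is only the idea in its simplest form.
 */
static inline u64 bfq_example_ttime_mean(const struct bfq_ttime *ttime)
{
	if (!ttime->ttime_samples)
		return 0;
	return div64_ul(ttime->ttime_total, ttime->ttime_samples);
}
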
/**
 * struct bfq_queue - leaf schedulable entity.
 *
 * A bfq_queue is a leaf request queue; it can be associated with one
 * or more io_contexts if it is async or shared between cooperating
 * processes. @cgroup holds a reference to the cgroup, to be sure that it
 * does not disappear while a bfqq still references it (mostly to avoid
 * races between request issuing and task migration followed by cgroup
 * destruction).
 * All the fields are protected by the queue lock of the containing bfqd.
 */
struct bfq_queue {
	/* reference counter */
	int ref;
	/* parent bfq_data */
	struct bfq_data *bfqd;

	/* current ioprio and ioprio class */
	unsigned short ioprio, ioprio_class;
	/* next ioprio and ioprio class if a change is in progress */
	unsigned short new_ioprio, new_ioprio_class;

	/*
	 * Shared bfq_queue if queue is cooperating with one or more
	 * other queues.
	 */
	struct bfq_queue *new_bfqq;
	/* request-position tree member (see bfq_group's @rq_pos_tree) */
	struct rb_node pos_node;
	/* request-position tree root (see bfq_group's @rq_pos_tree) */
	struct rb_root *pos_root;

	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* number of sync and async requests queued */
	int queued[2];
	/* number of requests currently allocated */
	int allocated;
	/* number of pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* entity representing this queue in the scheduler */
	struct bfq_entity entity;

	/* pointer to the weight counter associated with this entity */
	struct bfq_weight_counter *weight_counter;

	/* maximum budget allowed from the feedback mechanism */
	int max_budget;
	/* budget expiration (in jiffies) */
	unsigned long budget_timeout;

	/* number of requests on the dispatch list or inside the driver */
	int dispatched;

	/* status flags */
	unsigned long flags;

	/* node for active/idle bfqq list inside parent bfqd */
	struct list_head bfqq_list;

	/* associated @bfq_ttime struct */
	struct bfq_ttime ttime;

	/* bit vector: a 1 for each seeky request in history */
	u32 seek_history;

	/* node for the device's burst list */
	struct hlist_node burst_list_node;

	/* position of the last request enqueued */
	sector_t last_request_pos;

	/*
	 * Number of consecutive pairs of request completion and
	 * arrival, such that the queue becomes idle after the
	 * completion, but the next request arrives within an idle
	 * time slice; used only if the queue's IO_bound flag has been
	 * cleared.
	 */
	unsigned int requests_within_timer;

	/* pid of the process owning the queue, used for logging purposes */
	pid_t pid;

	/*
	 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
	 * if the queue is shared.
	 */
	struct bfq_io_cq *bic;

	/* current maximum weight-raising time for this queue */
	unsigned long wr_cur_max_time;
	/*
	 * Minimum time instant such that, only if a new request is
	 * enqueued after this time instant in an idle @bfq_queue with
	 * no outstanding requests, then the task associated with the
	 * queue is deemed soft real-time (see the comments on the
	 * function bfq_bfqq_softrt_next_start()).
	 */
	unsigned long soft_rt_next_start;
	/*
	 * Start time of the current weight-raising period if
	 * the @bfq_queue is being weight-raised, otherwise
	 * finish time of the last weight-raising period.
	 */
	unsigned long last_wr_start_finish;
	/* factor by which the weight of this queue is multiplied */
	unsigned int wr_coeff;
	/*
	 * Time of the last transition of the @bfq_queue from idle to
	 * backlogged.
	 */
	unsigned long last_idle_bklogged;
	/*
	 * Cumulative service received from the @bfq_queue since the
	 * last transition from idle to backlogged.
	 */
	unsigned long service_from_backlogged;
	/*
	 * Cumulative service received from the @bfq_queue since its
	 * last transition to weight-raised state.
	 */
	unsigned long service_from_wr;

	/*
	 * Value of wr start time when switching to soft rt
	 */
	unsigned long wr_start_at_switch_to_srt;

	unsigned long split_time; /* time of last split */

	unsigned long first_IO_time; /* time of first I/O for this queue */

	/* max service rate measured so far */
	u32 max_service_rate;
	/*
	 * Ratio between the service received by bfqq while it is in
	 * service, and the cumulative service (of requests of other
	 * queues) that may be injected while bfqq is empty but still
	 * in service. To increase precision, the coefficient is
	 * measured in tenths of unit. Here are some examples of (1)
	 * ratios, (2) resulting percentages of service injected
	 * w.r.t. the total service dispatched while bfqq is in
	 * service, and (3) corresponding values of the coefficient:
	 * 1 (50%) -> 10
	 * 2 (33%) -> 20
	 * 10 (9%) -> 100
	 * 9.9 (9%) -> 99
	 * 1.5 (40%) -> 15
	 * 0.5 (66%) -> 5
	 * 0.1 (90%) -> 1
	 *
	 * So, if the coefficient is lower than 10, then
	 * injected service is more than bfqq service.
	 */
	unsigned int inject_coeff;
	/* amount of service injected in current service slot */
	unsigned int injected_service;
};

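/*
 * Illustrative sketch: how @inject_coeff and @injected_service above
 * combine. Since the coefficient is expressed in tenths of unit,
 * injection is conceptually allowed while
 * injected_service * inject_coeff < service_received * 10. This
 * hypothetical helper just encodes that inequality; the actual
 * injection policy is implemented in bfq-iosched.c.
 */
static inline bool bfq_example_may_inject(const struct bfq_queue *bfqq)
{
	return (unsigned long)bfqq->injected_service * bfqq->inject_coeff <
	       (unsigned long)bfqq->entity.service * 10;
}
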
/**
 * struct bfq_io_cq - per (request_queue, io_context) structure.
 */
struct bfq_io_cq {
	/* associated io_cq structure */
	struct io_cq icq; /* must be the first member */
	/* array of two process queues, the sync and the async */
	struct bfq_queue *bfqq[2];
	/* per (request_queue, blkcg) ioprio */
	int ioprio;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
	/*
	 * Snapshot of the has_short_ttime flag before merging; taken
	 * to remember its value while the queue is merged, so as to
	 * be able to restore it in case of split.
	 */
	bool saved_has_short_ttime;
	/*
	 * Same purpose as the previous field, for the I/O bound
	 * classification of a queue.
	 */
	bool saved_IO_bound;

	/*
	 * Same purpose as the previous fields, for the flag recording
	 * whether the queue belongs to a large burst.
	 */
	bool saved_in_large_burst;
	/*
	 * True if the queue belonged to a burst list before its merge
	 * with another cooperating queue.
	 */
	bool was_in_burst_list;

	/*
	 * Similar to previous fields: save wr information.
	 */
	unsigned long saved_wr_coeff;
	unsigned long saved_last_wr_start_finish;
	unsigned long saved_wr_start_at_switch_to_srt;
	unsigned int saved_wr_cur_max_time;
	struct bfq_ttime saved_ttime;
};

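/*
 * Illustrative sketch: what the saved_* fields above are for. On a
 * queue split, the weight-raising state snapshotted at merge time is
 * copied back into the queue, roughly as below. This hypothetical
 * helper mirrors only the field-by-field restore; the real resume
 * logic in bfq-iosched.c also re-validates the restored state.
 */
static inline void bfq_example_resume_wr_state(struct bfq_queue *bfqq,
					       struct bfq_io_cq *bic)
{
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->ttime = bic->saved_ttime;
}
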
/**
 * struct bfq_data - per-device data structure.
 *
 * All the fields are protected by @lock.
 */
struct bfq_data {
	/* device request queue */
	struct request_queue *queue;
	/* dispatch queue */
	struct list_head dispatch;

	/* root bfq_group for the device */
	struct bfq_group *root_group;

	/*
	 * rbtree of weight counters of @bfq_queues, sorted by
	 * weight. Used to keep track of whether all @bfq_queues have
	 * the same weight. The tree contains one counter for each
	 * distinct weight associated to some active and not
	 * weight-raised @bfq_queue (see the comments to the functions
	 * bfq_weights_tree_[add|remove] for further details).
	 */
	struct rb_root queue_weights_tree;

	/*
	 * Number of groups with at least one descendant process that
	 * has at least one request waiting for completion. Note that
	 * this also accounts for requests already dispatched but not
	 * yet completed. Therefore this number of groups may differ
	 * from (be larger than) the number of active groups, as a
	 * group is considered active only if its corresponding entity
	 * has descendant queues with at least one request queued. This
	 * number is used to decide whether a scenario is symmetric.
	 * For a detailed explanation see comments on the computation
	 * of the variable asymmetric_scenario in the function
	 * bfq_better_to_idle().
	 *
	 * However, it is hard to compute this number exactly, for
	 * groups with multiple descendant processes. Consider a group
	 * that is inactive, i.e., that has no descendant process with
	 * pending I/O inside BFQ queues. Then suppose that
	 * num_groups_with_pending_reqs is still accounting for this
	 * group, because the group has descendant processes with some
	 * I/O request still in flight. num_groups_with_pending_reqs
	 * should be decremented when the in-flight request of the
	 * last descendant process is finally completed (assuming that
	 * nothing else has changed for the group in the meantime, in
	 * terms of composition of the group and active/inactive state
	 * of child groups and processes). To accomplish this, an
	 * additional pending-request counter must be added to
	 * entities, and must be updated correctly. To avoid this
	 * additional field and operations, we resort to the following
	 * tradeoff between simplicity and accuracy: for an inactive
	 * group that is still counted in num_groups_with_pending_reqs,
	 * we decrement num_groups_with_pending_reqs when the first
	 * descendant process of the group remains with no request
	 * waiting for completion.
	 *
	 * Even this simpler decrement strategy requires a little
	 * carefulness: to avoid multiple decrements, we flag a group,
	 * more precisely an entity representing a group, as still
	 * counted in num_groups_with_pending_reqs when it becomes
	 * inactive. Then, when the first descendant queue of the
	 * entity remains with no request waiting for completion,
	 * num_groups_with_pending_reqs is decremented, and this flag
	 * is reset. After this flag is reset for the entity,
	 * num_groups_with_pending_reqs won't be decremented any
	 * longer in case a new descendant queue of the entity remains
	 * with no request waiting for completion.
	 */
	unsigned int num_groups_with_pending_reqs;

	/*
	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
	 * requests (including the queue in service, even if it is
	 * idling).
	 */
	unsigned int busy_queues[3];
	/* number of weight-raised busy @bfq_queues */
	int wr_busy_queues;
	/* number of queued requests */
	int queued;
	/* number of requests dispatched and waiting for completion */
	int rq_in_driver;

	/*
	 * Maximum number of requests in driver in the last
	 * @hw_tag_samples completed requests.
	 */
	int max_rq_in_driver;
	/* number of samples used to calculate hw_tag */
	int hw_tag_samples;
	/* flag set to one if the driver is showing queueing behavior */
	int hw_tag;

	/* number of budgets assigned */
	int budgets_assigned;

	/*
	 * Timer set when idling (waiting) for the next request from
	 * the queue in service.
	 */
	struct hrtimer idle_slice_timer;

	/* bfq_queue in service */
	struct bfq_queue *in_service_queue;

	/* on-disk position of the last served request */
	sector_t last_position;

	/* position of the last served request for the in-service queue */
	sector_t in_serv_last_pos;

	/* time of last request completion (ns) */
	u64 last_completion;

	/* time of first rq dispatch in current observation interval (ns) */
	u64 first_dispatch;
	/* time of last rq dispatch in current observation interval (ns) */
	u64 last_dispatch;

	/* beginning of the last budget */
	ktime_t last_budget_start;
	/* beginning of the last idle slice */
	ktime_t last_idling_start;

	/* number of samples in current observation interval */
	int peak_rate_samples;
	/* num of samples of seq dispatches in current observation interval */
	u32 sequential_samples;
	/* total num of sectors transferred in current observation interval */
	u64 tot_sectors_dispatched;
	/* max rq size seen during current observation interval (sectors) */
	u32 last_rq_max_size;
	/* time elapsed from first dispatch in current observ. interval (us) */
	u64 delta_from_first;
	/*
	 * Current estimate of the device peak rate, measured in
	 * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
	 * BFQ_RATE_SHIFT is performed to increase precision in
	 * fixed-point calculations.
	 */
	u32 peak_rate;

	/* maximum budget allotted to a bfq_queue before rescheduling */
	int bfq_max_budget;

	/* list of all the bfq_queues active on the device */
	struct list_head active_list;
	/* list of all the bfq_queues idle on the device */
	struct list_head idle_list;

	/*
	 * Timeout for async/sync requests; when it fires, requests
	 * are served in fifo order.
	 */
	u64 bfq_fifo_expire[2];
	/* weight of backward seeks wrt forward ones */
	unsigned int bfq_back_penalty;
	/* maximum allowed backward seek */
	unsigned int bfq_back_max;
	/* maximum idling time */
	u32 bfq_slice_idle;

	/* user-configured max budget value (0 for auto-tuning) */
	int bfq_user_max_budget;
	/*
	 * Timeout for bfq_queues to consume their budget; used to
	 * prevent seeky queues from imposing long latencies on
	 * sequential or quasi-sequential ones (this also implies that
	 * seeky queues cannot receive guarantees in the service
	 * domain; after a timeout they are charged for the time they
	 * have been in service, to preserve fairness among them, but
	 * without service-domain guarantees).
	 */
	unsigned int bfq_timeout;

	/*
	 * Number of consecutive requests that must be issued within
	 * the idle time slice to set again idling to a queue which
	 * was marked as non-I/O-bound (see the definition of the
	 * IO_bound flag for further details).
	 */
	unsigned int bfq_requests_within_timer;

	/*
	 * Force device idling whenever needed to provide accurate
	 * service guarantees, without caring about throughput
	 * issues. CAVEAT: this may even increase latencies, in case
	 * of useless idling for processes that have stopped doing I/O.
	 */
	bool strict_guarantees;

	/*
	 * Last time at which a queue entered the current burst of
	 * queues being activated shortly after each other; for more
	 * details about this and the following parameters related to
	 * a burst of activations, see the comments on the function
	 * bfq_handle_burst.
	 */
	unsigned long last_ins_in_burst;
	/*
	 * Reference time interval used to decide whether a queue has
	 * been activated shortly after @last_ins_in_burst.
	 */
	unsigned long bfq_burst_interval;
	/* number of queues in the current burst of queue activations */
	int burst_size;

	/* common parent entity for the queues in the burst */
	struct bfq_entity *burst_parent_entity;
	/*
	 * Maximum burst size above which the current queue-activation
	 * burst is deemed large.
	 */
	unsigned long bfq_large_burst_thresh;
	/* true if a large queue-activation burst is in progress */
	bool large_burst;
	/*
	 * Head of the burst list (as for the above fields, more
	 * details in the comments on the function bfq_handle_burst).
	 */
	struct hlist_head burst_list;

	/* if set to true, low-latency heuristics are enabled */
	bool low_latency;
	/*
	 * Maximum factor by which the weight of a weight-raised queue
	 * is multiplied.
	 */
	unsigned int bfq_wr_coeff;
	/* maximum duration of a weight-raising period (jiffies) */
	unsigned int bfq_wr_max_time;

	/* Maximum weight-raising duration for soft real-time processes */
	unsigned int bfq_wr_rt_max_time;
	/*
	 * Minimum idle period after which weight-raising may be
	 * reactivated for a queue (in jiffies).
	 */
	unsigned int bfq_wr_min_idle_time;
	/*
	 * Minimum period between request arrivals after which
	 * weight-raising may be reactivated for an already busy async
	 * queue (in jiffies).
	 */
	unsigned long bfq_wr_min_inter_arr_async;

	/* Max service-rate for a soft real-time queue, in sectors/sec */
	unsigned int bfq_wr_max_softrt_rate;
	/*
	 * Cached value of the product ref_rate*ref_wr_duration, used
	 * for computing the maximum duration of weight raising
	 * automatically.
	 */
	u64 rate_dur_prod;

	/* fallback dummy bfqq for extreme OOM conditions */
	struct bfq_queue oom_bfqq;

	spinlock_t lock;

	/*
	 * bic associated with the task issuing current bio for
	 * merging. This and the next field are used as a support to
	 * be able to perform the bic lookup, needed by bio-merge
	 * functions, before the scheduler lock is taken, and thus
	 * avoid taking the request-queue lock while the scheduler
	 * lock is being held.
	 */
	struct bfq_io_cq *bio_bic;
	/* bfqq associated with the task issuing current bio for merging */
	struct bfq_queue *bio_bfqq;

	/*
	 * Depth limits used in bfq_limit_depth (see comments on the
	 * function)
	 */
	unsigned int word_depths[2][2];
};

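/*
 * Illustrative sketch: decoding the fixed-point @peak_rate field
 * above. The estimate is stored left-shifted by BFQ_RATE_SHIFT, so
 * the plain sectors-per-usec value is recovered with a right shift.
 * BFQ_RATE_SHIFT itself is defined in bfq-iosched.c; the constant
 * below is only a stand-in for illustration.
 */
#define BFQ_EXAMPLE_RATE_SHIFT	16	/* stand-in for BFQ_RATE_SHIFT */

static inline u32
bfq_example_peak_rate_sects_per_usec(const struct bfq_data *bfqd)
{
	return bfqd->peak_rate >> BFQ_EXAMPLE_RATE_SHIFT;
}
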
enum bfqq_state_flags {
	BFQQF_just_created = 0,	/* queue just allocated */
	BFQQF_busy,		/* has requests or is in service */
	BFQQF_wait_request,	/* waiting for a request */
	BFQQF_non_blocking_wait_rq, /*
				     * waiting for a request
				     * without idling the device
				     */
	BFQQF_fifo_expire,	/* FIFO checked in this slice */
	BFQQF_has_short_ttime,	/* queue has a short think time */
	BFQQF_sync,		/* synchronous queue */
	BFQQF_IO_bound,		/*
				 * bfqq has timed out at least once
				 * having consumed at most 2/10 of
				 * its budget
				 */
	BFQQF_in_large_burst,	/*
				 * bfqq activated in a large burst,
				 * see comments to bfq_handle_burst.
				 */
	BFQQF_softrt_update,	/*
				 * may need softrt-next-start
				 * update
				 */
	BFQQF_coop,		/* bfqq is shared */
	BFQQF_split_coop	/* shared bfqq will be split */
};

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq);			\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq);			\
int bfq_bfqq_##name(const struct bfq_queue *bfqq);

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

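/*
 * Illustrative sketch: what one BFQ_BFQQ_FNS() expansion above
 * declares, and how the matching definitions (in bfq-iosched.c)
 * conceptually manipulate the @flags word of a bfq_queue. Shown for
 * the "busy" flag only, as a hypothetical example:
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__set_bit(BFQQF_busy, &bfqq->flags);
 *	}
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__clear_bit(BFQQF_busy, &bfqq->flags);
 *	}
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq)
 *	{
 *		return test_bit(BFQQF_busy, &bfqq->flags);
 *	}
 */
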
/* Expiration reasons. */
enum bfqq_expiration {
	BFQQE_TOO_IDLE = 0,		/*
					 * queue has been idling for
					 * too long
					 */
	BFQQE_BUDGET_TIMEOUT,	/* budget took too long to be used */
	BFQQE_BUDGET_EXHAUSTED,	/* budget consumed */
	BFQQE_NO_MORE_REQUESTS,	/* the queue has no more requests */
	BFQQE_PREEMPTED		/* preemption in progress */
};

struct bfqg_stats {
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	u64				start_group_wait_time;
	u64				start_idle_time;
	u64				start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
};

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
 *
 * @pd: blkcg_policy_data that this structure embeds (must be the
 *      first member)
 * @weight: weight of the bfq_group
 */
struct bfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data pd;

	unsigned int weight;
};

/**
 * struct bfq_group - per (device, cgroup) data structure.
 * @entity: schedulable entity to insert into the parent group sched_data.
 * @sched_data: own sched_data, to contain child entities (they may be
 *              both bfq_queues and bfq_groups).
 * @bfqd: the bfq_data for the device this group acts upon.
 * @async_bfqq: array of async queues for all the tasks belonging to
 *              the group, one queue per ioprio value per ioprio_class,
 *              except for the idle class that has only one queue.
 * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
 * @my_entity: pointer to @entity, %NULL for the toplevel group; used
 *             to avoid too many special cases during group creation/
 *             migration.
 * @stats: stats for this bfqg.
 * @active_entities: number of active entities belonging to the group;
 *                   unused for the root group. Used to know whether there
 *                   are groups with more than one active @bfq_entity
 *                   (see the comments to the function
 *                   bfq_bfqq_may_idle()).
 * @rq_pos_tree: rbtree sorted by next_request position, used when
 *               determining if two or more queues have interleaving
 *               requests (see bfq_find_close_cooperator()).
 *
 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
 * there is a set of bfq_groups, each one collecting the lower-level
 * entities belonging to the group that are acting on the same device.
 *
 * Locking works as follows:
 *    o @bfqd is protected by the queue lock, RCU is used to access it
 *      from the readers.
 *    o All the other fields are protected by the @bfqd queue lock.
 */
struct bfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
	char blkg_path[128];

	/* reference counter (see comments in bfq_bic_update_cgroup) */
	int ref;

	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	void *bfqd;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct bfq_entity *my_entity;

	int active_entities;

	struct rb_root rq_pos_tree;

	struct bfqg_stats stats;
};

#else
struct bfq_group {
	struct bfq_sched_data sched_data;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct rb_root rq_pos_tree;
};
#endif

struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);

/* --------------- main algorithm interface ----------------- */

#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })

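/*
 * Illustrative sketch: how BFQ_SERVICE_TREE_INIT above is meant to be
 * used, namely to reset every per-class service tree of a scheduler
 * queue to empty (both rbtrees NULL-rooted, no idle entities, zero
 * vtime and weight sum). This hypothetical helper shows the pattern;
 * the real initialization happens when groups are allocated.
 */
static inline void bfq_example_init_sched_data(struct bfq_sched_data *sd)
{
	int i;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		sd->service_tree[i] = BFQ_SERVICE_TREE_INIT;
}
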
extern const int bfq_timeout;

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  struct rb_root *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
			       struct bfq_queue *bfqq,
			       struct rb_root *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
			     struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		     bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfq_schedule_dispatch(struct bfq_data *bfqd);
void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);

/* ------------ end of main algorithm interface -------------- */

/* ---------------- cgroups-support interface ---------------- */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg);

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg);
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
void bfqg_and_blkg_put(struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
extern struct cftype bfq_blkcg_legacy_files[];
extern struct cftype bfq_blkg_files[];
extern struct blkcg_policy blkcg_policy_bfq;
#endif

/* ------------- end of cgroups-support interface ------------- */

/* - interface of the internal hierarchical B-WF2Q+ scheduler - */

#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* both next loops stop at one of the child entities of the root group */
#define for_each_entity(entity)	\
	for (; entity ; entity = entity->parent)

/*
 * For each iteration, compute parent in advance, so as to be safe if
 * entity is deallocated during the iteration. Such a deallocation may
 * happen as a consequence of a bfq_put_queue that frees the bfq_queue
 * containing entity.
 */
#define for_each_entity_safe(entity, parent) \
	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)

#else /* CONFIG_BFQ_GROUP_IOSCHED */
/*
 * The next two macros are fake loops when cgroups support is not
 * enabled. In fact, in such a case, there is only one level to go up
 * (to reach the root group).
 */
#define for_each_entity(entity)	\
	for (; entity ; entity = NULL)

#define for_each_entity_safe(entity, parent) \
	for (parent = NULL; entity ; entity = parent)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */

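/*
 * Illustrative sketch: the typical use of for_each_entity() above is
 * to propagate an operation from a queue up through its ancestor
 * group entities to the root. This hypothetical walk just counts the
 * hierarchy levels above (and including) a given entity; it works
 * with both variants of the macro.
 */
static inline int bfq_example_hierarchy_depth(struct bfq_entity *entity)
{
	int depth = 0;

	for_each_entity(entity)
		depth++;
	return depth;
}
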
struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
void bfq_put_idle_entity(struct bfq_service_tree *st,
			 struct bfq_entity *entity);
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too);
void bfq_bfqq_served(struct bfq_queue *bfqq, int served);
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms);
bool __bfq_deactivate_entity(struct bfq_entity *entity,
			     bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		      bool expiration);
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration);
void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);

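/*
 * Illustrative sketch: the conversion performed by
 * bfq_ioprio_to_weight() declared above. CFQ-compatible ioprios
 * (0 = highest, IOPRIO_BE_NR - 1 = lowest) are mapped onto the BFQ
 * weight range by inverting the scale and applying
 * BFQ_WEIGHT_CONVERSION_COEFF. This is a sketch of the intended
 * mapping, not the authoritative definition (which lives in
 * bfq-wf2q.c):
 *
 *	weight = (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF
 *
 * e.g., the default ioprio BFQ_DEFAULT_QUEUE_IOPRIO = 4 maps to
 * weight (8 - 4) * 10 = 40.
 */
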
/* --------------- end of interface of B-WF2Q+ ---------------- */

/* Logging facilities. */
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
			bfqg_to_blkg(bfqq_group(bfqq))->blkcg,		\
			"bfq%d%c " fmt, (bfqq)->pid,			\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args);	\
} while (0)

#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
		bfqg_to_blkg(bfqg)->blkcg, fmt, ##args);		\
} while (0)

#else /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	\
	blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid,	\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
				##args)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)		do {} while (0)

#endif /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log(bfqd, fmt, args...) \
	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
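
/*
 * Illustrative sketch: hypothetical call sites for the logging macros
 * above. The message ends up in the device's blktrace stream, prefixed
 * with "bfq" (plus pid and sync/async flag for the per-queue variant):
 *
 *	bfq_log(bfqd, "rq dispatched, rq_in_driver %d", bfqd->rq_in_driver);
 *	bfq_log_bfqq(bfqd, bfqq, "set budget %d", bfqq->entity.budget);
 */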

#endif /* _BFQ_H */