xref: /openbmc/linux/block/bfq-iosched.c (revision efe4a1ac)
1 /*
2  * Budget Fair Queueing (BFQ) I/O scheduler.
3  *
4  * Based on ideas and code from CFQ:
5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6  *
7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8  *		      Paolo Valente <paolo.valente@unimore.it>
9  *
10  * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
11  *                    Arianna Avanzini <avanzini@google.com>
12  *
13  * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
14  *
15  *  This program is free software; you can redistribute it and/or
16  *  modify it under the terms of the GNU General Public License as
17  *  published by the Free Software Foundation; either version 2 of the
18  *  License, or (at your option) any later version.
19  *
20  *  This program is distributed in the hope that it will be useful,
21  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
22  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23  *  General Public License for more details.
24  *
25  * BFQ is a proportional-share I/O scheduler, with some extra
26  * low-latency capabilities. BFQ also supports full hierarchical
27  * scheduling through cgroups. Next paragraphs provide an introduction
28  * on BFQ inner workings. Details on BFQ benefits, usage and
29  * limitations can be found in Documentation/block/bfq-iosched.txt.
30  *
31  * BFQ is a proportional-share storage-I/O scheduling algorithm based
32  * on the slice-by-slice service scheme of CFQ. But BFQ assigns
33  * budgets, measured in number of sectors, to processes instead of
34  * time slices. The device is not granted to the in-service process
35  * for a given time slice, but until it has exhausted its assigned
36  * budget. This change from the time to the service domain enables BFQ
37  * to distribute the device throughput among processes as desired,
38  * without any distortion due to throughput fluctuations, or to device
39  * internal queueing. BFQ uses an ad hoc internal scheduler, called
40  * B-WF2Q+, to schedule processes according to their budgets. More
41  * precisely, BFQ schedules queues associated with processes. Each
42  * process/queue is assigned a user-configurable weight, and B-WF2Q+
43  * guarantees that each queue receives a fraction of the throughput
44  * proportional to its weight. Thanks to the accurate policy of
45  * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
46  * processes issuing sequential requests (to boost the throughput),
47  * and yet guarantee a low latency to interactive and soft real-time
48  * applications.
49  *
50  * In particular, to provide these low-latency guarantees, BFQ
51  * explicitly privileges the I/O of two classes of time-sensitive
52  * applications: interactive and soft real-time. This feature enables
53  * BFQ to provide applications in these classes with a very low
54  * latency. Finally, BFQ also features additional heuristics for
55  * preserving both a low latency and a high throughput on NCQ-capable,
56  * rotational or flash-based devices, and to get the job done quickly
57  * for applications consisting in many I/O-bound processes.
58  *
59  * NOTE: if the main or only goal, with a given device, is to achieve
60  * the maximum-possible throughput at all times, then do switch off
61  * all low-latency heuristics for that device, by setting low_latency
62  * to 0.
63  *
64  * BFQ is described in [1], where also a reference to the initial, more
65  * theoretical paper on BFQ can be found. The interested reader can find
66  * in the latter paper full details on the main algorithm, as well as
67  * formulas of the guarantees and formal proofs of all the properties.
68  * With respect to the version of BFQ presented in these papers, this
69  * implementation adds a few more heuristics, such as the one that
70  * guarantees a low latency to soft real-time applications, and a
71  * hierarchical extension based on H-WF2Q+.
72  *
73  * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
74  * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
75  * with O(log N) complexity derives from the one introduced with EEVDF
76  * in [3].
77  *
78  * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
79  *     Scheduler", Proceedings of the First Workshop on Mobile System
80  *     Technologies (MST-2015), May 2015.
81  *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
82  *
83  * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
84  *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
85  *     Oct 1997.
86  *
87  * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
88  *
89  * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
90  *     First: A Flexible and Accurate Mechanism for Proportional Share
91  *     Resource Allocation", technical report.
92  *
93  * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
94  */
95 #include <linux/module.h>
96 #include <linux/slab.h>
97 #include <linux/blkdev.h>
98 #include <linux/cgroup.h>
99 #include <linux/elevator.h>
100 #include <linux/ktime.h>
101 #include <linux/rbtree.h>
102 #include <linux/ioprio.h>
103 #include <linux/sbitmap.h>
104 #include <linux/delay.h>
105 
106 #include "blk.h"
107 #include "blk-mq.h"
108 #include "blk-mq-tag.h"
109 #include "blk-mq-sched.h"
110 #include "bfq-iosched.h"
111 
112 #define BFQ_BFQQ_FNS(name)						\
113 void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
114 {									\
115 	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
116 }									\
117 void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
118 {									\
119 	__clear_bit(BFQQF_##name, &(bfqq)->flags);		\
120 }									\
121 int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
122 {									\
123 	return test_bit(BFQQF_##name, &(bfqq)->flags);		\
124 }
125 
126 BFQ_BFQQ_FNS(just_created);
127 BFQ_BFQQ_FNS(busy);
128 BFQ_BFQQ_FNS(wait_request);
129 BFQ_BFQQ_FNS(non_blocking_wait_rq);
130 BFQ_BFQQ_FNS(fifo_expire);
131 BFQ_BFQQ_FNS(idle_window);
132 BFQ_BFQQ_FNS(sync);
133 BFQ_BFQQ_FNS(IO_bound);
134 BFQ_BFQQ_FNS(in_large_burst);
135 BFQ_BFQQ_FNS(coop);
136 BFQ_BFQQ_FNS(split_coop);
137 BFQ_BFQQ_FNS(softrt_update);
138 #undef BFQ_BFQQ_FNS						\
139 
140 /* Expiration time of sync (0) and async (1) requests, in ns. */
141 static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
142 
143 /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
144 static const int bfq_back_max = 16 * 1024;
145 
146 /* Penalty of a backwards seek, in number of sectors. */
147 static const int bfq_back_penalty = 2;
148 
149 /* Idling period duration, in ns. */
150 static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
151 
152 /* Minimum number of assigned budgets for which stats are safe to compute. */
153 static const int bfq_stats_min_budgets = 194;
154 
155 /* Default maximum budget values, in sectors and number of requests. */
156 static const int bfq_default_max_budget = 16 * 1024;
157 
158 /*
159  * Async to sync throughput distribution is controlled as follows:
160  * when an async request is served, the entity is charged the number
161  * of sectors of the request, multiplied by the factor below
162  */
163 static const int bfq_async_charge_factor = 10;
164 
165 /* Default timeout values, in jiffies, approximating CFQ defaults. */
166 const int bfq_timeout = HZ / 8;
167 
168 static struct kmem_cache *bfq_pool;
169 
170 /* Below this threshold (in ns), we consider thinktime immediate. */
171 #define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)
172 
173 /* hw_tag detection: parallel requests threshold and min samples needed. */
174 #define BFQ_HW_QUEUE_THRESHOLD	4
175 #define BFQ_HW_QUEUE_SAMPLES	32
176 
177 #define BFQQ_SEEK_THR		(sector_t)(8 * 100)
178 #define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
179 #define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
180 #define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 32/8)
181 
182 /* Min number of samples required to perform peak-rate update */
183 #define BFQ_RATE_MIN_SAMPLES	32
184 /* Min observation time interval required to perform a peak-rate update (ns) */
185 #define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
186 /* Target observation time interval for a peak-rate update (ns) */
187 #define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC
188 
189 /* Shift used for peak rate fixed precision calculations. */
190 #define BFQ_RATE_SHIFT		16
191 
192 /*
193  * By default, BFQ computes the duration of the weight raising for
194  * interactive applications automatically, using the following formula:
195  * duration = (R / r) * T, where r is the peak rate of the device, and
196  * R and T are two reference parameters.
197  * In particular, R is the peak rate of the reference device (see below),
198  * and T is a reference time: given the systems that are likely to be
199  * installed on the reference device according to its speed class, T is
200  * about the maximum time needed, under BFQ and while reading two files in
201  * parallel, to load typical large applications on these systems.
202  * In practice, the slower/faster the device at hand is, the more/less it
203  * takes to load applications with respect to the reference device.
204  * Accordingly, the longer/shorter BFQ grants weight raising to interactive
205  * applications.
206  *
207  * BFQ uses four different reference pairs (R, T), depending on:
208  * . whether the device is rotational or non-rotational;
209  * . whether the device is slow, such as old or portable HDDs, as well as
210  *   SD cards, or fast, such as newer HDDs and SSDs.
211  *
212  * The device's speed class is dynamically (re)detected in
213  * bfq_update_peak_rate() every time the estimated peak rate is updated.
214  *
215  * In the following definitions, R_slow[0]/R_fast[0] and
216  * T_slow[0]/T_fast[0] are the reference values for a slow/fast
217  * rotational device, whereas R_slow[1]/R_fast[1] and
218  * T_slow[1]/T_fast[1] are the reference values for a slow/fast
219  * non-rotational device. Finally, device_speed_thresh are the
220  * thresholds used to switch between speed classes. The reference
221  * rates are not the actual peak rates of the devices used as a
222  * reference, but slightly lower values. The reason for using these
223  * slightly lower values is that the peak-rate estimator tends to
224  * yield slightly lower values than the actual peak rate (it can yield
225  * the actual peak rate only if there is only one process doing I/O,
226  * and the process does sequential I/O).
227  *
228  * Both the reference peak rates and the thresholds are measured in
229  * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
230  */
231 static int R_slow[2] = {1000, 10700};
232 static int R_fast[2] = {14000, 33000};
233 /*
234  * To improve readability, a conversion function is used to initialize the
235  * following arrays, which entails that they can be initialized only in a
236  * function.
237  */
238 static int T_slow[2];
239 static int T_fast[2];
240 static int device_speed_thresh[2];
241 
242 #define RQ_BIC(rq)		((struct bfq_io_cq *) (rq)->elv.priv[0])
243 #define RQ_BFQQ(rq)		((rq)->elv.priv[1])
244 
245 struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
246 {
247 	return bic->bfqq[is_sync];
248 }
249 
250 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
251 {
252 	bic->bfqq[is_sync] = bfqq;
253 }
254 
255 struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
256 {
257 	return bic->icq.q->elevator->elevator_data;
258 }
259 
260 /**
261  * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
262  * @icq: the iocontext queue.
263  */
264 static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
265 {
266 	/* bic->icq is the first member, %NULL will convert to %NULL */
267 	return container_of(icq, struct bfq_io_cq, icq);
268 }
269 
270 /**
271  * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
272  * @bfqd: the lookup key.
273  * @ioc: the io_context of the process doing I/O.
274  * @q: the request queue.
275  */
276 static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
277 					struct io_context *ioc,
278 					struct request_queue *q)
279 {
280 	if (ioc) {
281 		unsigned long flags;
282 		struct bfq_io_cq *icq;
283 
284 		spin_lock_irqsave(q->queue_lock, flags);
285 		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
286 		spin_unlock_irqrestore(q->queue_lock, flags);
287 
288 		return icq;
289 	}
290 
291 	return NULL;
292 }
293 
294 /*
295  * Scheduler run of queue, if there are requests pending and no one in the
296  * driver that will restart queueing.
297  */
298 void bfq_schedule_dispatch(struct bfq_data *bfqd)
299 {
300 	if (bfqd->queued != 0) {
301 		bfq_log(bfqd, "schedule dispatch");
302 		blk_mq_run_hw_queues(bfqd->queue, true);
303 	}
304 }
305 
306 #define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
307 #define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
308 
309 #define bfq_sample_valid(samples)	((samples) > 80)
310 
311 /*
312  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
313  * We choose the request that is closesr to the head right now.  Distance
314  * behind the head is penalized and only allowed to a certain extent.
315  */
316 static struct request *bfq_choose_req(struct bfq_data *bfqd,
317 				      struct request *rq1,
318 				      struct request *rq2,
319 				      sector_t last)
320 {
321 	sector_t s1, s2, d1 = 0, d2 = 0;
322 	unsigned long back_max;
323 #define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
324 #define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
325 	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
326 
327 	if (!rq1 || rq1 == rq2)
328 		return rq2;
329 	if (!rq2)
330 		return rq1;
331 
332 	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
333 		return rq1;
334 	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
335 		return rq2;
336 	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
337 		return rq1;
338 	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
339 		return rq2;
340 
341 	s1 = blk_rq_pos(rq1);
342 	s2 = blk_rq_pos(rq2);
343 
344 	/*
345 	 * By definition, 1KiB is 2 sectors.
346 	 */
347 	back_max = bfqd->bfq_back_max * 2;
348 
349 	/*
350 	 * Strict one way elevator _except_ in the case where we allow
351 	 * short backward seeks which are biased as twice the cost of a
352 	 * similar forward seek.
353 	 */
354 	if (s1 >= last)
355 		d1 = s1 - last;
356 	else if (s1 + back_max >= last)
357 		d1 = (last - s1) * bfqd->bfq_back_penalty;
358 	else
359 		wrap |= BFQ_RQ1_WRAP;
360 
361 	if (s2 >= last)
362 		d2 = s2 - last;
363 	else if (s2 + back_max >= last)
364 		d2 = (last - s2) * bfqd->bfq_back_penalty;
365 	else
366 		wrap |= BFQ_RQ2_WRAP;
367 
368 	/* Found required data */
369 
370 	/*
371 	 * By doing switch() on the bit mask "wrap" we avoid having to
372 	 * check two variables for all permutations: --> faster!
373 	 */
374 	switch (wrap) {
375 	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
376 		if (d1 < d2)
377 			return rq1;
378 		else if (d2 < d1)
379 			return rq2;
380 
381 		if (s1 >= s2)
382 			return rq1;
383 		else
384 			return rq2;
385 
386 	case BFQ_RQ2_WRAP:
387 		return rq1;
388 	case BFQ_RQ1_WRAP:
389 		return rq2;
390 	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
391 	default:
392 		/*
393 		 * Since both rqs are wrapped,
394 		 * start with the one that's further behind head
395 		 * (--> only *one* back seek required),
396 		 * since back seek takes more time than forward.
397 		 */
398 		if (s1 <= s2)
399 			return rq1;
400 		else
401 			return rq2;
402 	}
403 }
404 
405 static struct bfq_queue *
406 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
407 		     sector_t sector, struct rb_node **ret_parent,
408 		     struct rb_node ***rb_link)
409 {
410 	struct rb_node **p, *parent;
411 	struct bfq_queue *bfqq = NULL;
412 
413 	parent = NULL;
414 	p = &root->rb_node;
415 	while (*p) {
416 		struct rb_node **n;
417 
418 		parent = *p;
419 		bfqq = rb_entry(parent, struct bfq_queue, pos_node);
420 
421 		/*
422 		 * Sort strictly based on sector. Smallest to the left,
423 		 * largest to the right.
424 		 */
425 		if (sector > blk_rq_pos(bfqq->next_rq))
426 			n = &(*p)->rb_right;
427 		else if (sector < blk_rq_pos(bfqq->next_rq))
428 			n = &(*p)->rb_left;
429 		else
430 			break;
431 		p = n;
432 		bfqq = NULL;
433 	}
434 
435 	*ret_parent = parent;
436 	if (rb_link)
437 		*rb_link = p;
438 
439 	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
440 		(unsigned long long)sector,
441 		bfqq ? bfqq->pid : 0);
442 
443 	return bfqq;
444 }
445 
446 void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
447 {
448 	struct rb_node **p, *parent;
449 	struct bfq_queue *__bfqq;
450 
451 	if (bfqq->pos_root) {
452 		rb_erase(&bfqq->pos_node, bfqq->pos_root);
453 		bfqq->pos_root = NULL;
454 	}
455 
456 	if (bfq_class_idle(bfqq))
457 		return;
458 	if (!bfqq->next_rq)
459 		return;
460 
461 	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
462 	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
463 			blk_rq_pos(bfqq->next_rq), &parent, &p);
464 	if (!__bfqq) {
465 		rb_link_node(&bfqq->pos_node, parent, p);
466 		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
467 	} else
468 		bfqq->pos_root = NULL;
469 }
470 
471 /*
472  * Tell whether there are active queues or groups with differentiated weights.
473  */
474 static bool bfq_differentiated_weights(struct bfq_data *bfqd)
475 {
476 	/*
477 	 * For weights to differ, at least one of the trees must contain
478 	 * at least two nodes.
479 	 */
480 	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
481 		(bfqd->queue_weights_tree.rb_node->rb_left ||
482 		 bfqd->queue_weights_tree.rb_node->rb_right)
483 #ifdef CONFIG_BFQ_GROUP_IOSCHED
484 	       ) ||
485 	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
486 		(bfqd->group_weights_tree.rb_node->rb_left ||
487 		 bfqd->group_weights_tree.rb_node->rb_right)
488 #endif
489 	       );
490 }
491 
492 /*
493  * The following function returns true if every queue must receive the
494  * same share of the throughput (this condition is used when deciding
495  * whether idling may be disabled, see the comments in the function
496  * bfq_bfqq_may_idle()).
497  *
498  * Such a scenario occurs when:
499  * 1) all active queues have the same weight,
500  * 2) all active groups at the same level in the groups tree have the same
501  *    weight,
502  * 3) all active groups at the same level in the groups tree have the same
503  *    number of children.
504  *
505  * Unfortunately, keeping the necessary state for evaluating exactly the
506  * above symmetry conditions would be quite complex and time-consuming.
507  * Therefore this function evaluates, instead, the following stronger
508  * sub-conditions, for which it is much easier to maintain the needed
509  * state:
510  * 1) all active queues have the same weight,
511  * 2) all active groups have the same weight,
512  * 3) all active groups have at most one active child each.
513  * In particular, the last two conditions are always true if hierarchical
514  * support and the cgroups interface are not enabled, thus no state needs
515  * to be maintained in this case.
516  */
517 static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
518 {
519 	return !bfq_differentiated_weights(bfqd);
520 }
521 
522 /*
523  * If the weight-counter tree passed as input contains no counter for
524  * the weight of the input entity, then add that counter; otherwise just
525  * increment the existing counter.
526  *
527  * Note that weight-counter trees contain few nodes in mostly symmetric
528  * scenarios. For example, if all queues have the same weight, then the
529  * weight-counter tree for the queues may contain at most one node.
530  * This holds even if low_latency is on, because weight-raised queues
531  * are not inserted in the tree.
532  * In most scenarios, the rate at which nodes are created/destroyed
533  * should be low too.
534  */
535 void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
536 			  struct rb_root *root)
537 {
538 	struct rb_node **new = &(root->rb_node), *parent = NULL;
539 
540 	/*
541 	 * Do not insert if the entity is already associated with a
542 	 * counter, which happens if:
543 	 *   1) the entity is associated with a queue,
544 	 *   2) a request arrival has caused the queue to become both
545 	 *      non-weight-raised, and hence change its weight, and
546 	 *      backlogged; in this respect, each of the two events
547 	 *      causes an invocation of this function,
548 	 *   3) this is the invocation of this function caused by the
549 	 *      second event. This second invocation is actually useless,
550 	 *      and we handle this fact by exiting immediately. More
551 	 *      efficient or clearer solutions might possibly be adopted.
552 	 */
553 	if (entity->weight_counter)
554 		return;
555 
556 	while (*new) {
557 		struct bfq_weight_counter *__counter = container_of(*new,
558 						struct bfq_weight_counter,
559 						weights_node);
560 		parent = *new;
561 
562 		if (entity->weight == __counter->weight) {
563 			entity->weight_counter = __counter;
564 			goto inc_counter;
565 		}
566 		if (entity->weight < __counter->weight)
567 			new = &((*new)->rb_left);
568 		else
569 			new = &((*new)->rb_right);
570 	}
571 
572 	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
573 					 GFP_ATOMIC);
574 
575 	/*
576 	 * In the unlucky event of an allocation failure, we just
577 	 * exit. This will cause the weight of entity to not be
578 	 * considered in bfq_differentiated_weights, which, in its
579 	 * turn, causes the scenario to be deemed wrongly symmetric in
580 	 * case entity's weight would have been the only weight making
581 	 * the scenario asymmetric. On the bright side, no unbalance
582 	 * will however occur when entity becomes inactive again (the
583 	 * invocation of this function is triggered by an activation
584 	 * of entity). In fact, bfq_weights_tree_remove does nothing
585 	 * if !entity->weight_counter.
586 	 */
587 	if (unlikely(!entity->weight_counter))
588 		return;
589 
590 	entity->weight_counter->weight = entity->weight;
591 	rb_link_node(&entity->weight_counter->weights_node, parent, new);
592 	rb_insert_color(&entity->weight_counter->weights_node, root);
593 
594 inc_counter:
595 	entity->weight_counter->num_active++;
596 }
597 
598 /*
599  * Decrement the weight counter associated with the entity, and, if the
600  * counter reaches 0, remove the counter from the tree.
601  * See the comments to the function bfq_weights_tree_add() for considerations
602  * about overhead.
603  */
604 void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
605 			     struct rb_root *root)
606 {
607 	if (!entity->weight_counter)
608 		return;
609 
610 	entity->weight_counter->num_active--;
611 	if (entity->weight_counter->num_active > 0)
612 		goto reset_entity_pointer;
613 
614 	rb_erase(&entity->weight_counter->weights_node, root);
615 	kfree(entity->weight_counter);
616 
617 reset_entity_pointer:
618 	entity->weight_counter = NULL;
619 }
620 
621 /*
622  * Return expired entry, or NULL to just start from scratch in rbtree.
623  */
624 static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
625 				      struct request *last)
626 {
627 	struct request *rq;
628 
629 	if (bfq_bfqq_fifo_expire(bfqq))
630 		return NULL;
631 
632 	bfq_mark_bfqq_fifo_expire(bfqq);
633 
634 	rq = rq_entry_fifo(bfqq->fifo.next);
635 
636 	if (rq == last || ktime_get_ns() < rq->fifo_time)
637 		return NULL;
638 
639 	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
640 	return rq;
641 }
642 
643 static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
644 					struct bfq_queue *bfqq,
645 					struct request *last)
646 {
647 	struct rb_node *rbnext = rb_next(&last->rb_node);
648 	struct rb_node *rbprev = rb_prev(&last->rb_node);
649 	struct request *next, *prev = NULL;
650 
651 	/* Follow expired path, else get first next available. */
652 	next = bfq_check_fifo(bfqq, last);
653 	if (next)
654 		return next;
655 
656 	if (rbprev)
657 		prev = rb_entry_rq(rbprev);
658 
659 	if (rbnext)
660 		next = rb_entry_rq(rbnext);
661 	else {
662 		rbnext = rb_first(&bfqq->sort_list);
663 		if (rbnext && rbnext != &last->rb_node)
664 			next = rb_entry_rq(rbnext);
665 	}
666 
667 	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
668 }
669 
670 /* see the definition of bfq_async_charge_factor for details */
671 static unsigned long bfq_serv_to_charge(struct request *rq,
672 					struct bfq_queue *bfqq)
673 {
674 	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
675 		return blk_rq_sectors(rq);
676 
677 	/*
678 	 * If there are no weight-raised queues, then amplify service
679 	 * by just the async charge factor; otherwise amplify service
680 	 * by twice the async charge factor, to further reduce latency
681 	 * for weight-raised queues.
682 	 */
683 	if (bfqq->bfqd->wr_busy_queues == 0)
684 		return blk_rq_sectors(rq) * bfq_async_charge_factor;
685 
686 	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
687 }
688 
689 /**
690  * bfq_updated_next_req - update the queue after a new next_rq selection.
691  * @bfqd: the device data the queue belongs to.
692  * @bfqq: the queue to update.
693  *
694  * If the first request of a queue changes we make sure that the queue
695  * has enough budget to serve at least its first request (if the
696  * request has grown).  We do this because if the queue has not enough
697  * budget for its first request, it has to go through two dispatch
698  * rounds to actually get it dispatched.
699  */
700 static void bfq_updated_next_req(struct bfq_data *bfqd,
701 				 struct bfq_queue *bfqq)
702 {
703 	struct bfq_entity *entity = &bfqq->entity;
704 	struct request *next_rq = bfqq->next_rq;
705 	unsigned long new_budget;
706 
707 	if (!next_rq)
708 		return;
709 
710 	if (bfqq == bfqd->in_service_queue)
711 		/*
712 		 * In order not to break guarantees, budgets cannot be
713 		 * changed after an entity has been selected.
714 		 */
715 		return;
716 
717 	new_budget = max_t(unsigned long, bfqq->max_budget,
718 			   bfq_serv_to_charge(next_rq, bfqq));
719 	if (entity->budget != new_budget) {
720 		entity->budget = new_budget;
721 		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
722 					 new_budget);
723 		bfq_requeue_bfqq(bfqd, bfqq);
724 	}
725 }
726 
727 static void
728 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
729 {
730 	if (bic->saved_idle_window)
731 		bfq_mark_bfqq_idle_window(bfqq);
732 	else
733 		bfq_clear_bfqq_idle_window(bfqq);
734 
735 	if (bic->saved_IO_bound)
736 		bfq_mark_bfqq_IO_bound(bfqq);
737 	else
738 		bfq_clear_bfqq_IO_bound(bfqq);
739 
740 	bfqq->ttime = bic->saved_ttime;
741 	bfqq->wr_coeff = bic->saved_wr_coeff;
742 	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
743 	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
744 	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
745 
746 	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
747 	    time_is_before_jiffies(bfqq->last_wr_start_finish +
748 				   bfqq->wr_cur_max_time))) {
749 		bfq_log_bfqq(bfqq->bfqd, bfqq,
750 		    "resume state: switching off wr");
751 
752 		bfqq->wr_coeff = 1;
753 	}
754 
755 	/* make sure weight will be updated, however we got here */
756 	bfqq->entity.prio_changed = 1;
757 }
758 
759 static int bfqq_process_refs(struct bfq_queue *bfqq)
760 {
761 	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
762 }
763 
764 /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
765 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
766 {
767 	struct bfq_queue *item;
768 	struct hlist_node *n;
769 
770 	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
771 		hlist_del_init(&item->burst_list_node);
772 	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
773 	bfqd->burst_size = 1;
774 	bfqd->burst_parent_entity = bfqq->entity.parent;
775 }
776 
777 /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
778 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
779 {
780 	/* Increment burst size to take into account also bfqq */
781 	bfqd->burst_size++;
782 
783 	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
784 		struct bfq_queue *pos, *bfqq_item;
785 		struct hlist_node *n;
786 
787 		/*
788 		 * Enough queues have been activated shortly after each
789 		 * other to consider this burst as large.
790 		 */
791 		bfqd->large_burst = true;
792 
793 		/*
794 		 * We can now mark all queues in the burst list as
795 		 * belonging to a large burst.
796 		 */
797 		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
798 				     burst_list_node)
799 			bfq_mark_bfqq_in_large_burst(bfqq_item);
800 		bfq_mark_bfqq_in_large_burst(bfqq);
801 
802 		/*
803 		 * From now on, and until the current burst finishes, any
804 		 * new queue being activated shortly after the last queue
805 		 * was inserted in the burst can be immediately marked as
806 		 * belonging to a large burst. So the burst list is not
807 		 * needed any more. Remove it.
808 		 */
809 		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
810 					  burst_list_node)
811 			hlist_del_init(&pos->burst_list_node);
812 	} else /*
813 		* Burst not yet large: add bfqq to the burst list. Do
814 		* not increment the ref counter for bfqq, because bfqq
815 		* is removed from the burst list before freeing bfqq
816 		* in put_queue.
817 		*/
818 		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
819 }
820 
821 /*
822  * If many queues belonging to the same group happen to be created
823  * shortly after each other, then the processes associated with these
824  * queues have typically a common goal. In particular, bursts of queue
825  * creations are usually caused by services or applications that spawn
826  * many parallel threads/processes. Examples are systemd during boot,
827  * or git grep. To help these processes get their job done as soon as
828  * possible, it is usually better to not grant either weight-raising
829  * or device idling to their queues.
830  *
831  * In this comment we describe, firstly, the reasons why this fact
832  * holds, and, secondly, the next function, which implements the main
833  * steps needed to properly mark these queues so that they can then be
834  * treated in a different way.
835  *
836  * The above services or applications benefit mostly from a high
837  * throughput: the quicker the requests of the activated queues are
838  * cumulatively served, the sooner the target job of these queues gets
839  * completed. As a consequence, weight-raising any of these queues,
840  * which also implies idling the device for it, is almost always
841  * counterproductive. In most cases it just lowers throughput.
842  *
843  * On the other hand, a burst of queue creations may be caused also by
844  * the start of an application that does not consist of a lot of
845  * parallel I/O-bound threads. In fact, with a complex application,
846  * several short processes may need to be executed to start-up the
847  * application. In this respect, to start an application as quickly as
848  * possible, the best thing to do is in any case to privilege the I/O
849  * related to the application with respect to all other
850  * I/O. Therefore, the best strategy to start as quickly as possible
851  * an application that causes a burst of queue creations is to
852  * weight-raise all the queues created during the burst. This is the
853  * exact opposite of the best strategy for the other type of bursts.
854  *
855  * In the end, to take the best action for each of the two cases, the
856  * two types of bursts need to be distinguished. Fortunately, this
857  * seems relatively easy, by looking at the sizes of the bursts. In
858  * particular, we found a threshold such that only bursts with a
859  * larger size than that threshold are apparently caused by
860  * services or commands such as systemd or git grep. For brevity,
861  * hereafter we call just 'large' these bursts. BFQ *does not*
862  * weight-raise queues whose creation occurs in a large burst. In
863  * addition, for each of these queues BFQ performs or does not perform
864  * idling depending on which choice boosts the throughput more. The
865  * exact choice depends on the device and request pattern at
866  * hand.
867  *
868  * Unfortunately, false positives may occur while an interactive task
869  * is starting (e.g., an application is being started). The
870  * consequence is that the queues associated with the task do not
871  * enjoy weight raising as expected. Fortunately these false positives
872  * are very rare. They typically occur if some service happens to
873  * start doing I/O exactly when the interactive task starts.
874  *
875  * Turning back to the next function, it implements all the steps
876  * needed to detect the occurrence of a large burst and to properly
877  * mark all the queues belonging to it (so that they can then be
878  * treated in a different way). This goal is achieved by maintaining a
879  * "burst list" that holds, temporarily, the queues that belong to the
880  * burst in progress. The list is then used to mark these queues as
881  * belonging to a large burst if the burst does become large. The main
882  * steps are the following.
883  *
884  * . when the very first queue is created, the queue is inserted into the
885  *   list (as it could be the first queue in a possible burst)
886  *
887  * . if the current burst has not yet become large, and a queue Q that does
888  *   not yet belong to the burst is activated shortly after the last time
889  *   at which a new queue entered the burst list, then the function appends
890  *   Q to the burst list
891  *
892  * . if, as a consequence of the previous step, the burst size reaches
893  *   the large-burst threshold, then
894  *
895  *     . all the queues in the burst list are marked as belonging to a
896  *       large burst
897  *
898  *     . the burst list is deleted; in fact, the burst list already served
899  *       its purpose (keeping temporarily track of the queues in a burst,
900  *       so as to be able to mark them as belonging to a large burst in the
901  *       previous sub-step), and now is not needed any more
902  *
903  *     . the device enters a large-burst mode
904  *
905  * . if a queue Q that does not belong to the burst is created while
906  *   the device is in large-burst mode and shortly after the last time
907  *   at which a queue either entered the burst list or was marked as
908  *   belonging to the current large burst, then Q is immediately marked
909  *   as belonging to a large burst.
910  *
911  * . if a queue Q that does not belong to the burst is created a while
912  *   later, i.e., not shortly after, than the last time at which a queue
913  *   either entered the burst list or was marked as belonging to the
914  *   current large burst, then the current burst is deemed as finished and:
915  *
916  *        . the large-burst mode is reset if set
917  *
918  *        . the burst list is emptied
919  *
920  *        . Q is inserted in the burst list, as Q may be the first queue
921  *          in a possible new burst (then the burst list contains just Q
922  *          after this step).
923  */
924 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
925 {
926 	/*
927 	 * If bfqq is already in the burst list or is part of a large
928 	 * burst, or finally has just been split, then there is
929 	 * nothing else to do.
930 	 */
931 	if (!hlist_unhashed(&bfqq->burst_list_node) ||
932 	    bfq_bfqq_in_large_burst(bfqq) ||
933 	    time_is_after_eq_jiffies(bfqq->split_time +
934 				     msecs_to_jiffies(10)))
935 		return;
936 
937 	/*
938 	 * If bfqq's creation happens late enough, or bfqq belongs to
939 	 * a different group than the burst group, then the current
940 	 * burst is finished, and related data structures must be
941 	 * reset.
942 	 *
943 	 * In this respect, consider the special case where bfqq is
944 	 * the very first queue created after BFQ is selected for this
945 	 * device. In this case, last_ins_in_burst and
946 	 * burst_parent_entity are not yet significant when we get
947 	 * here. But it is easy to verify that, whether or not the
948 	 * following condition is true, bfqq will end up being
949 	 * inserted into the burst list. In particular the list will
950 	 * happen to contain only bfqq. And this is exactly what has
951 	 * to happen, as bfqq may be the first queue of the first
952 	 * burst.
953 	 */
954 	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
955 	    bfqd->bfq_burst_interval) ||
956 	    bfqq->entity.parent != bfqd->burst_parent_entity) {
957 		bfqd->large_burst = false;
958 		bfq_reset_burst_list(bfqd, bfqq);
959 		goto end;
960 	}
961 
962 	/*
963 	 * If we get here, then bfqq is being activated shortly after the
964 	 * last queue. So, if the current burst is also large, we can mark
965 	 * bfqq as belonging to this large burst immediately.
966 	 */
967 	if (bfqd->large_burst) {
968 		bfq_mark_bfqq_in_large_burst(bfqq);
969 		goto end;
970 	}
971 
972 	/*
973 	 * If we get here, then a large-burst state has not yet been
974 	 * reached, but bfqq is being activated shortly after the last
975 	 * queue. Then we add bfqq to the burst.
976 	 */
977 	bfq_add_to_burst(bfqd, bfqq);
978 end:
979 	/*
980 	 * At this point, bfqq either has been added to the current
981 	 * burst or has caused the current burst to terminate and a
982 	 * possible new burst to start. In particular, in the second
983 	 * case, bfqq has become the first queue in the possible new
984 	 * burst.  In both cases last_ins_in_burst needs to be moved
985 	 * forward.
986 	 */
987 	bfqd->last_ins_in_burst = jiffies;
988 }
989 
990 static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
991 {
992 	struct bfq_entity *entity = &bfqq->entity;
993 
994 	return entity->budget - entity->service;
995 }
996 
997 /*
998  * If enough samples have been computed, return the current max budget
999  * stored in bfqd, which is dynamically updated according to the
1000  * estimated disk peak rate; otherwise return the default max budget
1001  */
1002 static int bfq_max_budget(struct bfq_data *bfqd)
1003 {
1004 	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1005 		return bfq_default_max_budget;
1006 	else
1007 		return bfqd->bfq_max_budget;
1008 }
1009 
1010 /*
1011  * Return min budget, which is a fraction of the current or default
1012  * max budget (trying with 1/32)
1013  */
1014 static int bfq_min_budget(struct bfq_data *bfqd)
1015 {
1016 	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1017 		return bfq_default_max_budget / 32;
1018 	else
1019 		return bfqd->bfq_max_budget / 32;
1020 }
1021 
1022 /*
1023  * The next function, invoked after the input queue bfqq switches from
1024  * idle to busy, updates the budget of bfqq. The function also tells
1025  * whether the in-service queue should be expired, by returning
1026  * true. The purpose of expiring the in-service queue is to give bfqq
1027  * the chance to possibly preempt the in-service queue, and the reason
1028  * for preempting the in-service queue is to achieve one of the two
1029  * goals below.
1030  *
1031  * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
1032  * expired because it has remained idle. In particular, bfqq may have
1033  * expired for one of the following two reasons:
1034  *
1035  * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1036  *   and did not make it to issue a new request before its last
1037  *   request was served;
1038  *
1039  * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1040  *   a new request before the expiration of the idling-time.
1041  *
1042  * Even if bfqq has expired for one of the above reasons, the process
1043  * associated with the queue may be however issuing requests greedily,
1044  * and thus be sensitive to the bandwidth it receives (bfqq may have
1045  * remained idle for other reasons: CPU high load, bfqq not enjoying
1046  * idling, I/O throttling somewhere in the path from the process to
1047  * the I/O scheduler, ...). But if, after every expiration for one of
1048  * the above two reasons, bfqq has to wait for the service of at least
1049  * one full budget of another queue before being served again, then
1050  * bfqq is likely to get a much lower bandwidth or resource time than
1051  * its reserved ones. To address this issue, two countermeasures need
1052  * to be taken.
1053  *
1054  * First, the budget and the timestamps of bfqq need to be updated in
1055  * a special way on bfqq reactivation: they need to be updated as if
1056  * bfqq did not remain idle and did not expire. In fact, if they are
1057  * computed as if bfqq expired and remained idle until reactivation,
1058  * then the process associated with bfqq is treated as if, instead of
1059  * being greedy, it stopped issuing requests when bfqq remained idle,
1060  * and restarts issuing requests only on this reactivation. In other
1061  * words, the scheduler does not help the process recover the "service
1062  * hole" between bfqq expiration and reactivation. As a consequence,
1063  * the process receives a lower bandwidth than its reserved one. In
1064  * contrast, to recover this hole, the budget must be updated as if
1065  * bfqq was not expired at all before this reactivation, i.e., it must
1066  * be set to the value of the remaining budget when bfqq was
1067  * expired. Along the same line, timestamps need to be assigned the
1068  * value they had the last time bfqq was selected for service, i.e.,
1069  * before last expiration. Thus timestamps need to be back-shifted
1070  * with respect to their normal computation (see [1] for more details
1071  * on this tricky aspect).
1072  *
1073  * Secondly, to allow the process to recover the hole, the in-service
1074  * queue must be expired too, to give bfqq the chance to preempt it
1075  * immediately. In fact, if bfqq has to wait for a full budget of the
1076  * in-service queue to be completed, then it may become impossible to
1077  * let the process recover the hole, even if the back-shifted
1078  * timestamps of bfqq are lower than those of the in-service queue. If
1079  * this happens for most or all of the holes, then the process may not
1080  * receive its reserved bandwidth. In this respect, it is worth noting
1081  * that, being the service of outstanding requests unpreemptible, a
1082  * little fraction of the holes may however be unrecoverable, thereby
1083  * causing a little loss of bandwidth.
1084  *
1085  * The last important point is detecting whether bfqq does need this
1086  * bandwidth recovery. In this respect, the next function deems the
1087  * process associated with bfqq greedy, and thus allows it to recover
1088  * the hole, if: 1) the process is waiting for the arrival of a new
1089  * request (which implies that bfqq expired for one of the above two
1090  * reasons), and 2) such a request has arrived soon. The first
1091  * condition is controlled through the flag non_blocking_wait_rq,
1092  * while the second through the flag arrived_in_time. If both
1093  * conditions hold, then the function computes the budget in the
1094  * above-described special way, and signals that the in-service queue
1095  * should be expired. Timestamp back-shifting is done later in
1096  * __bfq_activate_entity.
1097  *
1098  * 2. Reduce latency. Even if timestamps are not backshifted to let
1099  * the process associated with bfqq recover a service hole, bfqq may
1100  * however happen to have, after being (re)activated, a lower finish
1101  * timestamp than the in-service queue.	 That is, the next budget of
1102  * bfqq may have to be completed before the one of the in-service
1103  * queue. If this is the case, then preempting the in-service queue
1104  * allows this goal to be achieved, apart from the unpreemptible,
1105  * outstanding requests mentioned above.
1106  *
1107  * Unfortunately, regardless of which of the above two goals one wants
1108  * to achieve, service trees need first to be updated to know whether
1109  * the in-service queue must be preempted. To have service trees
1110  * correctly updated, the in-service queue must be expired and
1111  * rescheduled, and bfqq must be scheduled too. This is one of the
1112  * most costly operations (in future versions, the scheduling
1113  * mechanism may be re-designed in such a way to make it possible to
1114  * know whether preemption is needed without needing to update service
1115  * trees). In addition, queue preemptions almost always cause random
1116  * I/O, and thus loss of throughput. Because of these facts, the next
1117  * function adopts the following simple scheme to avoid both costly
1118  * operations and too frequent preemptions: it requests the expiration
1119  * of the in-service queue (unconditionally) only for queues that need
1120  * to recover a hole, or that either are weight-raised or deserve to
1121  * be weight-raised.
1122  */
1123 static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
1124 						struct bfq_queue *bfqq,
1125 						bool arrived_in_time,
1126 						bool wr_or_deserves_wr)
1127 {
1128 	struct bfq_entity *entity = &bfqq->entity;
1129 
1130 	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
1131 		/*
1132 		 * We do not clear the flag non_blocking_wait_rq here, as
1133 		 * the latter is used in bfq_activate_bfqq to signal
1134 		 * that timestamps need to be back-shifted (and is
1135 		 * cleared right after).
1136 		 */
1137 
1138 		/*
1139 		 * In next assignment we rely on that either
1140 		 * entity->service or entity->budget are not updated
1141 		 * on expiration if bfqq is empty (see
1142 		 * __bfq_bfqq_recalc_budget). Thus both quantities
1143 		 * remain unchanged after such an expiration, and the
1144 		 * following statement therefore assigns to
1145 		 * entity->budget the remaining budget on such an
1146 		 * expiration. For clarity, entity->service is not
1147 		 * updated on expiration in any case, and, in normal
1148 		 * operation, is reset only when bfqq is selected for
1149 		 * service (see bfq_get_next_queue).
1150 		 */
1151 		entity->budget = min_t(unsigned long,
1152 				       bfq_bfqq_budget_left(bfqq),
1153 				       bfqq->max_budget);
1154 
1155 		return true;
1156 	}
1157 
1158 	entity->budget = max_t(unsigned long, bfqq->max_budget,
1159 			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
1160 	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1161 	return wr_or_deserves_wr;
1162 }
1163 
1164 static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
1165 {
1166 	u64 dur;
1167 
1168 	if (bfqd->bfq_wr_max_time > 0)
1169 		return bfqd->bfq_wr_max_time;
1170 
1171 	dur = bfqd->RT_prod;
1172 	do_div(dur, bfqd->peak_rate);
1173 
1174 	/*
1175 	 * Limit duration between 3 and 13 seconds. Tests show that
1176 	 * higher values than 13 seconds often yield the opposite of
1177 	 * the desired result, i.e., worsen responsiveness by letting
1178 	 * non-interactive and non-soft-real-time applications
1179 	 * preserve weight raising for a too long time interval.
1180 	 *
1181 	 * On the other end, lower values than 3 seconds make it
1182 	 * difficult for most interactive tasks to complete their jobs
1183 	 * before weight-raising finishes.
1184 	 */
1185 	if (dur > msecs_to_jiffies(13000))
1186 		dur = msecs_to_jiffies(13000);
1187 	else if (dur < msecs_to_jiffies(3000))
1188 		dur = msecs_to_jiffies(3000);
1189 
1190 	return dur;
1191 }
1192 
1193 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1194 					     struct bfq_queue *bfqq,
1195 					     unsigned int old_wr_coeff,
1196 					     bool wr_or_deserves_wr,
1197 					     bool interactive,
1198 					     bool in_burst,
1199 					     bool soft_rt)
1200 {
1201 	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
1202 		/* start a weight-raising period */
1203 		if (interactive) {
1204 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1205 			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1206 		} else {
1207 			bfqq->wr_start_at_switch_to_srt = jiffies;
1208 			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1209 				BFQ_SOFTRT_WEIGHT_FACTOR;
1210 			bfqq->wr_cur_max_time =
1211 				bfqd->bfq_wr_rt_max_time;
1212 		}
1213 
1214 		/*
1215 		 * If needed, further reduce budget to make sure it is
1216 		 * close to bfqq's backlog, so as to reduce the
1217 		 * scheduling-error component due to a too large
1218 		 * budget. Do not care about throughput consequences,
1219 		 * but only about latency. Finally, do not assign a
1220 		 * too small budget either, to avoid increasing
1221 		 * latency by causing too frequent expirations.
1222 		 */
1223 		bfqq->entity.budget = min_t(unsigned long,
1224 					    bfqq->entity.budget,
1225 					    2 * bfq_min_budget(bfqd));
1226 	} else if (old_wr_coeff > 1) {
1227 		if (interactive) { /* update wr coeff and duration */
1228 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1229 			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1230 		} else if (in_burst)
1231 			bfqq->wr_coeff = 1;
1232 		else if (soft_rt) {
1233 			/*
1234 			 * The application is now or still meeting the
1235 			 * requirements for being deemed soft rt.  We
1236 			 * can then correctly and safely (re)charge
1237 			 * the weight-raising duration for the
1238 			 * application with the weight-raising
1239 			 * duration for soft rt applications.
1240 			 *
1241 			 * In particular, doing this recharge now, i.e.,
1242 			 * before the weight-raising period for the
1243 			 * application finishes, reduces the probability
1244 			 * of the following negative scenario:
1245 			 * 1) the weight of a soft rt application is
1246 			 *    raised at startup (as for any newly
1247 			 *    created application),
1248 			 * 2) since the application is not interactive,
1249 			 *    at a certain time weight-raising is
1250 			 *    stopped for the application,
1251 			 * 3) at that time the application happens to
1252 			 *    still have pending requests, and hence
1253 			 *    is destined to not have a chance to be
1254 			 *    deemed soft rt before these requests are
1255 			 *    completed (see the comments to the
1256 			 *    function bfq_bfqq_softrt_next_start()
1257 			 *    for details on soft rt detection),
1258 			 * 4) these pending requests experience a high
1259 			 *    latency because the application is not
1260 			 *    weight-raised while they are pending.
1261 			 */
1262 			if (bfqq->wr_cur_max_time !=
1263 				bfqd->bfq_wr_rt_max_time) {
1264 				bfqq->wr_start_at_switch_to_srt =
1265 					bfqq->last_wr_start_finish;
1266 
1267 				bfqq->wr_cur_max_time =
1268 					bfqd->bfq_wr_rt_max_time;
1269 				bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1270 					BFQ_SOFTRT_WEIGHT_FACTOR;
1271 			}
1272 			bfqq->last_wr_start_finish = jiffies;
1273 		}
1274 	}
1275 }
1276 
1277 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
1278 					struct bfq_queue *bfqq)
1279 {
1280 	return bfqq->dispatched == 0 &&
1281 		time_is_before_jiffies(
1282 			bfqq->budget_timeout +
1283 			bfqd->bfq_wr_min_idle_time);
1284 }
1285 
1286 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1287 					     struct bfq_queue *bfqq,
1288 					     int old_wr_coeff,
1289 					     struct request *rq,
1290 					     bool *interactive)
1291 {
1292 	bool soft_rt, in_burst,	wr_or_deserves_wr,
1293 		bfqq_wants_to_preempt,
1294 		idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1295 		/*
1296 		 * See the comments on
1297 		 * bfq_bfqq_update_budg_for_activation for
1298 		 * details on the usage of the next variable.
1299 		 */
1300 		arrived_in_time =  ktime_get_ns() <=
1301 			bfqq->ttime.last_end_request +
1302 			bfqd->bfq_slice_idle * 3;
1303 
1304 	bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
1305 
1306 	/*
1307 	 * bfqq deserves to be weight-raised if:
1308 	 * - it is sync,
1309 	 * - it does not belong to a large burst,
1310 	 * - it has been idle for enough time or is soft real-time,
1311 	 * - is linked to a bfq_io_cq (it is not shared in any sense).
1312 	 */
1313 	in_burst = bfq_bfqq_in_large_burst(bfqq);
1314 	soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
1315 		!in_burst &&
1316 		time_is_before_jiffies(bfqq->soft_rt_next_start);
1317 	*interactive = !in_burst && idle_for_long_time;
1318 	wr_or_deserves_wr = bfqd->low_latency &&
1319 		(bfqq->wr_coeff > 1 ||
1320 		 (bfq_bfqq_sync(bfqq) &&
1321 		  bfqq->bic && (*interactive || soft_rt)));
1322 
1323 	/*
1324 	 * Using the last flag, update budget and check whether bfqq
1325 	 * may want to preempt the in-service queue.
1326 	 */
1327 	bfqq_wants_to_preempt =
1328 		bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1329 						    arrived_in_time,
1330 						    wr_or_deserves_wr);
1331 
1332 	/*
1333 	 * If bfqq happened to be activated in a burst, but has been
1334 	 * idle for much more than an interactive queue, then we
1335 	 * assume that, in the overall I/O initiated in the burst, the
1336 	 * I/O associated with bfqq is finished. So bfqq does not need
1337 	 * to be treated as a queue belonging to a burst
1338 	 * anymore. Accordingly, we reset bfqq's in_large_burst flag
1339 	 * if set, and remove bfqq from the burst list if it's
1340 	 * there. We do not decrement burst_size, because the fact
1341 	 * that bfqq does not need to belong to the burst list any
1342 	 * more does not invalidate the fact that bfqq was created in
1343 	 * a burst.
1344 	 */
1345 	if (likely(!bfq_bfqq_just_created(bfqq)) &&
1346 	    idle_for_long_time &&
1347 	    time_is_before_jiffies(
1348 		    bfqq->budget_timeout +
1349 		    msecs_to_jiffies(10000))) {
1350 		hlist_del_init(&bfqq->burst_list_node);
1351 		bfq_clear_bfqq_in_large_burst(bfqq);
1352 	}
1353 
1354 	bfq_clear_bfqq_just_created(bfqq);
1355 
1356 
1357 	if (!bfq_bfqq_IO_bound(bfqq)) {
1358 		if (arrived_in_time) {
1359 			bfqq->requests_within_timer++;
1360 			if (bfqq->requests_within_timer >=
1361 			    bfqd->bfq_requests_within_timer)
1362 				bfq_mark_bfqq_IO_bound(bfqq);
1363 		} else
1364 			bfqq->requests_within_timer = 0;
1365 	}
1366 
1367 	if (bfqd->low_latency) {
1368 		if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1369 			/* wraparound */
1370 			bfqq->split_time =
1371 				jiffies - bfqd->bfq_wr_min_idle_time - 1;
1372 
1373 		if (time_is_before_jiffies(bfqq->split_time +
1374 					   bfqd->bfq_wr_min_idle_time)) {
1375 			bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1376 							 old_wr_coeff,
1377 							 wr_or_deserves_wr,
1378 							 *interactive,
1379 							 in_burst,
1380 							 soft_rt);
1381 
1382 			if (old_wr_coeff != bfqq->wr_coeff)
1383 				bfqq->entity.prio_changed = 1;
1384 		}
1385 	}
1386 
1387 	bfqq->last_idle_bklogged = jiffies;
1388 	bfqq->service_from_backlogged = 0;
1389 	bfq_clear_bfqq_softrt_update(bfqq);
1390 
1391 	bfq_add_bfqq_busy(bfqd, bfqq);
1392 
1393 	/*
1394 	 * Expire in-service queue only if preemption may be needed
1395 	 * for guarantees. In this respect, the function
1396 	 * next_queue_may_preempt just checks a simple, necessary
1397 	 * condition, and not a sufficient condition based on
1398 	 * timestamps. In fact, for the latter condition to be
1399 	 * evaluated, timestamps would need first to be updated, and
1400 	 * this operation is quite costly (see the comments on the
1401 	 * function bfq_bfqq_update_budg_for_activation).
1402 	 */
1403 	if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
1404 	    bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
1405 	    next_queue_may_preempt(bfqd))
1406 		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1407 				false, BFQQE_PREEMPTED);
1408 }
1409 
1410 static void bfq_add_request(struct request *rq)
1411 {
1412 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
1413 	struct bfq_data *bfqd = bfqq->bfqd;
1414 	struct request *next_rq, *prev;
1415 	unsigned int old_wr_coeff = bfqq->wr_coeff;
1416 	bool interactive = false;
1417 
1418 	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1419 	bfqq->queued[rq_is_sync(rq)]++;
1420 	bfqd->queued++;
1421 
1422 	elv_rb_add(&bfqq->sort_list, rq);
1423 
1424 	/*
1425 	 * Check if this request is a better next-serve candidate.
1426 	 */
1427 	prev = bfqq->next_rq;
1428 	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1429 	bfqq->next_rq = next_rq;
1430 
1431 	/*
1432 	 * Adjust priority tree position, if next_rq changes.
1433 	 */
1434 	if (prev != bfqq->next_rq)
1435 		bfq_pos_tree_add_move(bfqd, bfqq);
1436 
1437 	if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
1438 		bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1439 						 rq, &interactive);
1440 	else {
1441 		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1442 		    time_is_before_jiffies(
1443 				bfqq->last_wr_start_finish +
1444 				bfqd->bfq_wr_min_inter_arr_async)) {
1445 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1446 			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1447 
1448 			bfqd->wr_busy_queues++;
1449 			bfqq->entity.prio_changed = 1;
1450 		}
1451 		if (prev != bfqq->next_rq)
1452 			bfq_updated_next_req(bfqd, bfqq);
1453 	}
1454 
1455 	/*
1456 	 * Assign jiffies to last_wr_start_finish in the following
1457 	 * cases:
1458 	 *
1459 	 * . if bfqq is not going to be weight-raised, because, for
1460 	 *   non weight-raised queues, last_wr_start_finish stores the
1461 	 *   arrival time of the last request; as of now, this piece
1462 	 *   of information is used only for deciding whether to
1463 	 *   weight-raise async queues
1464 	 *
1465 	 * . if bfqq is not weight-raised, because, if bfqq is now
1466 	 *   switching to weight-raised, then last_wr_start_finish
1467 	 *   stores the time when weight-raising starts
1468 	 *
1469 	 * . if bfqq is interactive, because, regardless of whether
1470 	 *   bfqq is currently weight-raised, the weight-raising
1471 	 *   period must start or restart (this case is considered
1472 	 *   separately because it is not detected by the above
1473 	 *   conditions, if bfqq is already weight-raised)
1474 	 *
1475 	 * last_wr_start_finish has to be updated also if bfqq is soft
1476 	 * real-time, because the weight-raising period is constantly
1477 	 * restarted on idle-to-busy transitions for these queues, but
1478 	 * this is already done in bfq_bfqq_handle_idle_busy_switch if
1479 	 * needed.
1480 	 */
1481 	if (bfqd->low_latency &&
1482 		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1483 		bfqq->last_wr_start_finish = jiffies;
1484 }
1485 
1486 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1487 					  struct bio *bio,
1488 					  struct request_queue *q)
1489 {
1490 	struct bfq_queue *bfqq = bfqd->bio_bfqq;
1491 
1492 
1493 	if (bfqq)
1494 		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1495 
1496 	return NULL;
1497 }
1498 
1499 static sector_t get_sdist(sector_t last_pos, struct request *rq)
1500 {
1501 	if (last_pos)
1502 		return abs(blk_rq_pos(rq) - last_pos);
1503 
1504 	return 0;
1505 }
1506 
1507 #if 0 /* Still not clear if we can do without next two functions */
1508 static void bfq_activate_request(struct request_queue *q, struct request *rq)
1509 {
1510 	struct bfq_data *bfqd = q->elevator->elevator_data;
1511 
1512 	bfqd->rq_in_driver++;
1513 }
1514 
1515 static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1516 {
1517 	struct bfq_data *bfqd = q->elevator->elevator_data;
1518 
1519 	bfqd->rq_in_driver--;
1520 }
1521 #endif
1522 
1523 static void bfq_remove_request(struct request_queue *q,
1524 			       struct request *rq)
1525 {
1526 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
1527 	struct bfq_data *bfqd = bfqq->bfqd;
1528 	const int sync = rq_is_sync(rq);
1529 
1530 	if (bfqq->next_rq == rq) {
1531 		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1532 		bfq_updated_next_req(bfqd, bfqq);
1533 	}
1534 
1535 	if (rq->queuelist.prev != &rq->queuelist)
1536 		list_del_init(&rq->queuelist);
1537 	bfqq->queued[sync]--;
1538 	bfqd->queued--;
1539 	elv_rb_del(&bfqq->sort_list, rq);
1540 
1541 	elv_rqhash_del(q, rq);
1542 	if (q->last_merge == rq)
1543 		q->last_merge = NULL;
1544 
1545 	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1546 		bfqq->next_rq = NULL;
1547 
1548 		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
1549 			bfq_del_bfqq_busy(bfqd, bfqq, false);
1550 			/*
1551 			 * bfqq emptied. In normal operation, when
1552 			 * bfqq is empty, bfqq->entity.service and
1553 			 * bfqq->entity.budget must contain,
1554 			 * respectively, the service received and the
1555 			 * budget used last time bfqq emptied. These
1556 			 * facts do not hold in this case, as at least
1557 			 * this last removal occurred while bfqq is
1558 			 * not in service. To avoid inconsistencies,
1559 			 * reset both bfqq->entity.service and
1560 			 * bfqq->entity.budget, if bfqq has still a
1561 			 * process that may issue I/O requests to it.
1562 			 */
1563 			bfqq->entity.budget = bfqq->entity.service = 0;
1564 		}
1565 
1566 		/*
1567 		 * Remove queue from request-position tree as it is empty.
1568 		 */
1569 		if (bfqq->pos_root) {
1570 			rb_erase(&bfqq->pos_node, bfqq->pos_root);
1571 			bfqq->pos_root = NULL;
1572 		}
1573 	}
1574 
1575 	if (rq->cmd_flags & REQ_META)
1576 		bfqq->meta_pending--;
1577 
1578 	bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
1579 }
1580 
1581 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
1582 {
1583 	struct request_queue *q = hctx->queue;
1584 	struct bfq_data *bfqd = q->elevator->elevator_data;
1585 	struct request *free = NULL;
1586 	/*
1587 	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
1588 	 * store its return value for later use, to avoid nesting
1589 	 * queue_lock inside the bfqd->lock. We assume that the bic
1590 	 * returned by bfq_bic_lookup does not go away before
1591 	 * bfqd->lock is taken.
1592 	 */
1593 	struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
1594 	bool ret;
1595 
1596 	spin_lock_irq(&bfqd->lock);
1597 
1598 	if (bic)
1599 		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1600 	else
1601 		bfqd->bio_bfqq = NULL;
1602 	bfqd->bio_bic = bic;
1603 
1604 	ret = blk_mq_sched_try_merge(q, bio, &free);
1605 
1606 	if (free)
1607 		blk_mq_free_request(free);
1608 	spin_unlock_irq(&bfqd->lock);
1609 
1610 	return ret;
1611 }
1612 
1613 static int bfq_request_merge(struct request_queue *q, struct request **req,
1614 			     struct bio *bio)
1615 {
1616 	struct bfq_data *bfqd = q->elevator->elevator_data;
1617 	struct request *__rq;
1618 
1619 	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
1620 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
1621 		*req = __rq;
1622 		return ELEVATOR_FRONT_MERGE;
1623 	}
1624 
1625 	return ELEVATOR_NO_MERGE;
1626 }
1627 
1628 static void bfq_request_merged(struct request_queue *q, struct request *req,
1629 			       enum elv_merge type)
1630 {
1631 	if (type == ELEVATOR_FRONT_MERGE &&
1632 	    rb_prev(&req->rb_node) &&
1633 	    blk_rq_pos(req) <
1634 	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
1635 				    struct request, rb_node))) {
1636 		struct bfq_queue *bfqq = RQ_BFQQ(req);
1637 		struct bfq_data *bfqd = bfqq->bfqd;
1638 		struct request *prev, *next_rq;
1639 
1640 		/* Reposition request in its sort_list */
1641 		elv_rb_del(&bfqq->sort_list, req);
1642 		elv_rb_add(&bfqq->sort_list, req);
1643 
1644 		/* Choose next request to be served for bfqq */
1645 		prev = bfqq->next_rq;
1646 		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1647 					 bfqd->last_position);
1648 		bfqq->next_rq = next_rq;
1649 		/*
1650 		 * If next_rq changes, update both the queue's budget to
1651 		 * fit the new request and the queue's position in its
1652 		 * rq_pos_tree.
1653 		 */
1654 		if (prev != bfqq->next_rq) {
1655 			bfq_updated_next_req(bfqd, bfqq);
1656 			bfq_pos_tree_add_move(bfqd, bfqq);
1657 		}
1658 	}
1659 }
1660 
1661 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1662 				struct request *next)
1663 {
1664 	struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1665 
1666 	if (!RB_EMPTY_NODE(&rq->rb_node))
1667 		goto end;
1668 	spin_lock_irq(&bfqq->bfqd->lock);
1669 
1670 	/*
1671 	 * If next and rq belong to the same bfq_queue and next is older
1672 	 * than rq, then reposition rq in the fifo (by substituting next
1673 	 * with rq). Otherwise, if next and rq belong to different
1674 	 * bfq_queues, never reposition rq: in fact, we would have to
1675 	 * reposition it with respect to next's position in its own fifo,
1676 	 * which would most certainly be too expensive with respect to
1677 	 * the benefits.
1678 	 */
1679 	if (bfqq == next_bfqq &&
1680 	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1681 	    next->fifo_time < rq->fifo_time) {
1682 		list_del_init(&rq->queuelist);
1683 		list_replace_init(&next->queuelist, &rq->queuelist);
1684 		rq->fifo_time = next->fifo_time;
1685 	}
1686 
1687 	if (bfqq->next_rq == next)
1688 		bfqq->next_rq = rq;
1689 
1690 	bfq_remove_request(q, next);
1691 
1692 	spin_unlock_irq(&bfqq->bfqd->lock);
1693 end:
1694 	bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
1695 }
1696 
1697 /* Must be called with bfqq != NULL */
1698 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1699 {
1700 	if (bfq_bfqq_busy(bfqq))
1701 		bfqq->bfqd->wr_busy_queues--;
1702 	bfqq->wr_coeff = 1;
1703 	bfqq->wr_cur_max_time = 0;
1704 	bfqq->last_wr_start_finish = jiffies;
1705 	/*
1706 	 * Trigger a weight change on the next invocation of
1707 	 * __bfq_entity_update_weight_prio.
1708 	 */
1709 	bfqq->entity.prio_changed = 1;
1710 }
1711 
1712 void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1713 			     struct bfq_group *bfqg)
1714 {
1715 	int i, j;
1716 
1717 	for (i = 0; i < 2; i++)
1718 		for (j = 0; j < IOPRIO_BE_NR; j++)
1719 			if (bfqg->async_bfqq[i][j])
1720 				bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1721 	if (bfqg->async_idle_bfqq)
1722 		bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1723 }
1724 
1725 static void bfq_end_wr(struct bfq_data *bfqd)
1726 {
1727 	struct bfq_queue *bfqq;
1728 
1729 	spin_lock_irq(&bfqd->lock);
1730 
1731 	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1732 		bfq_bfqq_end_wr(bfqq);
1733 	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1734 		bfq_bfqq_end_wr(bfqq);
1735 	bfq_end_wr_async(bfqd);
1736 
1737 	spin_unlock_irq(&bfqd->lock);
1738 }
1739 
1740 static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1741 {
1742 	if (request)
1743 		return blk_rq_pos(io_struct);
1744 	else
1745 		return ((struct bio *)io_struct)->bi_iter.bi_sector;
1746 }
1747 
1748 static int bfq_rq_close_to_sector(void *io_struct, bool request,
1749 				  sector_t sector)
1750 {
1751 	return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1752 	       BFQQ_CLOSE_THR;
1753 }
1754 
1755 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1756 					 struct bfq_queue *bfqq,
1757 					 sector_t sector)
1758 {
1759 	struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1760 	struct rb_node *parent, *node;
1761 	struct bfq_queue *__bfqq;
1762 
1763 	if (RB_EMPTY_ROOT(root))
1764 		return NULL;
1765 
1766 	/*
1767 	 * First, if we find a request starting at the end of the last
1768 	 * request, choose it.
1769 	 */
1770 	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1771 	if (__bfqq)
1772 		return __bfqq;
1773 
1774 	/*
1775 	 * If the exact sector wasn't found, the parent of the NULL leaf
1776 	 * will contain the closest sector (rq_pos_tree sorted by
1777 	 * next_request position).
1778 	 */
1779 	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1780 	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1781 		return __bfqq;
1782 
1783 	if (blk_rq_pos(__bfqq->next_rq) < sector)
1784 		node = rb_next(&__bfqq->pos_node);
1785 	else
1786 		node = rb_prev(&__bfqq->pos_node);
1787 	if (!node)
1788 		return NULL;
1789 
1790 	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
1791 	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1792 		return __bfqq;
1793 
1794 	return NULL;
1795 }
1796 
1797 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1798 						   struct bfq_queue *cur_bfqq,
1799 						   sector_t sector)
1800 {
1801 	struct bfq_queue *bfqq;
1802 
1803 	/*
1804 	 * We shall notice if some of the queues are cooperating,
1805 	 * e.g., working closely on the same area of the device. In
1806 	 * that case, we can group them together and: 1) don't waste
1807 	 * time idling, and 2) serve the union of their requests in
1808 	 * the best possible order for throughput.
1809 	 */
1810 	bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1811 	if (!bfqq || bfqq == cur_bfqq)
1812 		return NULL;
1813 
1814 	return bfqq;
1815 }
1816 
1817 static struct bfq_queue *
1818 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1819 {
1820 	int process_refs, new_process_refs;
1821 	struct bfq_queue *__bfqq;
1822 
1823 	/*
1824 	 * If there are no process references on the new_bfqq, then it is
1825 	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1826 	 * may have dropped their last reference (not just their last process
1827 	 * reference).
1828 	 */
1829 	if (!bfqq_process_refs(new_bfqq))
1830 		return NULL;
1831 
1832 	/* Avoid a circular list and skip interim queue merges. */
1833 	while ((__bfqq = new_bfqq->new_bfqq)) {
1834 		if (__bfqq == bfqq)
1835 			return NULL;
1836 		new_bfqq = __bfqq;
1837 	}
1838 
1839 	process_refs = bfqq_process_refs(bfqq);
1840 	new_process_refs = bfqq_process_refs(new_bfqq);
1841 	/*
1842 	 * If the process for the bfqq has gone away, there is no
1843 	 * sense in merging the queues.
1844 	 */
1845 	if (process_refs == 0 || new_process_refs == 0)
1846 		return NULL;
1847 
1848 	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1849 		new_bfqq->pid);
1850 
1851 	/*
1852 	 * Merging is just a redirection: the requests of the process
1853 	 * owning one of the two queues are redirected to the other queue.
1854 	 * The latter queue, in its turn, is set as shared if this is the
1855 	 * first time that the requests of some process are redirected to
1856 	 * it.
1857 	 *
1858 	 * We redirect bfqq to new_bfqq and not the opposite, because
1859 	 * we are in the context of the process owning bfqq, thus we
1860 	 * have the io_cq of this process. So we can immediately
1861 	 * configure this io_cq to redirect the requests of the
1862 	 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1863 	 * not available any more (new_bfqq->bic == NULL).
1864 	 *
1865 	 * Anyway, even in case new_bfqq coincides with the in-service
1866 	 * queue, redirecting requests the in-service queue is the
1867 	 * best option, as we feed the in-service queue with new
1868 	 * requests close to the last request served and, by doing so,
1869 	 * are likely to increase the throughput.
1870 	 */
1871 	bfqq->new_bfqq = new_bfqq;
1872 	new_bfqq->ref += process_refs;
1873 	return new_bfqq;
1874 }
1875 
1876 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1877 					struct bfq_queue *new_bfqq)
1878 {
1879 	if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1880 	    (bfqq->ioprio_class != new_bfqq->ioprio_class))
1881 		return false;
1882 
1883 	/*
1884 	 * If either of the queues has already been detected as seeky,
1885 	 * then merging it with the other queue is unlikely to lead to
1886 	 * sequential I/O.
1887 	 */
1888 	if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1889 		return false;
1890 
1891 	/*
1892 	 * Interleaved I/O is known to be done by (some) applications
1893 	 * only for reads, so it does not make sense to merge async
1894 	 * queues.
1895 	 */
1896 	if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1897 		return false;
1898 
1899 	return true;
1900 }
1901 
1902 /*
1903  * If this function returns true, then bfqq cannot be merged. The idea
1904  * is that true cooperation happens very early after processes start
1905  * to do I/O. Usually, late cooperations are just accidental false
1906  * positives. In case bfqq is weight-raised, such false positives
1907  * would evidently degrade latency guarantees for bfqq.
1908  */
1909 static bool wr_from_too_long(struct bfq_queue *bfqq)
1910 {
1911 	return bfqq->wr_coeff > 1 &&
1912 		time_is_before_jiffies(bfqq->last_wr_start_finish +
1913 				       msecs_to_jiffies(100));
1914 }
1915 
1916 /*
1917  * Attempt to schedule a merge of bfqq with the currently in-service
1918  * queue or with a close queue among the scheduled queues.  Return
1919  * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1920  * structure otherwise.
1921  *
1922  * The OOM queue is not allowed to participate to cooperation: in fact, since
1923  * the requests temporarily redirected to the OOM queue could be redirected
1924  * again to dedicated queues at any time, the state needed to correctly
1925  * handle merging with the OOM queue would be quite complex and expensive
1926  * to maintain. Besides, in such a critical condition as an out of memory,
1927  * the benefits of queue merging may be little relevant, or even negligible.
1928  *
1929  * Weight-raised queues can be merged only if their weight-raising
1930  * period has just started. In fact cooperating processes are usually
1931  * started together. Thus, with this filter we avoid false positives
1932  * that would jeopardize low-latency guarantees.
1933  *
1934  * WARNING: queue merging may impair fairness among non-weight raised
1935  * queues, for at least two reasons: 1) the original weight of a
1936  * merged queue may change during the merged state, 2) even being the
1937  * weight the same, a merged queue may be bloated with many more
1938  * requests than the ones produced by its originally-associated
1939  * process.
1940  */
1941 static struct bfq_queue *
1942 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1943 		     void *io_struct, bool request)
1944 {
1945 	struct bfq_queue *in_service_bfqq, *new_bfqq;
1946 
1947 	if (bfqq->new_bfqq)
1948 		return bfqq->new_bfqq;
1949 
1950 	if (!io_struct ||
1951 	    wr_from_too_long(bfqq) ||
1952 	    unlikely(bfqq == &bfqd->oom_bfqq))
1953 		return NULL;
1954 
1955 	/* If there is only one backlogged queue, don't search. */
1956 	if (bfqd->busy_queues == 1)
1957 		return NULL;
1958 
1959 	in_service_bfqq = bfqd->in_service_queue;
1960 
1961 	if (!in_service_bfqq || in_service_bfqq == bfqq
1962 	    || wr_from_too_long(in_service_bfqq) ||
1963 	    unlikely(in_service_bfqq == &bfqd->oom_bfqq))
1964 		goto check_scheduled;
1965 
1966 	if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
1967 	    bfqq->entity.parent == in_service_bfqq->entity.parent &&
1968 	    bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
1969 		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
1970 		if (new_bfqq)
1971 			return new_bfqq;
1972 	}
1973 	/*
1974 	 * Check whether there is a cooperator among currently scheduled
1975 	 * queues. The only thing we need is that the bio/request is not
1976 	 * NULL, as we need it to establish whether a cooperator exists.
1977 	 */
1978 check_scheduled:
1979 	new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
1980 			bfq_io_struct_pos(io_struct, request));
1981 
1982 	if (new_bfqq && !wr_from_too_long(new_bfqq) &&
1983 	    likely(new_bfqq != &bfqd->oom_bfqq) &&
1984 	    bfq_may_be_close_cooperator(bfqq, new_bfqq))
1985 		return bfq_setup_merge(bfqq, new_bfqq);
1986 
1987 	return NULL;
1988 }
1989 
1990 static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
1991 {
1992 	struct bfq_io_cq *bic = bfqq->bic;
1993 
1994 	/*
1995 	 * If !bfqq->bic, the queue is already shared or its requests
1996 	 * have already been redirected to a shared queue; both idle window
1997 	 * and weight raising state have already been saved. Do nothing.
1998 	 */
1999 	if (!bic)
2000 		return;
2001 
2002 	bic->saved_ttime = bfqq->ttime;
2003 	bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
2004 	bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
2005 	bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2006 	bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
2007 	bic->saved_wr_coeff = bfqq->wr_coeff;
2008 	bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
2009 	bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2010 	bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2011 }
2012 
2013 static void
2014 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2015 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2016 {
2017 	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2018 		(unsigned long)new_bfqq->pid);
2019 	/* Save weight raising and idle window of the merged queues */
2020 	bfq_bfqq_save_state(bfqq);
2021 	bfq_bfqq_save_state(new_bfqq);
2022 	if (bfq_bfqq_IO_bound(bfqq))
2023 		bfq_mark_bfqq_IO_bound(new_bfqq);
2024 	bfq_clear_bfqq_IO_bound(bfqq);
2025 
2026 	/*
2027 	 * If bfqq is weight-raised, then let new_bfqq inherit
2028 	 * weight-raising. To reduce false positives, neglect the case
2029 	 * where bfqq has just been created, but has not yet made it
2030 	 * to be weight-raised (which may happen because EQM may merge
2031 	 * bfqq even before bfq_add_request is executed for the first
2032 	 * time for bfqq). Handling this case would however be very
2033 	 * easy, thanks to the flag just_created.
2034 	 */
2035 	if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2036 		new_bfqq->wr_coeff = bfqq->wr_coeff;
2037 		new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2038 		new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2039 		new_bfqq->wr_start_at_switch_to_srt =
2040 			bfqq->wr_start_at_switch_to_srt;
2041 		if (bfq_bfqq_busy(new_bfqq))
2042 			bfqd->wr_busy_queues++;
2043 		new_bfqq->entity.prio_changed = 1;
2044 	}
2045 
2046 	if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2047 		bfqq->wr_coeff = 1;
2048 		bfqq->entity.prio_changed = 1;
2049 		if (bfq_bfqq_busy(bfqq))
2050 			bfqd->wr_busy_queues--;
2051 	}
2052 
2053 	bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2054 		     bfqd->wr_busy_queues);
2055 
2056 	/*
2057 	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2058 	 */
2059 	bic_set_bfqq(bic, new_bfqq, 1);
2060 	bfq_mark_bfqq_coop(new_bfqq);
2061 	/*
2062 	 * new_bfqq now belongs to at least two bics (it is a shared queue):
2063 	 * set new_bfqq->bic to NULL. bfqq either:
2064 	 * - does not belong to any bic any more, and hence bfqq->bic must
2065 	 *   be set to NULL, or
2066 	 * - is a queue whose owning bics have already been redirected to a
2067 	 *   different queue, hence the queue is destined to not belong to
2068 	 *   any bic soon and bfqq->bic is already NULL (therefore the next
2069 	 *   assignment causes no harm).
2070 	 */
2071 	new_bfqq->bic = NULL;
2072 	bfqq->bic = NULL;
2073 	/* release process reference to bfqq */
2074 	bfq_put_queue(bfqq);
2075 }
2076 
2077 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2078 				struct bio *bio)
2079 {
2080 	struct bfq_data *bfqd = q->elevator->elevator_data;
2081 	bool is_sync = op_is_sync(bio->bi_opf);
2082 	struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2083 
2084 	/*
2085 	 * Disallow merge of a sync bio into an async request.
2086 	 */
2087 	if (is_sync && !rq_is_sync(rq))
2088 		return false;
2089 
2090 	/*
2091 	 * Lookup the bfqq that this bio will be queued with. Allow
2092 	 * merge only if rq is queued there.
2093 	 */
2094 	if (!bfqq)
2095 		return false;
2096 
2097 	/*
2098 	 * We take advantage of this function to perform an early merge
2099 	 * of the queues of possible cooperating processes.
2100 	 */
2101 	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2102 	if (new_bfqq) {
2103 		/*
2104 		 * bic still points to bfqq, then it has not yet been
2105 		 * redirected to some other bfq_queue, and a queue
2106 		 * merge beween bfqq and new_bfqq can be safely
2107 		 * fulfillled, i.e., bic can be redirected to new_bfqq
2108 		 * and bfqq can be put.
2109 		 */
2110 		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2111 				new_bfqq);
2112 		/*
2113 		 * If we get here, bio will be queued into new_queue,
2114 		 * so use new_bfqq to decide whether bio and rq can be
2115 		 * merged.
2116 		 */
2117 		bfqq = new_bfqq;
2118 
2119 		/*
2120 		 * Change also bqfd->bio_bfqq, as
2121 		 * bfqd->bio_bic now points to new_bfqq, and
2122 		 * this function may be invoked again (and then may
2123 		 * use again bqfd->bio_bfqq).
2124 		 */
2125 		bfqd->bio_bfqq = bfqq;
2126 	}
2127 
2128 	return bfqq == RQ_BFQQ(rq);
2129 }
2130 
2131 /*
2132  * Set the maximum time for the in-service queue to consume its
2133  * budget. This prevents seeky processes from lowering the throughput.
2134  * In practice, a time-slice service scheme is used with seeky
2135  * processes.
2136  */
2137 static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2138 				   struct bfq_queue *bfqq)
2139 {
2140 	unsigned int timeout_coeff;
2141 
2142 	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2143 		timeout_coeff = 1;
2144 	else
2145 		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2146 
2147 	bfqd->last_budget_start = ktime_get();
2148 
2149 	bfqq->budget_timeout = jiffies +
2150 		bfqd->bfq_timeout * timeout_coeff;
2151 }
2152 
2153 static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2154 				       struct bfq_queue *bfqq)
2155 {
2156 	if (bfqq) {
2157 		bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
2158 		bfq_clear_bfqq_fifo_expire(bfqq);
2159 
2160 		bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2161 
2162 		if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2163 		    bfqq->wr_coeff > 1 &&
2164 		    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2165 		    time_is_before_jiffies(bfqq->budget_timeout)) {
2166 			/*
2167 			 * For soft real-time queues, move the start
2168 			 * of the weight-raising period forward by the
2169 			 * time the queue has not received any
2170 			 * service. Otherwise, a relatively long
2171 			 * service delay is likely to cause the
2172 			 * weight-raising period of the queue to end,
2173 			 * because of the short duration of the
2174 			 * weight-raising period of a soft real-time
2175 			 * queue.  It is worth noting that this move
2176 			 * is not so dangerous for the other queues,
2177 			 * because soft real-time queues are not
2178 			 * greedy.
2179 			 *
2180 			 * To not add a further variable, we use the
2181 			 * overloaded field budget_timeout to
2182 			 * determine for how long the queue has not
2183 			 * received service, i.e., how much time has
2184 			 * elapsed since the queue expired. However,
2185 			 * this is a little imprecise, because
2186 			 * budget_timeout is set to jiffies if bfqq
2187 			 * not only expires, but also remains with no
2188 			 * request.
2189 			 */
2190 			if (time_after(bfqq->budget_timeout,
2191 				       bfqq->last_wr_start_finish))
2192 				bfqq->last_wr_start_finish +=
2193 					jiffies - bfqq->budget_timeout;
2194 			else
2195 				bfqq->last_wr_start_finish = jiffies;
2196 		}
2197 
2198 		bfq_set_budget_timeout(bfqd, bfqq);
2199 		bfq_log_bfqq(bfqd, bfqq,
2200 			     "set_in_service_queue, cur-budget = %d",
2201 			     bfqq->entity.budget);
2202 	}
2203 
2204 	bfqd->in_service_queue = bfqq;
2205 }
2206 
2207 /*
2208  * Get and set a new queue for service.
2209  */
2210 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2211 {
2212 	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2213 
2214 	__bfq_set_in_service_queue(bfqd, bfqq);
2215 	return bfqq;
2216 }
2217 
2218 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2219 {
2220 	struct bfq_queue *bfqq = bfqd->in_service_queue;
2221 	u32 sl;
2222 
2223 	bfq_mark_bfqq_wait_request(bfqq);
2224 
2225 	/*
2226 	 * We don't want to idle for seeks, but we do want to allow
2227 	 * fair distribution of slice time for a process doing back-to-back
2228 	 * seeks. So allow a little bit of time for him to submit a new rq.
2229 	 */
2230 	sl = bfqd->bfq_slice_idle;
2231 	/*
2232 	 * Unless the queue is being weight-raised or the scenario is
2233 	 * asymmetric, grant only minimum idle time if the queue
2234 	 * is seeky. A long idling is preserved for a weight-raised
2235 	 * queue, or, more in general, in an asymmetric scenario,
2236 	 * because a long idling is needed for guaranteeing to a queue
2237 	 * its reserved share of the throughput (in particular, it is
2238 	 * needed if the queue has a higher weight than some other
2239 	 * queue).
2240 	 */
2241 	if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2242 	    bfq_symmetric_scenario(bfqd))
2243 		sl = min_t(u64, sl, BFQ_MIN_TT);
2244 
2245 	bfqd->last_idling_start = ktime_get();
2246 	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2247 		      HRTIMER_MODE_REL);
2248 	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
2249 }
2250 
2251 /*
2252  * In autotuning mode, max_budget is dynamically recomputed as the
2253  * amount of sectors transferred in timeout at the estimated peak
2254  * rate. This enables BFQ to utilize a full timeslice with a full
2255  * budget, even if the in-service queue is served at peak rate. And
2256  * this maximises throughput with sequential workloads.
2257  */
2258 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2259 {
2260 	return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2261 		jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
2262 }
2263 
2264 /*
2265  * Update parameters related to throughput and responsiveness, as a
2266  * function of the estimated peak rate. See comments on
2267  * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2268  */
2269 static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2270 {
2271 	int dev_type = blk_queue_nonrot(bfqd->queue);
2272 
2273 	if (bfqd->bfq_user_max_budget == 0)
2274 		bfqd->bfq_max_budget =
2275 			bfq_calc_max_budget(bfqd);
2276 
2277 	if (bfqd->device_speed == BFQ_BFQD_FAST &&
2278 	    bfqd->peak_rate < device_speed_thresh[dev_type]) {
2279 		bfqd->device_speed = BFQ_BFQD_SLOW;
2280 		bfqd->RT_prod = R_slow[dev_type] *
2281 			T_slow[dev_type];
2282 	} else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2283 		   bfqd->peak_rate > device_speed_thresh[dev_type]) {
2284 		bfqd->device_speed = BFQ_BFQD_FAST;
2285 		bfqd->RT_prod = R_fast[dev_type] *
2286 			T_fast[dev_type];
2287 	}
2288 
2289 	bfq_log(bfqd,
2290 "dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
2291 		dev_type == 0 ? "ROT" : "NONROT",
2292 		bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2293 		bfqd->device_speed == BFQ_BFQD_FAST ?
2294 		(USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2295 		(USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2296 		(USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2297 		BFQ_RATE_SHIFT);
2298 }
2299 
2300 static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2301 				       struct request *rq)
2302 {
2303 	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2304 		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2305 		bfqd->peak_rate_samples = 1;
2306 		bfqd->sequential_samples = 0;
2307 		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2308 			blk_rq_sectors(rq);
2309 	} else /* no new rq dispatched, just reset the number of samples */
2310 		bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2311 
2312 	bfq_log(bfqd,
2313 		"reset_rate_computation at end, sample %u/%u tot_sects %llu",
2314 		bfqd->peak_rate_samples, bfqd->sequential_samples,
2315 		bfqd->tot_sectors_dispatched);
2316 }
2317 
2318 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2319 {
2320 	u32 rate, weight, divisor;
2321 
2322 	/*
2323 	 * For the convergence property to hold (see comments on
2324 	 * bfq_update_peak_rate()) and for the assessment to be
2325 	 * reliable, a minimum number of samples must be present, and
2326 	 * a minimum amount of time must have elapsed. If not so, do
2327 	 * not compute new rate. Just reset parameters, to get ready
2328 	 * for a new evaluation attempt.
2329 	 */
2330 	if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2331 	    bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2332 		goto reset_computation;
2333 
2334 	/*
2335 	 * If a new request completion has occurred after last
2336 	 * dispatch, then, to approximate the rate at which requests
2337 	 * have been served by the device, it is more precise to
2338 	 * extend the observation interval to the last completion.
2339 	 */
2340 	bfqd->delta_from_first =
2341 		max_t(u64, bfqd->delta_from_first,
2342 		      bfqd->last_completion - bfqd->first_dispatch);
2343 
2344 	/*
2345 	 * Rate computed in sects/usec, and not sects/nsec, for
2346 	 * precision issues.
2347 	 */
2348 	rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2349 			div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2350 
2351 	/*
2352 	 * Peak rate not updated if:
2353 	 * - the percentage of sequential dispatches is below 3/4 of the
2354 	 *   total, and rate is below the current estimated peak rate
2355 	 * - rate is unreasonably high (> 20M sectors/sec)
2356 	 */
2357 	if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2358 	     rate <= bfqd->peak_rate) ||
2359 		rate > 20<<BFQ_RATE_SHIFT)
2360 		goto reset_computation;
2361 
2362 	/*
2363 	 * We have to update the peak rate, at last! To this purpose,
2364 	 * we use a low-pass filter. We compute the smoothing constant
2365 	 * of the filter as a function of the 'weight' of the new
2366 	 * measured rate.
2367 	 *
2368 	 * As can be seen in next formulas, we define this weight as a
2369 	 * quantity proportional to how sequential the workload is,
2370 	 * and to how long the observation time interval is.
2371 	 *
2372 	 * The weight runs from 0 to 8. The maximum value of the
2373 	 * weight, 8, yields the minimum value for the smoothing
2374 	 * constant. At this minimum value for the smoothing constant,
2375 	 * the measured rate contributes for half of the next value of
2376 	 * the estimated peak rate.
2377 	 *
2378 	 * So, the first step is to compute the weight as a function
2379 	 * of how sequential the workload is. Note that the weight
2380 	 * cannot reach 9, because bfqd->sequential_samples cannot
2381 	 * become equal to bfqd->peak_rate_samples, which, in its
2382 	 * turn, holds true because bfqd->sequential_samples is not
2383 	 * incremented for the first sample.
2384 	 */
2385 	weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2386 
2387 	/*
2388 	 * Second step: further refine the weight as a function of the
2389 	 * duration of the observation interval.
2390 	 */
2391 	weight = min_t(u32, 8,
2392 		       div_u64(weight * bfqd->delta_from_first,
2393 			       BFQ_RATE_REF_INTERVAL));
2394 
2395 	/*
2396 	 * Divisor ranging from 10, for minimum weight, to 2, for
2397 	 * maximum weight.
2398 	 */
2399 	divisor = 10 - weight;
2400 
2401 	/*
2402 	 * Finally, update peak rate:
2403 	 *
2404 	 * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor
2405 	 */
2406 	bfqd->peak_rate *= divisor-1;
2407 	bfqd->peak_rate /= divisor;
2408 	rate /= divisor; /* smoothing constant alpha = 1/divisor */
2409 
2410 	bfqd->peak_rate += rate;
2411 	update_thr_responsiveness_params(bfqd);
2412 
2413 reset_computation:
2414 	bfq_reset_rate_computation(bfqd, rq);
2415 }
2416 
2417 /*
2418  * Update the read/write peak rate (the main quantity used for
2419  * auto-tuning, see update_thr_responsiveness_params()).
2420  *
2421  * It is not trivial to estimate the peak rate (correctly): because of
2422  * the presence of sw and hw queues between the scheduler and the
2423  * device components that finally serve I/O requests, it is hard to
2424  * say exactly when a given dispatched request is served inside the
2425  * device, and for how long. As a consequence, it is hard to know
2426  * precisely at what rate a given set of requests is actually served
2427  * by the device.
2428  *
2429  * On the opposite end, the dispatch time of any request is trivially
2430  * available, and, from this piece of information, the "dispatch rate"
2431  * of requests can be immediately computed. So, the idea in the next
2432  * function is to use what is known, namely request dispatch times
2433  * (plus, when useful, request completion times), to estimate what is
2434  * unknown, namely in-device request service rate.
2435  *
2436  * The main issue is that, because of the above facts, the rate at
2437  * which a certain set of requests is dispatched over a certain time
2438  * interval can vary greatly with respect to the rate at which the
2439  * same requests are then served. But, since the size of any
2440  * intermediate queue is limited, and the service scheme is lossless
2441  * (no request is silently dropped), the following obvious convergence
2442  * property holds: the number of requests dispatched MUST become
2443  * closer and closer to the number of requests completed as the
2444  * observation interval grows. This is the key property used in
2445  * the next function to estimate the peak service rate as a function
2446  * of the observed dispatch rate. The function assumes to be invoked
2447  * on every request dispatch.
2448  */
2449 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2450 {
2451 	u64 now_ns = ktime_get_ns();
2452 
2453 	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2454 		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2455 			bfqd->peak_rate_samples);
2456 		bfq_reset_rate_computation(bfqd, rq);
2457 		goto update_last_values; /* will add one sample */
2458 	}
2459 
2460 	/*
2461 	 * Device idle for very long: the observation interval lasting
2462 	 * up to this dispatch cannot be a valid observation interval
2463 	 * for computing a new peak rate (similarly to the late-
2464 	 * completion event in bfq_completed_request()). Go to
2465 	 * update_rate_and_reset to have the following three steps
2466 	 * taken:
2467 	 * - close the observation interval at the last (previous)
2468 	 *   request dispatch or completion
2469 	 * - compute rate, if possible, for that observation interval
2470 	 * - start a new observation interval with this dispatch
2471 	 */
2472 	if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2473 	    bfqd->rq_in_driver == 0)
2474 		goto update_rate_and_reset;
2475 
2476 	/* Update sampling information */
2477 	bfqd->peak_rate_samples++;
2478 
2479 	if ((bfqd->rq_in_driver > 0 ||
2480 		now_ns - bfqd->last_completion < BFQ_MIN_TT)
2481 	     && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2482 		bfqd->sequential_samples++;
2483 
2484 	bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2485 
2486 	/* Reset max observed rq size every 32 dispatches */
2487 	if (likely(bfqd->peak_rate_samples % 32))
2488 		bfqd->last_rq_max_size =
2489 			max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2490 	else
2491 		bfqd->last_rq_max_size = blk_rq_sectors(rq);
2492 
2493 	bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2494 
2495 	/* Target observation interval not yet reached, go on sampling */
2496 	if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2497 		goto update_last_values;
2498 
2499 update_rate_and_reset:
2500 	bfq_update_rate_reset(bfqd, rq);
2501 update_last_values:
2502 	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2503 	bfqd->last_dispatch = now_ns;
2504 }
2505 
2506 /*
2507  * Remove request from internal lists.
2508  */
2509 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2510 {
2511 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
2512 
2513 	/*
2514 	 * For consistency, the next instruction should have been
2515 	 * executed after removing the request from the queue and
2516 	 * dispatching it.  We execute instead this instruction before
2517 	 * bfq_remove_request() (and hence introduce a temporary
2518 	 * inconsistency), for efficiency.  In fact, should this
2519 	 * dispatch occur for a non in-service bfqq, this anticipated
2520 	 * increment prevents two counters related to bfqq->dispatched
2521 	 * from risking to be, first, uselessly decremented, and then
2522 	 * incremented again when the (new) value of bfqq->dispatched
2523 	 * happens to be taken into account.
2524 	 */
2525 	bfqq->dispatched++;
2526 	bfq_update_peak_rate(q->elevator->elevator_data, rq);
2527 
2528 	bfq_remove_request(q, rq);
2529 }
2530 
2531 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2532 {
2533 	/*
2534 	 * If this bfqq is shared between multiple processes, check
2535 	 * to make sure that those processes are still issuing I/Os
2536 	 * within the mean seek distance. If not, it may be time to
2537 	 * break the queues apart again.
2538 	 */
2539 	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2540 		bfq_mark_bfqq_split_coop(bfqq);
2541 
2542 	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2543 		if (bfqq->dispatched == 0)
2544 			/*
2545 			 * Overloading budget_timeout field to store
2546 			 * the time at which the queue remains with no
2547 			 * backlog and no outstanding request; used by
2548 			 * the weight-raising mechanism.
2549 			 */
2550 			bfqq->budget_timeout = jiffies;
2551 
2552 		bfq_del_bfqq_busy(bfqd, bfqq, true);
2553 	} else {
2554 		bfq_requeue_bfqq(bfqd, bfqq);
2555 		/*
2556 		 * Resort priority tree of potential close cooperators.
2557 		 */
2558 		bfq_pos_tree_add_move(bfqd, bfqq);
2559 	}
2560 
2561 	/*
2562 	 * All in-service entities must have been properly deactivated
2563 	 * or requeued before executing the next function, which
2564 	 * resets all in-service entites as no more in service.
2565 	 */
2566 	__bfq_bfqd_reset_in_service(bfqd);
2567 }
2568 
2569 /**
2570  * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2571  * @bfqd: device data.
2572  * @bfqq: queue to update.
2573  * @reason: reason for expiration.
2574  *
2575  * Handle the feedback on @bfqq budget at queue expiration.
2576  * See the body for detailed comments.
2577  */
2578 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2579 				     struct bfq_queue *bfqq,
2580 				     enum bfqq_expiration reason)
2581 {
2582 	struct request *next_rq;
2583 	int budget, min_budget;
2584 
2585 	min_budget = bfq_min_budget(bfqd);
2586 
2587 	if (bfqq->wr_coeff == 1)
2588 		budget = bfqq->max_budget;
2589 	else /*
2590 	      * Use a constant, low budget for weight-raised queues,
2591 	      * to help achieve a low latency. Keep it slightly higher
2592 	      * than the minimum possible budget, to cause a little
2593 	      * bit fewer expirations.
2594 	      */
2595 		budget = 2 * min_budget;
2596 
2597 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2598 		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2599 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2600 		budget, bfq_min_budget(bfqd));
2601 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2602 		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2603 
2604 	if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
2605 		switch (reason) {
2606 		/*
2607 		 * Caveat: in all the following cases we trade latency
2608 		 * for throughput.
2609 		 */
2610 		case BFQQE_TOO_IDLE:
2611 			/*
2612 			 * This is the only case where we may reduce
2613 			 * the budget: if there is no request of the
2614 			 * process still waiting for completion, then
2615 			 * we assume (tentatively) that the timer has
2616 			 * expired because the batch of requests of
2617 			 * the process could have been served with a
2618 			 * smaller budget.  Hence, betting that
2619 			 * process will behave in the same way when it
2620 			 * becomes backlogged again, we reduce its
2621 			 * next budget.  As long as we guess right,
2622 			 * this budget cut reduces the latency
2623 			 * experienced by the process.
2624 			 *
2625 			 * However, if there are still outstanding
2626 			 * requests, then the process may have not yet
2627 			 * issued its next request just because it is
2628 			 * still waiting for the completion of some of
2629 			 * the still outstanding ones.  So in this
2630 			 * subcase we do not reduce its budget, on the
2631 			 * contrary we increase it to possibly boost
2632 			 * the throughput, as discussed in the
2633 			 * comments to the BUDGET_TIMEOUT case.
2634 			 */
2635 			if (bfqq->dispatched > 0) /* still outstanding reqs */
2636 				budget = min(budget * 2, bfqd->bfq_max_budget);
2637 			else {
2638 				if (budget > 5 * min_budget)
2639 					budget -= 4 * min_budget;
2640 				else
2641 					budget = min_budget;
2642 			}
2643 			break;
2644 		case BFQQE_BUDGET_TIMEOUT:
2645 			/*
2646 			 * We double the budget here because it gives
2647 			 * the chance to boost the throughput if this
2648 			 * is not a seeky process (and has bumped into
2649 			 * this timeout because of, e.g., ZBR).
2650 			 */
2651 			budget = min(budget * 2, bfqd->bfq_max_budget);
2652 			break;
2653 		case BFQQE_BUDGET_EXHAUSTED:
2654 			/*
2655 			 * The process still has backlog, and did not
2656 			 * let either the budget timeout or the disk
2657 			 * idling timeout expire. Hence it is not
2658 			 * seeky, has a short thinktime and may be
2659 			 * happy with a higher budget too. So
2660 			 * definitely increase the budget of this good
2661 			 * candidate to boost the disk throughput.
2662 			 */
2663 			budget = min(budget * 4, bfqd->bfq_max_budget);
2664 			break;
2665 		case BFQQE_NO_MORE_REQUESTS:
2666 			/*
2667 			 * For queues that expire for this reason, it
2668 			 * is particularly important to keep the
2669 			 * budget close to the actual service they
2670 			 * need. Doing so reduces the timestamp
2671 			 * misalignment problem described in the
2672 			 * comments in the body of
2673 			 * __bfq_activate_entity. In fact, suppose
2674 			 * that a queue systematically expires for
2675 			 * BFQQE_NO_MORE_REQUESTS and presents a
2676 			 * new request in time to enjoy timestamp
2677 			 * back-shifting. The larger the budget of the
2678 			 * queue is with respect to the service the
2679 			 * queue actually requests in each service
2680 			 * slot, the more times the queue can be
2681 			 * reactivated with the same virtual finish
2682 			 * time. It follows that, even if this finish
2683 			 * time is pushed to the system virtual time
2684 			 * to reduce the consequent timestamp
2685 			 * misalignment, the queue unjustly enjoys for
2686 			 * many re-activations a lower finish time
2687 			 * than all newly activated queues.
2688 			 *
2689 			 * The service needed by bfqq is measured
2690 			 * quite precisely by bfqq->entity.service.
2691 			 * Since bfqq does not enjoy device idling,
2692 			 * bfqq->entity.service is equal to the number
2693 			 * of sectors that the process associated with
2694 			 * bfqq requested to read/write before waiting
2695 			 * for request completions, or blocking for
2696 			 * other reasons.
2697 			 */
2698 			budget = max_t(int, bfqq->entity.service, min_budget);
2699 			break;
2700 		default:
2701 			return;
2702 		}
2703 	} else if (!bfq_bfqq_sync(bfqq)) {
2704 		/*
2705 		 * Async queues get always the maximum possible
2706 		 * budget, as for them we do not care about latency
2707 		 * (in addition, their ability to dispatch is limited
2708 		 * by the charging factor).
2709 		 */
2710 		budget = bfqd->bfq_max_budget;
2711 	}
2712 
2713 	bfqq->max_budget = budget;
2714 
2715 	if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2716 	    !bfqd->bfq_user_max_budget)
2717 		bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2718 
2719 	/*
2720 	 * If there is still backlog, then assign a new budget, making
2721 	 * sure that it is large enough for the next request.  Since
2722 	 * the finish time of bfqq must be kept in sync with the
2723 	 * budget, be sure to call __bfq_bfqq_expire() *after* this
2724 	 * update.
2725 	 *
2726 	 * If there is no backlog, then no need to update the budget;
2727 	 * it will be updated on the arrival of a new request.
2728 	 */
2729 	next_rq = bfqq->next_rq;
2730 	if (next_rq)
2731 		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2732 					    bfq_serv_to_charge(next_rq, bfqq));
2733 
2734 	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2735 			next_rq ? blk_rq_sectors(next_rq) : 0,
2736 			bfqq->entity.budget);
2737 }
2738 
2739 /*
2740  * Return true if the process associated with bfqq is "slow". The slow
2741  * flag is used, in addition to the budget timeout, to reduce the
2742  * amount of service provided to seeky processes, and thus reduce
2743  * their chances to lower the throughput. More details in the comments
2744  * on the function bfq_bfqq_expire().
2745  *
2746  * An important observation is in order: as discussed in the comments
2747  * on the function bfq_update_peak_rate(), with devices with internal
2748  * queues, it is hard if ever possible to know when and for how long
2749  * an I/O request is processed by the device (apart from the trivial
2750  * I/O pattern where a new request is dispatched only after the
2751  * previous one has been completed). This makes it hard to evaluate
2752  * the real rate at which the I/O requests of each bfq_queue are
2753  * served.  In fact, for an I/O scheduler like BFQ, serving a
2754  * bfq_queue means just dispatching its requests during its service
2755  * slot (i.e., until the budget of the queue is exhausted, or the
2756  * queue remains idle, or, finally, a timeout fires). But, during the
2757  * service slot of a bfq_queue, around 100 ms at most, the device may
2758  * be even still processing requests of bfq_queues served in previous
2759  * service slots. On the opposite end, the requests of the in-service
2760  * bfq_queue may be completed after the service slot of the queue
2761  * finishes.
2762  *
2763  * Anyway, unless more sophisticated solutions are used
2764  * (where possible), the sum of the sizes of the requests dispatched
2765  * during the service slot of a bfq_queue is probably the only
2766  * approximation available for the service received by the bfq_queue
2767  * during its service slot. And this sum is the quantity used in this
2768  * function to evaluate the I/O speed of a process.
2769  */
2770 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2771 				 bool compensate, enum bfqq_expiration reason,
2772 				 unsigned long *delta_ms)
2773 {
2774 	ktime_t delta_ktime;
2775 	u32 delta_usecs;
2776 	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
2777 
2778 	if (!bfq_bfqq_sync(bfqq))
2779 		return false;
2780 
2781 	if (compensate)
2782 		delta_ktime = bfqd->last_idling_start;
2783 	else
2784 		delta_ktime = ktime_get();
2785 	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2786 	delta_usecs = ktime_to_us(delta_ktime);
2787 
2788 	/* don't use too short time intervals */
2789 	if (delta_usecs < 1000) {
2790 		if (blk_queue_nonrot(bfqd->queue))
2791 			 /*
2792 			  * give same worst-case guarantees as idling
2793 			  * for seeky
2794 			  */
2795 			*delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2796 		else /* charge at least one seek */
2797 			*delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2798 
2799 		return slow;
2800 	}
2801 
2802 	*delta_ms = delta_usecs / USEC_PER_MSEC;
2803 
2804 	/*
2805 	 * Use only long (> 20ms) intervals to filter out excessive
2806 	 * spikes in service rate estimation.
2807 	 */
2808 	if (delta_usecs > 20000) {
2809 		/*
2810 		 * Caveat for rotational devices: processes doing I/O
2811 		 * in the slower disk zones tend to be slow(er) even
2812 		 * if not seeky. In this respect, the estimated peak
2813 		 * rate is likely to be an average over the disk
2814 		 * surface. Accordingly, to not be too harsh with
2815 		 * unlucky processes, a process is deemed slow only if
2816 		 * its rate has been lower than half of the estimated
2817 		 * peak rate.
2818 		 */
2819 		slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
2820 	}
2821 
2822 	bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
2823 
2824 	return slow;
2825 }
2826 
2827 /*
2828  * To be deemed as soft real-time, an application must meet two
2829  * requirements. First, the application must not require an average
2830  * bandwidth higher than the approximate bandwidth required to playback or
2831  * record a compressed high-definition video.
2832  * The next function is invoked on the completion of the last request of a
2833  * batch, to compute the next-start time instant, soft_rt_next_start, such
2834  * that, if the next request of the application does not arrive before
2835  * soft_rt_next_start, then the above requirement on the bandwidth is met.
2836  *
2837  * The second requirement is that the request pattern of the application is
2838  * isochronous, i.e., that, after issuing a request or a batch of requests,
2839  * the application stops issuing new requests until all its pending requests
2840  * have been completed. After that, the application may issue a new batch,
2841  * and so on.
2842  * For this reason the next function is invoked to compute
2843  * soft_rt_next_start only for applications that meet this requirement,
2844  * whereas soft_rt_next_start is set to infinity for applications that do
2845  * not.
2846  *
2847  * Unfortunately, even a greedy application may happen to behave in an
2848  * isochronous way if the CPU load is high. In fact, the application may
2849  * stop issuing requests while the CPUs are busy serving other processes,
2850  * then restart, then stop again for a while, and so on. In addition, if
2851  * the disk achieves a low enough throughput with the request pattern
2852  * issued by the application (e.g., because the request pattern is random
2853  * and/or the device is slow), then the application may meet the above
2854  * bandwidth requirement too. To prevent such a greedy application to be
2855  * deemed as soft real-time, a further rule is used in the computation of
2856  * soft_rt_next_start: soft_rt_next_start must be higher than the current
2857  * time plus the maximum time for which the arrival of a request is waited
2858  * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2859  * This filters out greedy applications, as the latter issue instead their
2860  * next request as soon as possible after the last one has been completed
2861  * (in contrast, when a batch of requests is completed, a soft real-time
2862  * application spends some time processing data).
2863  *
2864  * Unfortunately, the last filter may easily generate false positives if
2865  * only bfqd->bfq_slice_idle is used as a reference time interval and one
2866  * or both the following cases occur:
2867  * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2868  *    than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
2869  *    HZ=100.
2870  * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2871  *    for a while, then suddenly 'jump' by several units to recover the lost
2872  *    increments. This seems to happen, e.g., inside virtual machines.
2873  * To address this issue, we do not use as a reference time interval just
2874  * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2875  * particular we add the minimum number of jiffies for which the filter
2876  * seems to be quite precise also in embedded systems and KVM/QEMU virtual
2877  * machines.
2878  */
2879 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2880 						struct bfq_queue *bfqq)
2881 {
2882 	return max(bfqq->last_idle_bklogged +
2883 		   HZ * bfqq->service_from_backlogged /
2884 		   bfqd->bfq_wr_max_softrt_rate,
2885 		   jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
2886 }
2887 
2888 /*
2889  * Return the farthest future time instant according to jiffies
2890  * macros.
2891  */
2892 static unsigned long bfq_greatest_from_now(void)
2893 {
2894 	return jiffies + MAX_JIFFY_OFFSET;
2895 }
2896 
2897 /*
2898  * Return the farthest past time instant according to jiffies
2899  * macros.
2900  */
2901 static unsigned long bfq_smallest_from_now(void)
2902 {
2903 	return jiffies - MAX_JIFFY_OFFSET;
2904 }
2905 
2906 /**
2907  * bfq_bfqq_expire - expire a queue.
2908  * @bfqd: device owning the queue.
2909  * @bfqq: the queue to expire.
2910  * @compensate: if true, compensate for the time spent idling.
2911  * @reason: the reason causing the expiration.
2912  *
2913  * If the process associated with bfqq does slow I/O (e.g., because it
2914  * issues random requests), we charge bfqq with the time it has been
2915  * in service instead of the service it has received (see
2916  * bfq_bfqq_charge_time for details on how this goal is achieved). As
2917  * a consequence, bfqq will typically get higher timestamps upon
2918  * reactivation, and hence it will be rescheduled as if it had
2919  * received more service than what it has actually received. In the
2920  * end, bfqq receives less service in proportion to how slowly its
2921  * associated process consumes its budgets (and hence how seriously it
2922  * tends to lower the throughput). In addition, this time-charging
2923  * strategy guarantees time fairness among slow processes. In
2924  * contrast, if the process associated with bfqq is not slow, we
2925  * charge bfqq exactly with the service it has received.
2926  *
2927  * Charging time to the first type of queues and the exact service to
2928  * the other has the effect of using the WF2Q+ policy to schedule the
2929  * former on a timeslice basis, without violating service domain
2930  * guarantees among the latter.
2931  */
2932 void bfq_bfqq_expire(struct bfq_data *bfqd,
2933 		     struct bfq_queue *bfqq,
2934 		     bool compensate,
2935 		     enum bfqq_expiration reason)
2936 {
2937 	bool slow;
2938 	unsigned long delta = 0;
2939 	struct bfq_entity *entity = &bfqq->entity;
2940 	int ref;
2941 
2942 	/*
2943 	 * Check whether the process is slow (see bfq_bfqq_is_slow).
2944 	 */
2945 	slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
2946 
2947 	/*
2948 	 * Increase service_from_backlogged before next statement,
2949 	 * because the possible next invocation of
2950 	 * bfq_bfqq_charge_time would likely inflate
2951 	 * entity->service. In contrast, service_from_backlogged must
2952 	 * contain real service, to enable the soft real-time
2953 	 * heuristic to correctly compute the bandwidth consumed by
2954 	 * bfqq.
2955 	 */
2956 	bfqq->service_from_backlogged += entity->service;
2957 
2958 	/*
2959 	 * As above explained, charge slow (typically seeky) and
2960 	 * timed-out queues with the time and not the service
2961 	 * received, to favor sequential workloads.
2962 	 *
2963 	 * Processes doing I/O in the slower disk zones will tend to
2964 	 * be slow(er) even if not seeky. Therefore, since the
2965 	 * estimated peak rate is actually an average over the disk
2966 	 * surface, these processes may timeout just for bad luck. To
2967 	 * avoid punishing them, do not charge time to processes that
2968 	 * succeeded in consuming at least 2/3 of their budget. This
2969 	 * allows BFQ to preserve enough elasticity to still perform
2970 	 * bandwidth, and not time, distribution with little unlucky
2971 	 * or quasi-sequential processes.
2972 	 */
2973 	if (bfqq->wr_coeff == 1 &&
2974 	    (slow ||
2975 	     (reason == BFQQE_BUDGET_TIMEOUT &&
2976 	      bfq_bfqq_budget_left(bfqq) >=  entity->budget / 3)))
2977 		bfq_bfqq_charge_time(bfqd, bfqq, delta);
2978 
2979 	if (reason == BFQQE_TOO_IDLE &&
2980 	    entity->service <= 2 * entity->budget / 10)
2981 		bfq_clear_bfqq_IO_bound(bfqq);
2982 
2983 	if (bfqd->low_latency && bfqq->wr_coeff == 1)
2984 		bfqq->last_wr_start_finish = jiffies;
2985 
2986 	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
2987 	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
2988 		/*
2989 		 * If we get here, and there are no outstanding
2990 		 * requests, then the request pattern is isochronous
2991 		 * (see the comments on the function
2992 		 * bfq_bfqq_softrt_next_start()). Thus we can compute
2993 		 * soft_rt_next_start. If, instead, the queue still
2994 		 * has outstanding requests, then we have to wait for
2995 		 * the completion of all the outstanding requests to
2996 		 * discover whether the request pattern is actually
2997 		 * isochronous.
2998 		 */
2999 		if (bfqq->dispatched == 0)
3000 			bfqq->soft_rt_next_start =
3001 				bfq_bfqq_softrt_next_start(bfqd, bfqq);
3002 		else {
3003 			/*
3004 			 * The application is still waiting for the
3005 			 * completion of one or more requests:
3006 			 * prevent it from possibly being incorrectly
3007 			 * deemed as soft real-time by setting its
3008 			 * soft_rt_next_start to infinity. In fact,
3009 			 * without this assignment, the application
3010 			 * would be incorrectly deemed as soft
3011 			 * real-time if:
3012 			 * 1) it issued a new request before the
3013 			 *    completion of all its in-flight
3014 			 *    requests, and
3015 			 * 2) at that time, its soft_rt_next_start
3016 			 *    happened to be in the past.
3017 			 */
3018 			bfqq->soft_rt_next_start =
3019 				bfq_greatest_from_now();
3020 			/*
3021 			 * Schedule an update of soft_rt_next_start to when
3022 			 * the task may be discovered to be isochronous.
3023 			 */
3024 			bfq_mark_bfqq_softrt_update(bfqq);
3025 		}
3026 	}
3027 
3028 	bfq_log_bfqq(bfqd, bfqq,
3029 		"expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
3030 		slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
3031 
3032 	/*
3033 	 * Increase, decrease or leave budget unchanged according to
3034 	 * reason.
3035 	 */
3036 	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3037 	ref = bfqq->ref;
3038 	__bfq_bfqq_expire(bfqd, bfqq);
3039 
3040 	/* mark bfqq as waiting a request only if a bic still points to it */
3041 	if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3042 	    reason != BFQQE_BUDGET_TIMEOUT &&
3043 	    reason != BFQQE_BUDGET_EXHAUSTED)
3044 		bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3045 }
3046 
3047 /*
3048  * Budget timeout is not implemented through a dedicated timer, but
3049  * just checked on request arrivals and completions, as well as on
3050  * idle timer expirations.
3051  */
3052 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3053 {
3054 	return time_is_before_eq_jiffies(bfqq->budget_timeout);
3055 }
3056 
3057 /*
3058  * If we expire a queue that is actively waiting (i.e., with the
3059  * device idled) for the arrival of a new request, then we may incur
3060  * the timestamp misalignment problem described in the body of the
3061  * function __bfq_activate_entity. Hence we return true only if this
3062  * condition does not hold, or if the queue is slow enough to deserve
3063  * only to be kicked off for preserving a high throughput.
3064  */
3065 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3066 {
3067 	bfq_log_bfqq(bfqq->bfqd, bfqq,
3068 		"may_budget_timeout: wait_request %d left %d timeout %d",
3069 		bfq_bfqq_wait_request(bfqq),
3070 			bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3,
3071 		bfq_bfqq_budget_timeout(bfqq));
3072 
3073 	return (!bfq_bfqq_wait_request(bfqq) ||
3074 		bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3)
3075 		&&
3076 		bfq_bfqq_budget_timeout(bfqq);
3077 }
3078 
3079 /*
3080  * For a queue that becomes empty, device idling is allowed only if
3081  * this function returns true for the queue. As a consequence, since
3082  * device idling plays a critical role in both throughput boosting and
3083  * service guarantees, the return value of this function plays a
3084  * critical role in both these aspects as well.
3085  *
3086  * In a nutshell, this function returns true only if idling is
3087  * beneficial for throughput or, even if detrimental for throughput,
3088  * idling is however necessary to preserve service guarantees (low
3089  * latency, desired throughput distribution, ...). In particular, on
3090  * NCQ-capable devices, this function tries to return false, so as to
3091  * help keep the drives' internal queues full, whenever this helps the
3092  * device boost the throughput without causing any service-guarantee
3093  * issue.
3094  *
3095  * In more detail, the return value of this function is obtained by,
3096  * first, computing a number of boolean variables that take into
3097  * account throughput and service-guarantee issues, and, then,
3098  * combining these variables in a logical expression. Most of the
3099  * issues taken into account are not trivial. We discuss these issues
3100  * individually while introducing the variables.
3101  */
3102 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3103 {
3104 	struct bfq_data *bfqd = bfqq->bfqd;
3105 	bool idling_boosts_thr, idling_boosts_thr_without_issues,
3106 		idling_needed_for_service_guarantees,
3107 		asymmetric_scenario;
3108 
3109 	if (bfqd->strict_guarantees)
3110 		return true;
3111 
3112 	/*
3113 	 * The next variable takes into account the cases where idling
3114 	 * boosts the throughput.
3115 	 *
3116 	 * The value of the variable is computed considering, first, that
3117 	 * idling is virtually always beneficial for the throughput if:
3118 	 * (a) the device is not NCQ-capable, or
3119 	 * (b) regardless of the presence of NCQ, the device is rotational
3120 	 *     and the request pattern for bfqq is I/O-bound and sequential.
3121 	 *
3122 	 * Secondly, and in contrast to the above item (b), idling an
3123 	 * NCQ-capable flash-based device would not boost the
3124 	 * throughput even with sequential I/O; rather it would lower
3125 	 * the throughput in proportion to how fast the device
3126 	 * is. Accordingly, the next variable is true if any of the
3127 	 * above conditions (a) and (b) is true, and, in particular,
3128 	 * happens to be false if bfqd is an NCQ-capable flash-based
3129 	 * device.
3130 	 */
3131 	idling_boosts_thr = !bfqd->hw_tag ||
3132 		(!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
3133 		 bfq_bfqq_idle_window(bfqq));
3134 
3135 	/*
3136 	 * The value of the next variable,
3137 	 * idling_boosts_thr_without_issues, is equal to that of
3138 	 * idling_boosts_thr, unless a special case holds. In this
3139 	 * special case, described below, idling may cause problems to
3140 	 * weight-raised queues.
3141 	 *
3142 	 * When the request pool is saturated (e.g., in the presence
3143 	 * of write hogs), if the processes associated with
3144 	 * non-weight-raised queues ask for requests at a lower rate,
3145 	 * then processes associated with weight-raised queues have a
3146 	 * higher probability to get a request from the pool
3147 	 * immediately (or at least soon) when they need one. Thus
3148 	 * they have a higher probability to actually get a fraction
3149 	 * of the device throughput proportional to their high
3150 	 * weight. This is especially true with NCQ-capable drives,
3151 	 * which enqueue several requests in advance, and further
3152 	 * reorder internally-queued requests.
3153 	 *
3154 	 * For this reason, we force to false the value of
3155 	 * idling_boosts_thr_without_issues if there are weight-raised
3156 	 * busy queues. In this case, and if bfqq is not weight-raised,
3157 	 * this guarantees that the device is not idled for bfqq (if,
3158 	 * instead, bfqq is weight-raised, then idling will be
3159 	 * guaranteed by another variable, see below). Combined with
3160 	 * the timestamping rules of BFQ (see [1] for details), this
3161 	 * behavior causes bfqq, and hence any sync non-weight-raised
3162 	 * queue, to get a lower number of requests served, and thus
3163 	 * to ask for a lower number of requests from the request
3164 	 * pool, before the busy weight-raised queues get served
3165 	 * again. This often mitigates starvation problems in the
3166 	 * presence of heavy write workloads and NCQ, thereby
3167 	 * guaranteeing a higher application and system responsiveness
3168 	 * in these hostile scenarios.
3169 	 */
3170 	idling_boosts_thr_without_issues = idling_boosts_thr &&
3171 		bfqd->wr_busy_queues == 0;
3172 
3173 	/*
3174 	 * There is then a case where idling must be performed not
3175 	 * for throughput concerns, but to preserve service
3176 	 * guarantees.
3177 	 *
3178 	 * To introduce this case, we can note that allowing the drive
3179 	 * to enqueue more than one request at a time, and hence
3180 	 * delegating de facto final scheduling decisions to the
3181 	 * drive's internal scheduler, entails loss of control on the
3182 	 * actual request service order. In particular, the critical
3183 	 * situation is when requests from different processes happen
3184 	 * to be present, at the same time, in the internal queue(s)
3185 	 * of the drive. In such a situation, the drive, by deciding
3186 	 * the service order of the internally-queued requests, does
3187 	 * determine also the actual throughput distribution among
3188 	 * these processes. But the drive typically has no notion or
3189 	 * concern about per-process throughput distribution, and
3190 	 * makes its decisions only on a per-request basis. Therefore,
3191 	 * the service distribution enforced by the drive's internal
3192 	 * scheduler is likely to coincide with the desired
3193 	 * device-throughput distribution only in a completely
3194 	 * symmetric scenario where:
3195 	 * (i)  each of these processes must get the same throughput as
3196 	 *      the others;
3197 	 * (ii) all these processes have the same I/O pattern
3198 		(either sequential or random).
3199 	 * In fact, in such a scenario, the drive will tend to treat
3200 	 * the requests of each of these processes in about the same
3201 	 * way as the requests of the others, and thus to provide
3202 	 * each of these processes with about the same throughput
3203 	 * (which is exactly the desired throughput distribution). In
3204 	 * contrast, in any asymmetric scenario, device idling is
3205 	 * certainly needed to guarantee that bfqq receives its
3206 	 * assigned fraction of the device throughput (see [1] for
3207 	 * details).
3208 	 *
3209 	 * We address this issue by controlling, actually, only the
3210 	 * symmetry sub-condition (i), i.e., provided that
3211 	 * sub-condition (i) holds, idling is not performed,
3212 	 * regardless of whether sub-condition (ii) holds. In other
3213 	 * words, only if sub-condition (i) holds, then idling is
3214 	 * allowed, and the device tends to be prevented from queueing
3215 	 * many requests, possibly of several processes. The reason
3216 	 * for not controlling also sub-condition (ii) is that we
3217 	 * exploit preemption to preserve guarantees in case of
3218 	 * symmetric scenarios, even if (ii) does not hold, as
3219 	 * explained in the next two paragraphs.
3220 	 *
3221 	 * Even if a queue, say Q, is expired when it remains idle, Q
3222 	 * can still preempt the new in-service queue if the next
3223 	 * request of Q arrives soon (see the comments on
3224 	 * bfq_bfqq_update_budg_for_activation). If all queues and
3225 	 * groups have the same weight, this form of preemption,
3226 	 * combined with the hole-recovery heuristic described in the
3227 	 * comments on function bfq_bfqq_update_budg_for_activation,
3228 	 * are enough to preserve a correct bandwidth distribution in
3229 	 * the mid term, even without idling. In fact, even if not
3230 	 * idling allows the internal queues of the device to contain
3231 	 * many requests, and thus to reorder requests, we can rather
3232 	 * safely assume that the internal scheduler still preserves a
3233 	 * minimum of mid-term fairness. The motivation for using
3234 	 * preemption instead of idling is that, by not idling,
3235 	 * service guarantees are preserved without minimally
3236 	 * sacrificing throughput. In other words, both a high
3237 	 * throughput and its desired distribution are obtained.
3238 	 *
3239 	 * More precisely, this preemption-based, idleless approach
3240 	 * provides fairness in terms of IOPS, and not sectors per
3241 	 * second. This can be seen with a simple example. Suppose
3242 	 * that there are two queues with the same weight, but that
3243 	 * the first queue receives requests of 8 sectors, while the
3244 	 * second queue receives requests of 1024 sectors. In
3245 	 * addition, suppose that each of the two queues contains at
3246 	 * most one request at a time, which implies that each queue
3247 	 * always remains idle after it is served. Finally, after
3248 	 * remaining idle, each queue receives very quickly a new
3249 	 * request. It follows that the two queues are served
3250 	 * alternatively, preempting each other if needed. This
3251 	 * implies that, although both queues have the same weight,
3252 	 * the queue with large requests receives a service that is
3253 	 * 1024/8 times as high as the service received by the other
3254 	 * queue.
3255 	 *
3256 	 * On the other hand, device idling is performed, and thus
3257 	 * pure sector-domain guarantees are provided, for the
3258 	 * following queues, which are likely to need stronger
3259 	 * throughput guarantees: weight-raised queues, and queues
3260 	 * with a higher weight than other queues. When such queues
3261 	 * are active, sub-condition (i) is false, which triggers
3262 	 * device idling.
3263 	 *
3264 	 * According to the above considerations, the next variable is
3265 	 * true (only) if sub-condition (i) holds. To compute the
3266 	 * value of this variable, we not only use the return value of
3267 	 * the function bfq_symmetric_scenario(), but also check
3268 	 * whether bfqq is being weight-raised, because
3269 	 * bfq_symmetric_scenario() does not take into account also
3270 	 * weight-raised queues (see comments on
3271 	 * bfq_weights_tree_add()).
3272 	 *
3273 	 * As a side note, it is worth considering that the above
3274 	 * device-idling countermeasures may however fail in the
3275 	 * following unlucky scenario: if idling is (correctly)
3276 	 * disabled in a time period during which all symmetry
3277 	 * sub-conditions hold, and hence the device is allowed to
3278 	 * enqueue many requests, but at some later point in time some
3279 	 * sub-condition stops to hold, then it may become impossible
3280 	 * to let requests be served in the desired order until all
3281 	 * the requests already queued in the device have been served.
3282 	 */
3283 	asymmetric_scenario = bfqq->wr_coeff > 1 ||
3284 		!bfq_symmetric_scenario(bfqd);
3285 
3286 	/*
3287 	 * Finally, there is a case where maximizing throughput is the
3288 	 * best choice even if it may cause unfairness toward
3289 	 * bfqq. Such a case is when bfqq became active in a burst of
3290 	 * queue activations. Queues that became active during a large
3291 	 * burst benefit only from throughput, as discussed in the
3292 	 * comments on bfq_handle_burst. Thus, if bfqq became active
3293 	 * in a burst and not idling the device maximizes throughput,
3294 	 * then the device must no be idled, because not idling the
3295 	 * device provides bfqq and all other queues in the burst with
3296 	 * maximum benefit. Combining this and the above case, we can
3297 	 * now establish when idling is actually needed to preserve
3298 	 * service guarantees.
3299 	 */
3300 	idling_needed_for_service_guarantees =
3301 		asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3302 
3303 	/*
3304 	 * We have now all the components we need to compute the return
3305 	 * value of the function, which is true only if both the following
3306 	 * conditions hold:
3307 	 * 1) bfqq is sync, because idling make sense only for sync queues;
3308 	 * 2) idling either boosts the throughput (without issues), or
3309 	 *    is necessary to preserve service guarantees.
3310 	 */
3311 	return bfq_bfqq_sync(bfqq) &&
3312 		(idling_boosts_thr_without_issues ||
3313 		 idling_needed_for_service_guarantees);
3314 }
3315 
3316 /*
3317  * If the in-service queue is empty but the function bfq_bfqq_may_idle
3318  * returns true, then:
3319  * 1) the queue must remain in service and cannot be expired, and
3320  * 2) the device must be idled to wait for the possible arrival of a new
3321  *    request for the queue.
3322  * See the comments on the function bfq_bfqq_may_idle for the reasons
3323  * why performing device idling is the best choice to boost the throughput
3324  * and preserve service guarantees when bfq_bfqq_may_idle itself
3325  * returns true.
3326  */
3327 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3328 {
3329 	struct bfq_data *bfqd = bfqq->bfqd;
3330 
3331 	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
3332 	       bfq_bfqq_may_idle(bfqq);
3333 }
3334 
3335 /*
3336  * Select a queue for service.  If we have a current queue in service,
3337  * check whether to continue servicing it, or retrieve and set a new one.
3338  */
3339 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3340 {
3341 	struct bfq_queue *bfqq;
3342 	struct request *next_rq;
3343 	enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3344 
3345 	bfqq = bfqd->in_service_queue;
3346 	if (!bfqq)
3347 		goto new_queue;
3348 
3349 	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3350 
3351 	if (bfq_may_expire_for_budg_timeout(bfqq) &&
3352 	    !bfq_bfqq_wait_request(bfqq) &&
3353 	    !bfq_bfqq_must_idle(bfqq))
3354 		goto expire;
3355 
3356 check_queue:
3357 	/*
3358 	 * This loop is rarely executed more than once. Even when it
3359 	 * happens, it is much more convenient to re-execute this loop
3360 	 * than to return NULL and trigger a new dispatch to get a
3361 	 * request served.
3362 	 */
3363 	next_rq = bfqq->next_rq;
3364 	/*
3365 	 * If bfqq has requests queued and it has enough budget left to
3366 	 * serve them, keep the queue, otherwise expire it.
3367 	 */
3368 	if (next_rq) {
3369 		if (bfq_serv_to_charge(next_rq, bfqq) >
3370 			bfq_bfqq_budget_left(bfqq)) {
3371 			/*
3372 			 * Expire the queue for budget exhaustion,
3373 			 * which makes sure that the next budget is
3374 			 * enough to serve the next request, even if
3375 			 * it comes from the fifo expired path.
3376 			 */
3377 			reason = BFQQE_BUDGET_EXHAUSTED;
3378 			goto expire;
3379 		} else {
3380 			/*
3381 			 * The idle timer may be pending because we may
3382 			 * not disable disk idling even when a new request
3383 			 * arrives.
3384 			 */
3385 			if (bfq_bfqq_wait_request(bfqq)) {
3386 				/*
3387 				 * If we get here: 1) at least a new request
3388 				 * has arrived but we have not disabled the
3389 				 * timer because the request was too small,
3390 				 * 2) then the block layer has unplugged
3391 				 * the device, causing the dispatch to be
3392 				 * invoked.
3393 				 *
3394 				 * Since the device is unplugged, now the
3395 				 * requests are probably large enough to
3396 				 * provide a reasonable throughput.
3397 				 * So we disable idling.
3398 				 */
3399 				bfq_clear_bfqq_wait_request(bfqq);
3400 				hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
3401 				bfqg_stats_update_idle_time(bfqq_group(bfqq));
3402 			}
3403 			goto keep_queue;
3404 		}
3405 	}
3406 
3407 	/*
3408 	 * No requests pending. However, if the in-service queue is idling
3409 	 * for a new request, or has requests waiting for a completion and
3410 	 * may idle after their completion, then keep it anyway.
3411 	 */
3412 	if (bfq_bfqq_wait_request(bfqq) ||
3413 	    (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3414 		bfqq = NULL;
3415 		goto keep_queue;
3416 	}
3417 
3418 	reason = BFQQE_NO_MORE_REQUESTS;
3419 expire:
3420 	bfq_bfqq_expire(bfqd, bfqq, false, reason);
3421 new_queue:
3422 	bfqq = bfq_set_in_service_queue(bfqd);
3423 	if (bfqq) {
3424 		bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3425 		goto check_queue;
3426 	}
3427 keep_queue:
3428 	if (bfqq)
3429 		bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3430 	else
3431 		bfq_log(bfqd, "select_queue: no queue returned");
3432 
3433 	return bfqq;
3434 }
3435 
3436 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3437 {
3438 	struct bfq_entity *entity = &bfqq->entity;
3439 
3440 	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3441 		bfq_log_bfqq(bfqd, bfqq,
3442 			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3443 			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3444 			jiffies_to_msecs(bfqq->wr_cur_max_time),
3445 			bfqq->wr_coeff,
3446 			bfqq->entity.weight, bfqq->entity.orig_weight);
3447 
3448 		if (entity->prio_changed)
3449 			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3450 
3451 		/*
3452 		 * If the queue was activated in a burst, or too much
3453 		 * time has elapsed from the beginning of this
3454 		 * weight-raising period, then end weight raising.
3455 		 */
3456 		if (bfq_bfqq_in_large_burst(bfqq))
3457 			bfq_bfqq_end_wr(bfqq);
3458 		else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3459 						bfqq->wr_cur_max_time)) {
3460 			if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3461 			time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
3462 					       bfq_wr_duration(bfqd)))
3463 				bfq_bfqq_end_wr(bfqq);
3464 			else {
3465 				/* switch back to interactive wr */
3466 				bfqq->wr_coeff = bfqd->bfq_wr_coeff;
3467 				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
3468 				bfqq->last_wr_start_finish =
3469 					bfqq->wr_start_at_switch_to_srt;
3470 				bfqq->entity.prio_changed = 1;
3471 			}
3472 		}
3473 	}
3474 	/* Update weight both if it must be raised and if it must be lowered */
3475 	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
3476 		__bfq_entity_update_weight_prio(
3477 			bfq_entity_service_tree(entity),
3478 			entity);
3479 }
3480 
3481 /*
3482  * Dispatch next request from bfqq.
3483  */
3484 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3485 						 struct bfq_queue *bfqq)
3486 {
3487 	struct request *rq = bfqq->next_rq;
3488 	unsigned long service_to_charge;
3489 
3490 	service_to_charge = bfq_serv_to_charge(rq, bfqq);
3491 
3492 	bfq_bfqq_served(bfqq, service_to_charge);
3493 
3494 	bfq_dispatch_remove(bfqd->queue, rq);
3495 
3496 	/*
3497 	 * If weight raising has to terminate for bfqq, then next
3498 	 * function causes an immediate update of bfqq's weight,
3499 	 * without waiting for next activation. As a consequence, on
3500 	 * expiration, bfqq will be timestamped as if has never been
3501 	 * weight-raised during this service slot, even if it has
3502 	 * received part or even most of the service as a
3503 	 * weight-raised queue. This inflates bfqq's timestamps, which
3504 	 * is beneficial, as bfqq is then more willing to leave the
3505 	 * device immediately to possible other weight-raised queues.
3506 	 */
3507 	bfq_update_wr_data(bfqd, bfqq);
3508 
3509 	/*
3510 	 * Expire bfqq, pretending that its budget expired, if bfqq
3511 	 * belongs to CLASS_IDLE and other queues are waiting for
3512 	 * service.
3513 	 */
3514 	if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3515 		goto expire;
3516 
3517 	return rq;
3518 
3519 expire:
3520 	bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3521 	return rq;
3522 }
3523 
3524 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3525 {
3526 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3527 
3528 	/*
3529 	 * Avoiding lock: a race on bfqd->busy_queues should cause at
3530 	 * most a call to dispatch for nothing
3531 	 */
3532 	return !list_empty_careful(&bfqd->dispatch) ||
3533 		bfqd->busy_queues > 0;
3534 }
3535 
3536 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3537 {
3538 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3539 	struct request *rq = NULL;
3540 	struct bfq_queue *bfqq = NULL;
3541 
3542 	if (!list_empty(&bfqd->dispatch)) {
3543 		rq = list_first_entry(&bfqd->dispatch, struct request,
3544 				      queuelist);
3545 		list_del_init(&rq->queuelist);
3546 
3547 		bfqq = RQ_BFQQ(rq);
3548 
3549 		if (bfqq) {
3550 			/*
3551 			 * Increment counters here, because this
3552 			 * dispatch does not follow the standard
3553 			 * dispatch flow (where counters are
3554 			 * incremented)
3555 			 */
3556 			bfqq->dispatched++;
3557 
3558 			goto inc_in_driver_start_rq;
3559 		}
3560 
3561 		/*
3562 		 * We exploit the put_rq_private hook to decrement
3563 		 * rq_in_driver, but put_rq_private will not be
3564 		 * invoked on this request. So, to avoid unbalance,
3565 		 * just start this request, without incrementing
3566 		 * rq_in_driver. As a negative consequence,
3567 		 * rq_in_driver is deceptively lower than it should be
3568 		 * while this request is in service. This may cause
3569 		 * bfq_schedule_dispatch to be invoked uselessly.
3570 		 *
3571 		 * As for implementing an exact solution, the
3572 		 * put_request hook, if defined, is probably invoked
3573 		 * also on this request. So, by exploiting this hook,
3574 		 * we could 1) increment rq_in_driver here, and 2)
3575 		 * decrement it in put_request. Such a solution would
3576 		 * let the value of the counter be always accurate,
3577 		 * but it would entail using an extra interface
3578 		 * function. This cost seems higher than the benefit,
3579 		 * being the frequency of non-elevator-private
3580 		 * requests very low.
3581 		 */
3582 		goto start_rq;
3583 	}
3584 
3585 	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3586 
3587 	if (bfqd->busy_queues == 0)
3588 		goto exit;
3589 
3590 	/*
3591 	 * Force device to serve one request at a time if
3592 	 * strict_guarantees is true. Forcing this service scheme is
3593 	 * currently the ONLY way to guarantee that the request
3594 	 * service order enforced by the scheduler is respected by a
3595 	 * queueing device. Otherwise the device is free even to make
3596 	 * some unlucky request wait for as long as the device
3597 	 * wishes.
3598 	 *
3599 	 * Of course, serving one request at at time may cause loss of
3600 	 * throughput.
3601 	 */
3602 	if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3603 		goto exit;
3604 
3605 	bfqq = bfq_select_queue(bfqd);
3606 	if (!bfqq)
3607 		goto exit;
3608 
3609 	rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3610 
3611 	if (rq) {
3612 inc_in_driver_start_rq:
3613 		bfqd->rq_in_driver++;
3614 start_rq:
3615 		rq->rq_flags |= RQF_STARTED;
3616 	}
3617 exit:
3618 	return rq;
3619 }
3620 
3621 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3622 {
3623 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3624 	struct request *rq;
3625 
3626 	spin_lock_irq(&bfqd->lock);
3627 
3628 	rq = __bfq_dispatch_request(hctx);
3629 	spin_unlock_irq(&bfqd->lock);
3630 
3631 	return rq;
3632 }
3633 
3634 /*
3635  * Task holds one reference to the queue, dropped when task exits.  Each rq
3636  * in-flight on this queue also holds a reference, dropped when rq is freed.
3637  *
3638  * Scheduler lock must be held here. Recall not to use bfqq after calling
3639  * this function on it.
3640  */
3641 void bfq_put_queue(struct bfq_queue *bfqq)
3642 {
3643 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3644 	struct bfq_group *bfqg = bfqq_group(bfqq);
3645 #endif
3646 
3647 	if (bfqq->bfqd)
3648 		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3649 			     bfqq, bfqq->ref);
3650 
3651 	bfqq->ref--;
3652 	if (bfqq->ref)
3653 		return;
3654 
3655 	if (bfq_bfqq_sync(bfqq))
3656 		/*
3657 		 * The fact that this queue is being destroyed does not
3658 		 * invalidate the fact that this queue may have been
3659 		 * activated during the current burst. As a consequence,
3660 		 * although the queue does not exist anymore, and hence
3661 		 * needs to be removed from the burst list if there,
3662 		 * the burst size has not to be decremented.
3663 		 */
3664 		hlist_del_init(&bfqq->burst_list_node);
3665 
3666 	kmem_cache_free(bfq_pool, bfqq);
3667 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3668 	bfqg_and_blkg_put(bfqg);
3669 #endif
3670 }
3671 
3672 static void bfq_put_cooperator(struct bfq_queue *bfqq)
3673 {
3674 	struct bfq_queue *__bfqq, *next;
3675 
3676 	/*
3677 	 * If this queue was scheduled to merge with another queue, be
3678 	 * sure to drop the reference taken on that queue (and others in
3679 	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3680 	 */
3681 	__bfqq = bfqq->new_bfqq;
3682 	while (__bfqq) {
3683 		if (__bfqq == bfqq)
3684 			break;
3685 		next = __bfqq->new_bfqq;
3686 		bfq_put_queue(__bfqq);
3687 		__bfqq = next;
3688 	}
3689 }
3690 
3691 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3692 {
3693 	if (bfqq == bfqd->in_service_queue) {
3694 		__bfq_bfqq_expire(bfqd, bfqq);
3695 		bfq_schedule_dispatch(bfqd);
3696 	}
3697 
3698 	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3699 
3700 	bfq_put_cooperator(bfqq);
3701 
3702 	bfq_put_queue(bfqq); /* release process reference */
3703 }
3704 
3705 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3706 {
3707 	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3708 	struct bfq_data *bfqd;
3709 
3710 	if (bfqq)
3711 		bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3712 
3713 	if (bfqq && bfqd) {
3714 		unsigned long flags;
3715 
3716 		spin_lock_irqsave(&bfqd->lock, flags);
3717 		bfq_exit_bfqq(bfqd, bfqq);
3718 		bic_set_bfqq(bic, NULL, is_sync);
3719 		spin_unlock_irqrestore(&bfqd->lock, flags);
3720 	}
3721 }
3722 
3723 static void bfq_exit_icq(struct io_cq *icq)
3724 {
3725 	struct bfq_io_cq *bic = icq_to_bic(icq);
3726 
3727 	bfq_exit_icq_bfqq(bic, true);
3728 	bfq_exit_icq_bfqq(bic, false);
3729 }
3730 
3731 /*
3732  * Update the entity prio values; note that the new values will not
3733  * be used until the next (re)activation.
3734  */
3735 static void
3736 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3737 {
3738 	struct task_struct *tsk = current;
3739 	int ioprio_class;
3740 	struct bfq_data *bfqd = bfqq->bfqd;
3741 
3742 	if (!bfqd)
3743 		return;
3744 
3745 	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3746 	switch (ioprio_class) {
3747 	default:
3748 		dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3749 			"bfq: bad prio class %d\n", ioprio_class);
3750 	case IOPRIO_CLASS_NONE:
3751 		/*
3752 		 * No prio set, inherit CPU scheduling settings.
3753 		 */
3754 		bfqq->new_ioprio = task_nice_ioprio(tsk);
3755 		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3756 		break;
3757 	case IOPRIO_CLASS_RT:
3758 		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3759 		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3760 		break;
3761 	case IOPRIO_CLASS_BE:
3762 		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3763 		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3764 		break;
3765 	case IOPRIO_CLASS_IDLE:
3766 		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3767 		bfqq->new_ioprio = 7;
3768 		bfq_clear_bfqq_idle_window(bfqq);
3769 		break;
3770 	}
3771 
3772 	if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3773 		pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3774 			bfqq->new_ioprio);
3775 		bfqq->new_ioprio = IOPRIO_BE_NR;
3776 	}
3777 
3778 	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3779 	bfqq->entity.prio_changed = 1;
3780 }
3781 
3782 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3783 				       struct bio *bio, bool is_sync,
3784 				       struct bfq_io_cq *bic);
3785 
3786 static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3787 {
3788 	struct bfq_data *bfqd = bic_to_bfqd(bic);
3789 	struct bfq_queue *bfqq;
3790 	int ioprio = bic->icq.ioc->ioprio;
3791 
3792 	/*
3793 	 * This condition may trigger on a newly created bic, be sure to
3794 	 * drop the lock before returning.
3795 	 */
3796 	if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3797 		return;
3798 
3799 	bic->ioprio = ioprio;
3800 
3801 	bfqq = bic_to_bfqq(bic, false);
3802 	if (bfqq) {
3803 		/* release process reference on this queue */
3804 		bfq_put_queue(bfqq);
3805 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3806 		bic_set_bfqq(bic, bfqq, false);
3807 	}
3808 
3809 	bfqq = bic_to_bfqq(bic, true);
3810 	if (bfqq)
3811 		bfq_set_next_ioprio_data(bfqq, bic);
3812 }
3813 
3814 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3815 			  struct bfq_io_cq *bic, pid_t pid, int is_sync)
3816 {
3817 	RB_CLEAR_NODE(&bfqq->entity.rb_node);
3818 	INIT_LIST_HEAD(&bfqq->fifo);
3819 	INIT_HLIST_NODE(&bfqq->burst_list_node);
3820 
3821 	bfqq->ref = 0;
3822 	bfqq->bfqd = bfqd;
3823 
3824 	if (bic)
3825 		bfq_set_next_ioprio_data(bfqq, bic);
3826 
3827 	if (is_sync) {
3828 		if (!bfq_class_idle(bfqq))
3829 			bfq_mark_bfqq_idle_window(bfqq);
3830 		bfq_mark_bfqq_sync(bfqq);
3831 		bfq_mark_bfqq_just_created(bfqq);
3832 	} else
3833 		bfq_clear_bfqq_sync(bfqq);
3834 
3835 	/* set end request to minus infinity from now */
3836 	bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3837 
3838 	bfq_mark_bfqq_IO_bound(bfqq);
3839 
3840 	bfqq->pid = pid;
3841 
3842 	/* Tentative initial value to trade off between thr and lat */
3843 	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
3844 	bfqq->budget_timeout = bfq_smallest_from_now();
3845 
3846 	bfqq->wr_coeff = 1;
3847 	bfqq->last_wr_start_finish = jiffies;
3848 	bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
3849 	bfqq->split_time = bfq_smallest_from_now();
3850 
3851 	/*
3852 	 * Set to the value for which bfqq will not be deemed as
3853 	 * soft rt when it becomes backlogged.
3854 	 */
3855 	bfqq->soft_rt_next_start = bfq_greatest_from_now();
3856 
3857 	/* first request is almost certainly seeky */
3858 	bfqq->seek_history = 1;
3859 }
3860 
3861 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
3862 					       struct bfq_group *bfqg,
3863 					       int ioprio_class, int ioprio)
3864 {
3865 	switch (ioprio_class) {
3866 	case IOPRIO_CLASS_RT:
3867 		return &bfqg->async_bfqq[0][ioprio];
3868 	case IOPRIO_CLASS_NONE:
3869 		ioprio = IOPRIO_NORM;
3870 		/* fall through */
3871 	case IOPRIO_CLASS_BE:
3872 		return &bfqg->async_bfqq[1][ioprio];
3873 	case IOPRIO_CLASS_IDLE:
3874 		return &bfqg->async_idle_bfqq;
3875 	default:
3876 		return NULL;
3877 	}
3878 }
3879 
3880 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3881 				       struct bio *bio, bool is_sync,
3882 				       struct bfq_io_cq *bic)
3883 {
3884 	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3885 	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3886 	struct bfq_queue **async_bfqq = NULL;
3887 	struct bfq_queue *bfqq;
3888 	struct bfq_group *bfqg;
3889 
3890 	rcu_read_lock();
3891 
3892 	bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
3893 	if (!bfqg) {
3894 		bfqq = &bfqd->oom_bfqq;
3895 		goto out;
3896 	}
3897 
3898 	if (!is_sync) {
3899 		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
3900 						  ioprio);
3901 		bfqq = *async_bfqq;
3902 		if (bfqq)
3903 			goto out;
3904 	}
3905 
3906 	bfqq = kmem_cache_alloc_node(bfq_pool,
3907 				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
3908 				     bfqd->queue->node);
3909 
3910 	if (bfqq) {
3911 		bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
3912 			      is_sync);
3913 		bfq_init_entity(&bfqq->entity, bfqg);
3914 		bfq_log_bfqq(bfqd, bfqq, "allocated");
3915 	} else {
3916 		bfqq = &bfqd->oom_bfqq;
3917 		bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
3918 		goto out;
3919 	}
3920 
3921 	/*
3922 	 * Pin the queue now that it's allocated, scheduler exit will
3923 	 * prune it.
3924 	 */
3925 	if (async_bfqq) {
3926 		bfqq->ref++; /*
3927 			      * Extra group reference, w.r.t. sync
3928 			      * queue. This extra reference is removed
3929 			      * only if bfqq->bfqg disappears, to
3930 			      * guarantee that this queue is not freed
3931 			      * until its group goes away.
3932 			      */
3933 		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
3934 			     bfqq, bfqq->ref);
3935 		*async_bfqq = bfqq;
3936 	}
3937 
3938 out:
3939 	bfqq->ref++; /* get a process reference to this queue */
3940 	bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
3941 	rcu_read_unlock();
3942 	return bfqq;
3943 }
3944 
3945 static void bfq_update_io_thinktime(struct bfq_data *bfqd,
3946 				    struct bfq_queue *bfqq)
3947 {
3948 	struct bfq_ttime *ttime = &bfqq->ttime;
3949 	u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
3950 
3951 	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
3952 
3953 	ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
3954 	ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
3955 	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3956 				     ttime->ttime_samples);
3957 }
3958 
3959 static void
3960 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3961 		       struct request *rq)
3962 {
3963 	bfqq->seek_history <<= 1;
3964 	bfqq->seek_history |=
3965 		get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
3966 		(!blk_queue_nonrot(bfqd->queue) ||
3967 		 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
3968 }
3969 
3970 /*
3971  * Disable idle window if the process thinks too long or seeks so much that
3972  * it doesn't matter.
3973  */
3974 static void bfq_update_idle_window(struct bfq_data *bfqd,
3975 				   struct bfq_queue *bfqq,
3976 				   struct bfq_io_cq *bic)
3977 {
3978 	int enable_idle;
3979 
3980 	/* Don't idle for async or idle io prio class. */
3981 	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
3982 		return;
3983 
3984 	/* Idle window just restored, statistics are meaningless. */
3985 	if (time_is_after_eq_jiffies(bfqq->split_time +
3986 				     bfqd->bfq_wr_min_idle_time))
3987 		return;
3988 
3989 	enable_idle = bfq_bfqq_idle_window(bfqq);
3990 
3991 	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
3992 	    bfqd->bfq_slice_idle == 0 ||
3993 		(bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
3994 			bfqq->wr_coeff == 1))
3995 		enable_idle = 0;
3996 	else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) {
3997 		if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle &&
3998 			bfqq->wr_coeff == 1)
3999 			enable_idle = 0;
4000 		else
4001 			enable_idle = 1;
4002 	}
4003 	bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
4004 		enable_idle);
4005 
4006 	if (enable_idle)
4007 		bfq_mark_bfqq_idle_window(bfqq);
4008 	else
4009 		bfq_clear_bfqq_idle_window(bfqq);
4010 }
4011 
4012 /*
4013  * Called when a new fs request (rq) is added to bfqq.  Check if there's
4014  * something we should do about it.
4015  */
4016 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4017 			    struct request *rq)
4018 {
4019 	struct bfq_io_cq *bic = RQ_BIC(rq);
4020 
4021 	if (rq->cmd_flags & REQ_META)
4022 		bfqq->meta_pending++;
4023 
4024 	bfq_update_io_thinktime(bfqd, bfqq);
4025 	bfq_update_io_seektime(bfqd, bfqq, rq);
4026 	if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
4027 	    !BFQQ_SEEKY(bfqq))
4028 		bfq_update_idle_window(bfqd, bfqq, bic);
4029 
4030 	bfq_log_bfqq(bfqd, bfqq,
4031 		     "rq_enqueued: idle_window=%d (seeky %d)",
4032 		     bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
4033 
4034 	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4035 
4036 	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4037 		bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4038 				 blk_rq_sectors(rq) < 32;
4039 		bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4040 
4041 		/*
4042 		 * There is just this request queued: if the request
4043 		 * is small and the queue is not to be expired, then
4044 		 * just exit.
4045 		 *
4046 		 * In this way, if the device is being idled to wait
4047 		 * for a new request from the in-service queue, we
4048 		 * avoid unplugging the device and committing the
4049 		 * device to serve just a small request. On the
4050 		 * contrary, we wait for the block layer to decide
4051 		 * when to unplug the device: hopefully, new requests
4052 		 * will be merged to this one quickly, then the device
4053 		 * will be unplugged and larger requests will be
4054 		 * dispatched.
4055 		 */
4056 		if (small_req && !budget_timeout)
4057 			return;
4058 
4059 		/*
4060 		 * A large enough request arrived, or the queue is to
4061 		 * be expired: in both cases disk idling is to be
4062 		 * stopped, so clear wait_request flag and reset
4063 		 * timer.
4064 		 */
4065 		bfq_clear_bfqq_wait_request(bfqq);
4066 		hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4067 		bfqg_stats_update_idle_time(bfqq_group(bfqq));
4068 
4069 		/*
4070 		 * The queue is not empty, because a new request just
4071 		 * arrived. Hence we can safely expire the queue, in
4072 		 * case of budget timeout, without risking that the
4073 		 * timestamps of the queue are not updated correctly.
4074 		 * See [1] for more details.
4075 		 */
4076 		if (budget_timeout)
4077 			bfq_bfqq_expire(bfqd, bfqq, false,
4078 					BFQQE_BUDGET_TIMEOUT);
4079 	}
4080 }
4081 
4082 static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4083 {
4084 	struct bfq_queue *bfqq = RQ_BFQQ(rq),
4085 		*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4086 
4087 	if (new_bfqq) {
4088 		if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4089 			new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4090 		/*
4091 		 * Release the request's reference to the old bfqq
4092 		 * and make sure one is taken to the shared queue.
4093 		 */
4094 		new_bfqq->allocated++;
4095 		bfqq->allocated--;
4096 		new_bfqq->ref++;
4097 		bfq_clear_bfqq_just_created(bfqq);
4098 		/*
4099 		 * If the bic associated with the process
4100 		 * issuing this request still points to bfqq
4101 		 * (and thus has not been already redirected
4102 		 * to new_bfqq or even some other bfq_queue),
4103 		 * then complete the merge and redirect it to
4104 		 * new_bfqq.
4105 		 */
4106 		if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4107 			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4108 					bfqq, new_bfqq);
4109 		/*
4110 		 * rq is about to be enqueued into new_bfqq,
4111 		 * release rq reference on bfqq
4112 		 */
4113 		bfq_put_queue(bfqq);
4114 		rq->elv.priv[1] = new_bfqq;
4115 		bfqq = new_bfqq;
4116 	}
4117 
4118 	bfq_add_request(rq);
4119 
4120 	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4121 	list_add_tail(&rq->queuelist, &bfqq->fifo);
4122 
4123 	bfq_rq_enqueued(bfqd, bfqq, rq);
4124 }
4125 
4126 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4127 			       bool at_head)
4128 {
4129 	struct request_queue *q = hctx->queue;
4130 	struct bfq_data *bfqd = q->elevator->elevator_data;
4131 
4132 	spin_lock_irq(&bfqd->lock);
4133 	if (blk_mq_sched_try_insert_merge(q, rq)) {
4134 		spin_unlock_irq(&bfqd->lock);
4135 		return;
4136 	}
4137 
4138 	spin_unlock_irq(&bfqd->lock);
4139 
4140 	blk_mq_sched_request_inserted(rq);
4141 
4142 	spin_lock_irq(&bfqd->lock);
4143 	if (at_head || blk_rq_is_passthrough(rq)) {
4144 		if (at_head)
4145 			list_add(&rq->queuelist, &bfqd->dispatch);
4146 		else
4147 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
4148 	} else {
4149 		__bfq_insert_request(bfqd, rq);
4150 
4151 		if (rq_mergeable(rq)) {
4152 			elv_rqhash_add(q, rq);
4153 			if (!q->last_merge)
4154 				q->last_merge = rq;
4155 		}
4156 	}
4157 
4158 	spin_unlock_irq(&bfqd->lock);
4159 }
4160 
4161 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4162 				struct list_head *list, bool at_head)
4163 {
4164 	while (!list_empty(list)) {
4165 		struct request *rq;
4166 
4167 		rq = list_first_entry(list, struct request, queuelist);
4168 		list_del_init(&rq->queuelist);
4169 		bfq_insert_request(hctx, rq, at_head);
4170 	}
4171 }
4172 
4173 static void bfq_update_hw_tag(struct bfq_data *bfqd)
4174 {
4175 	bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4176 				       bfqd->rq_in_driver);
4177 
4178 	if (bfqd->hw_tag == 1)
4179 		return;
4180 
4181 	/*
4182 	 * This sample is valid if the number of outstanding requests
4183 	 * is large enough to allow a queueing behavior.  Note that the
4184 	 * sum is not exact, as it's not taking into account deactivated
4185 	 * requests.
4186 	 */
4187 	if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4188 		return;
4189 
4190 	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4191 		return;
4192 
4193 	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4194 	bfqd->max_rq_in_driver = 0;
4195 	bfqd->hw_tag_samples = 0;
4196 }
4197 
4198 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4199 {
4200 	u64 now_ns;
4201 	u32 delta_us;
4202 
4203 	bfq_update_hw_tag(bfqd);
4204 
4205 	bfqd->rq_in_driver--;
4206 	bfqq->dispatched--;
4207 
4208 	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4209 		/*
4210 		 * Set budget_timeout (which we overload to store the
4211 		 * time at which the queue remains with no backlog and
4212 		 * no outstanding request; used by the weight-raising
4213 		 * mechanism).
4214 		 */
4215 		bfqq->budget_timeout = jiffies;
4216 
4217 		bfq_weights_tree_remove(bfqd, &bfqq->entity,
4218 					&bfqd->queue_weights_tree);
4219 	}
4220 
4221 	now_ns = ktime_get_ns();
4222 
4223 	bfqq->ttime.last_end_request = now_ns;
4224 
4225 	/*
4226 	 * Using us instead of ns, to get a reasonable precision in
4227 	 * computing rate in next check.
4228 	 */
4229 	delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4230 
4231 	/*
4232 	 * If the request took rather long to complete, and, according
4233 	 * to the maximum request size recorded, this completion latency
4234 	 * implies that the request was certainly served at a very low
4235 	 * rate (less than 1M sectors/sec), then the whole observation
4236 	 * interval that lasts up to this time instant cannot be a
4237 	 * valid time interval for computing a new peak rate.  Invoke
4238 	 * bfq_update_rate_reset to have the following three steps
4239 	 * taken:
4240 	 * - close the observation interval at the last (previous)
4241 	 *   request dispatch or completion
4242 	 * - compute rate, if possible, for that observation interval
4243 	 * - reset to zero samples, which will trigger a proper
4244 	 *   re-initialization of the observation interval on next
4245 	 *   dispatch
4246 	 */
4247 	if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
4248 	   (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
4249 			1UL<<(BFQ_RATE_SHIFT - 10))
4250 		bfq_update_rate_reset(bfqd, NULL);
4251 	bfqd->last_completion = now_ns;
4252 
4253 	/*
4254 	 * If we are waiting to discover whether the request pattern
4255 	 * of the task associated with the queue is actually
4256 	 * isochronous, and both requisites for this condition to hold
4257 	 * are now satisfied, then compute soft_rt_next_start (see the
4258 	 * comments on the function bfq_bfqq_softrt_next_start()). We
4259 	 * schedule this delayed check when bfqq expires, if it still
4260 	 * has in-flight requests.
4261 	 */
4262 	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4263 	    RB_EMPTY_ROOT(&bfqq->sort_list))
4264 		bfqq->soft_rt_next_start =
4265 			bfq_bfqq_softrt_next_start(bfqd, bfqq);
4266 
4267 	/*
4268 	 * If this is the in-service queue, check if it needs to be expired,
4269 	 * or if we want to idle in case it has no pending requests.
4270 	 */
4271 	if (bfqd->in_service_queue == bfqq) {
4272 		if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
4273 			bfq_arm_slice_timer(bfqd);
4274 			return;
4275 		} else if (bfq_may_expire_for_budg_timeout(bfqq))
4276 			bfq_bfqq_expire(bfqd, bfqq, false,
4277 					BFQQE_BUDGET_TIMEOUT);
4278 		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4279 			 (bfqq->dispatched == 0 ||
4280 			  !bfq_bfqq_may_idle(bfqq)))
4281 			bfq_bfqq_expire(bfqd, bfqq, false,
4282 					BFQQE_NO_MORE_REQUESTS);
4283 	}
4284 }
4285 
4286 static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4287 {
4288 	bfqq->allocated--;
4289 
4290 	bfq_put_queue(bfqq);
4291 }
4292 
4293 static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
4294 {
4295 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
4296 	struct bfq_data *bfqd = bfqq->bfqd;
4297 
4298 	if (rq->rq_flags & RQF_STARTED)
4299 		bfqg_stats_update_completion(bfqq_group(bfqq),
4300 					     rq_start_time_ns(rq),
4301 					     rq_io_start_time_ns(rq),
4302 					     rq->cmd_flags);
4303 
4304 	if (likely(rq->rq_flags & RQF_STARTED)) {
4305 		unsigned long flags;
4306 
4307 		spin_lock_irqsave(&bfqd->lock, flags);
4308 
4309 		bfq_completed_request(bfqq, bfqd);
4310 		bfq_put_rq_priv_body(bfqq);
4311 
4312 		spin_unlock_irqrestore(&bfqd->lock, flags);
4313 	} else {
4314 		/*
4315 		 * Request rq may be still/already in the scheduler,
4316 		 * in which case we need to remove it. And we cannot
4317 		 * defer such a check and removal, to avoid
4318 		 * inconsistencies in the time interval from the end
4319 		 * of this function to the start of the deferred work.
4320 		 * This situation seems to occur only in process
4321 		 * context, as a consequence of a merge. In the
4322 		 * current version of the code, this implies that the
4323 		 * lock is held.
4324 		 */
4325 
4326 		if (!RB_EMPTY_NODE(&rq->rb_node))
4327 			bfq_remove_request(q, rq);
4328 		bfq_put_rq_priv_body(bfqq);
4329 	}
4330 
4331 	rq->elv.priv[0] = NULL;
4332 	rq->elv.priv[1] = NULL;
4333 }
4334 
4335 /*
4336  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4337  * was the last process referring to that bfqq.
4338  */
4339 static struct bfq_queue *
4340 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4341 {
4342 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4343 
4344 	if (bfqq_process_refs(bfqq) == 1) {
4345 		bfqq->pid = current->pid;
4346 		bfq_clear_bfqq_coop(bfqq);
4347 		bfq_clear_bfqq_split_coop(bfqq);
4348 		return bfqq;
4349 	}
4350 
4351 	bic_set_bfqq(bic, NULL, 1);
4352 
4353 	bfq_put_cooperator(bfqq);
4354 
4355 	bfq_put_queue(bfqq);
4356 	return NULL;
4357 }
4358 
4359 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4360 						   struct bfq_io_cq *bic,
4361 						   struct bio *bio,
4362 						   bool split, bool is_sync,
4363 						   bool *new_queue)
4364 {
4365 	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4366 
4367 	if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4368 		return bfqq;
4369 
4370 	if (new_queue)
4371 		*new_queue = true;
4372 
4373 	if (bfqq)
4374 		bfq_put_queue(bfqq);
4375 	bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4376 
4377 	bic_set_bfqq(bic, bfqq, is_sync);
4378 	if (split && is_sync) {
4379 		if ((bic->was_in_burst_list && bfqd->large_burst) ||
4380 		    bic->saved_in_large_burst)
4381 			bfq_mark_bfqq_in_large_burst(bfqq);
4382 		else {
4383 			bfq_clear_bfqq_in_large_burst(bfqq);
4384 			if (bic->was_in_burst_list)
4385 				hlist_add_head(&bfqq->burst_list_node,
4386 					       &bfqd->burst_list);
4387 		}
4388 		bfqq->split_time = jiffies;
4389 	}
4390 
4391 	return bfqq;
4392 }
4393 
4394 /*
4395  * Allocate bfq data structures associated with this request.
4396  */
4397 static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
4398 			      struct bio *bio)
4399 {
4400 	struct bfq_data *bfqd = q->elevator->elevator_data;
4401 	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
4402 	const int is_sync = rq_is_sync(rq);
4403 	struct bfq_queue *bfqq;
4404 	bool new_queue = false;
4405 	bool split = false;
4406 
4407 	spin_lock_irq(&bfqd->lock);
4408 
4409 	if (!bic)
4410 		goto queue_fail;
4411 
4412 	bfq_check_ioprio_change(bic, bio);
4413 
4414 	bfq_bic_update_cgroup(bic, bio);
4415 
4416 	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4417 					 &new_queue);
4418 
4419 	if (likely(!new_queue)) {
4420 		/* If the queue was seeky for too long, break it apart. */
4421 		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4422 			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
4423 
4424 			/* Update bic before losing reference to bfqq */
4425 			if (bfq_bfqq_in_large_burst(bfqq))
4426 				bic->saved_in_large_burst = true;
4427 
4428 			bfqq = bfq_split_bfqq(bic, bfqq);
4429 			split = true;
4430 
4431 			if (!bfqq)
4432 				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4433 								 true, is_sync,
4434 								 NULL);
4435 		}
4436 	}
4437 
4438 	bfqq->allocated++;
4439 	bfqq->ref++;
4440 	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4441 		     rq, bfqq, bfqq->ref);
4442 
4443 	rq->elv.priv[0] = bic;
4444 	rq->elv.priv[1] = bfqq;
4445 
4446 	/*
4447 	 * If a bfq_queue has only one process reference, it is owned
4448 	 * by only this bic: we can then set bfqq->bic = bic. in
4449 	 * addition, if the queue has also just been split, we have to
4450 	 * resume its state.
4451 	 */
4452 	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4453 		bfqq->bic = bic;
4454 		if (split) {
4455 			/*
4456 			 * The queue has just been split from a shared
4457 			 * queue: restore the idle window and the
4458 			 * possible weight raising period.
4459 			 */
4460 			bfq_bfqq_resume_state(bfqq, bic);
4461 		}
4462 	}
4463 
4464 	if (unlikely(bfq_bfqq_just_created(bfqq)))
4465 		bfq_handle_burst(bfqd, bfqq);
4466 
4467 	spin_unlock_irq(&bfqd->lock);
4468 
4469 	return 0;
4470 
4471 queue_fail:
4472 	spin_unlock_irq(&bfqd->lock);
4473 
4474 	return 1;
4475 }
4476 
4477 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
4478 {
4479 	struct bfq_data *bfqd = bfqq->bfqd;
4480 	enum bfqq_expiration reason;
4481 	unsigned long flags;
4482 
4483 	spin_lock_irqsave(&bfqd->lock, flags);
4484 	bfq_clear_bfqq_wait_request(bfqq);
4485 
4486 	if (bfqq != bfqd->in_service_queue) {
4487 		spin_unlock_irqrestore(&bfqd->lock, flags);
4488 		return;
4489 	}
4490 
4491 	if (bfq_bfqq_budget_timeout(bfqq))
4492 		/*
4493 		 * Also here the queue can be safely expired
4494 		 * for budget timeout without wasting
4495 		 * guarantees
4496 		 */
4497 		reason = BFQQE_BUDGET_TIMEOUT;
4498 	else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4499 		/*
4500 		 * The queue may not be empty upon timer expiration,
4501 		 * because we may not disable the timer when the
4502 		 * first request of the in-service queue arrives
4503 		 * during disk idling.
4504 		 */
4505 		reason = BFQQE_TOO_IDLE;
4506 	else
4507 		goto schedule_dispatch;
4508 
4509 	bfq_bfqq_expire(bfqd, bfqq, true, reason);
4510 
4511 schedule_dispatch:
4512 	spin_unlock_irqrestore(&bfqd->lock, flags);
4513 	bfq_schedule_dispatch(bfqd);
4514 }
4515 
4516 /*
4517  * Handler of the expiration of the timer running if the in-service queue
4518  * is idling inside its time slice.
4519  */
4520 static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4521 {
4522 	struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4523 					     idle_slice_timer);
4524 	struct bfq_queue *bfqq = bfqd->in_service_queue;
4525 
4526 	/*
4527 	 * Theoretical race here: the in-service queue can be NULL or
4528 	 * different from the queue that was idling if a new request
4529 	 * arrives for the current queue and there is a full dispatch
4530 	 * cycle that changes the in-service queue.  This can hardly
4531 	 * happen, but in the worst case we just expire a queue too
4532 	 * early.
4533 	 */
4534 	if (bfqq)
4535 		bfq_idle_slice_timer_body(bfqq);
4536 
4537 	return HRTIMER_NORESTART;
4538 }
4539 
4540 static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4541 				 struct bfq_queue **bfqq_ptr)
4542 {
4543 	struct bfq_queue *bfqq = *bfqq_ptr;
4544 
4545 	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4546 	if (bfqq) {
4547 		bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4548 
4549 		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4550 			     bfqq, bfqq->ref);
4551 		bfq_put_queue(bfqq);
4552 		*bfqq_ptr = NULL;
4553 	}
4554 }
4555 
4556 /*
4557  * Release all the bfqg references to its async queues.  If we are
4558  * deallocating the group these queues may still contain requests, so
4559  * we reparent them to the root cgroup (i.e., the only one that will
4560  * exist for sure until all the requests on a device are gone).
4561  */
4562 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
4563 {
4564 	int i, j;
4565 
4566 	for (i = 0; i < 2; i++)
4567 		for (j = 0; j < IOPRIO_BE_NR; j++)
4568 			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
4569 
4570 	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
4571 }
4572 
4573 static void bfq_exit_queue(struct elevator_queue *e)
4574 {
4575 	struct bfq_data *bfqd = e->elevator_data;
4576 	struct bfq_queue *bfqq, *n;
4577 
4578 	hrtimer_cancel(&bfqd->idle_slice_timer);
4579 
4580 	spin_lock_irq(&bfqd->lock);
4581 	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
4582 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
4583 	spin_unlock_irq(&bfqd->lock);
4584 
4585 	hrtimer_cancel(&bfqd->idle_slice_timer);
4586 
4587 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4588 	blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4589 #else
4590 	spin_lock_irq(&bfqd->lock);
4591 	bfq_put_async_queues(bfqd, bfqd->root_group);
4592 	kfree(bfqd->root_group);
4593 	spin_unlock_irq(&bfqd->lock);
4594 #endif
4595 
4596 	kfree(bfqd);
4597 }
4598 
4599 static void bfq_init_root_group(struct bfq_group *root_group,
4600 				struct bfq_data *bfqd)
4601 {
4602 	int i;
4603 
4604 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4605 	root_group->entity.parent = NULL;
4606 	root_group->my_entity = NULL;
4607 	root_group->bfqd = bfqd;
4608 #endif
4609 	root_group->rq_pos_tree = RB_ROOT;
4610 	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4611 		root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4612 	root_group->sched_data.bfq_class_idle_last_service = jiffies;
4613 }
4614 
4615 static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4616 {
4617 	struct bfq_data *bfqd;
4618 	struct elevator_queue *eq;
4619 
4620 	eq = elevator_alloc(q, e);
4621 	if (!eq)
4622 		return -ENOMEM;
4623 
4624 	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4625 	if (!bfqd) {
4626 		kobject_put(&eq->kobj);
4627 		return -ENOMEM;
4628 	}
4629 	eq->elevator_data = bfqd;
4630 
4631 	spin_lock_irq(q->queue_lock);
4632 	q->elevator = eq;
4633 	spin_unlock_irq(q->queue_lock);
4634 
4635 	/*
4636 	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
4637 	 * Grab a permanent reference to it, so that the normal code flow
4638 	 * will not attempt to free it.
4639 	 */
4640 	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4641 	bfqd->oom_bfqq.ref++;
4642 	bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4643 	bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4644 	bfqd->oom_bfqq.entity.new_weight =
4645 		bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
4646 
4647 	/* oom_bfqq does not participate to bursts */
4648 	bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
4649 
4650 	/*
4651 	 * Trigger weight initialization, according to ioprio, at the
4652 	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4653 	 * class won't be changed any more.
4654 	 */
4655 	bfqd->oom_bfqq.entity.prio_changed = 1;
4656 
4657 	bfqd->queue = q;
4658 
4659 	INIT_LIST_HEAD(&bfqd->dispatch);
4660 
4661 	hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4662 		     HRTIMER_MODE_REL);
4663 	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4664 
4665 	bfqd->queue_weights_tree = RB_ROOT;
4666 	bfqd->group_weights_tree = RB_ROOT;
4667 
4668 	INIT_LIST_HEAD(&bfqd->active_list);
4669 	INIT_LIST_HEAD(&bfqd->idle_list);
4670 	INIT_HLIST_HEAD(&bfqd->burst_list);
4671 
4672 	bfqd->hw_tag = -1;
4673 
4674 	bfqd->bfq_max_budget = bfq_default_max_budget;
4675 
4676 	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4677 	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4678 	bfqd->bfq_back_max = bfq_back_max;
4679 	bfqd->bfq_back_penalty = bfq_back_penalty;
4680 	bfqd->bfq_slice_idle = bfq_slice_idle;
4681 	bfqd->bfq_timeout = bfq_timeout;
4682 
4683 	bfqd->bfq_requests_within_timer = 120;
4684 
4685 	bfqd->bfq_large_burst_thresh = 8;
4686 	bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4687 
4688 	bfqd->low_latency = true;
4689 
4690 	/*
4691 	 * Trade-off between responsiveness and fairness.
4692 	 */
4693 	bfqd->bfq_wr_coeff = 30;
4694 	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
4695 	bfqd->bfq_wr_max_time = 0;
4696 	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4697 	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
4698 	bfqd->bfq_wr_max_softrt_rate = 7000; /*
4699 					      * Approximate rate required
4700 					      * to playback or record a
4701 					      * high-definition compressed
4702 					      * video.
4703 					      */
4704 	bfqd->wr_busy_queues = 0;
4705 
4706 	/*
4707 	 * Begin by assuming, optimistically, that the device is a
4708 	 * high-speed one, and that its peak rate is equal to 2/3 of
4709 	 * the highest reference rate.
4710 	 */
4711 	bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4712 			T_fast[blk_queue_nonrot(bfqd->queue)];
4713 	bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4714 	bfqd->device_speed = BFQ_BFQD_FAST;
4715 
4716 	spin_lock_init(&bfqd->lock);
4717 
4718 	/*
4719 	 * The invocation of the next bfq_create_group_hierarchy
4720 	 * function is the head of a chain of function calls
4721 	 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4722 	 * blk_mq_freeze_queue) that may lead to the invocation of the
4723 	 * has_work hook function. For this reason,
4724 	 * bfq_create_group_hierarchy is invoked only after all
4725 	 * scheduler data has been initialized, apart from the fields
4726 	 * that can be initialized only after invoking
4727 	 * bfq_create_group_hierarchy. This, in particular, enables
4728 	 * has_work to correctly return false. Of course, to avoid
4729 	 * other inconsistencies, the blk-mq stack must then refrain
4730 	 * from invoking further scheduler hooks before this init
4731 	 * function is finished.
4732 	 */
4733 	bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4734 	if (!bfqd->root_group)
4735 		goto out_free;
4736 	bfq_init_root_group(bfqd->root_group, bfqd);
4737 	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4738 
4739 
4740 	return 0;
4741 
4742 out_free:
4743 	kfree(bfqd);
4744 	kobject_put(&eq->kobj);
4745 	return -ENOMEM;
4746 }
4747 
4748 static void bfq_slab_kill(void)
4749 {
4750 	kmem_cache_destroy(bfq_pool);
4751 }
4752 
4753 static int __init bfq_slab_setup(void)
4754 {
4755 	bfq_pool = KMEM_CACHE(bfq_queue, 0);
4756 	if (!bfq_pool)
4757 		return -ENOMEM;
4758 	return 0;
4759 }
4760 
4761 static ssize_t bfq_var_show(unsigned int var, char *page)
4762 {
4763 	return sprintf(page, "%u\n", var);
4764 }
4765 
4766 static ssize_t bfq_var_store(unsigned long *var, const char *page,
4767 			     size_t count)
4768 {
4769 	unsigned long new_val;
4770 	int ret = kstrtoul(page, 10, &new_val);
4771 
4772 	if (ret == 0)
4773 		*var = new_val;
4774 
4775 	return count;
4776 }
4777 
4778 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
4779 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
4780 {									\
4781 	struct bfq_data *bfqd = e->elevator_data;			\
4782 	u64 __data = __VAR;						\
4783 	if (__CONV == 1)						\
4784 		__data = jiffies_to_msecs(__data);			\
4785 	else if (__CONV == 2)						\
4786 		__data = div_u64(__data, NSEC_PER_MSEC);		\
4787 	return bfq_var_show(__data, (page));				\
4788 }
4789 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
4790 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
4791 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
4792 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
4793 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
4794 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
4795 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
4796 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
4797 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
4798 #undef SHOW_FUNCTION
4799 
4800 #define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
4801 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
4802 {									\
4803 	struct bfq_data *bfqd = e->elevator_data;			\
4804 	u64 __data = __VAR;						\
4805 	__data = div_u64(__data, NSEC_PER_USEC);			\
4806 	return bfq_var_show(__data, (page));				\
4807 }
4808 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
4809 #undef USEC_SHOW_FUNCTION
4810 
4811 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
4812 static ssize_t								\
4813 __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
4814 {									\
4815 	struct bfq_data *bfqd = e->elevator_data;			\
4816 	unsigned long uninitialized_var(__data);			\
4817 	int ret = bfq_var_store(&__data, (page), count);		\
4818 	if (__data < (MIN))						\
4819 		__data = (MIN);						\
4820 	else if (__data > (MAX))					\
4821 		__data = (MAX);						\
4822 	if (__CONV == 1)						\
4823 		*(__PTR) = msecs_to_jiffies(__data);			\
4824 	else if (__CONV == 2)						\
4825 		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
4826 	else								\
4827 		*(__PTR) = __data;					\
4828 	return ret;							\
4829 }
4830 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
4831 		INT_MAX, 2);
4832 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
4833 		INT_MAX, 2);
4834 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
4835 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
4836 		INT_MAX, 0);
4837 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
4838 #undef STORE_FUNCTION
4839 
4840 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
4841 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
4842 {									\
4843 	struct bfq_data *bfqd = e->elevator_data;			\
4844 	unsigned long uninitialized_var(__data);			\
4845 	int ret = bfq_var_store(&__data, (page), count);		\
4846 	if (__data < (MIN))						\
4847 		__data = (MIN);						\
4848 	else if (__data > (MAX))					\
4849 		__data = (MAX);						\
4850 	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
4851 	return ret;							\
4852 }
4853 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
4854 		    UINT_MAX);
4855 #undef USEC_STORE_FUNCTION
4856 
4857 static ssize_t bfq_max_budget_store(struct elevator_queue *e,
4858 				    const char *page, size_t count)
4859 {
4860 	struct bfq_data *bfqd = e->elevator_data;
4861 	unsigned long uninitialized_var(__data);
4862 	int ret = bfq_var_store(&__data, (page), count);
4863 
4864 	if (__data == 0)
4865 		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4866 	else {
4867 		if (__data > INT_MAX)
4868 			__data = INT_MAX;
4869 		bfqd->bfq_max_budget = __data;
4870 	}
4871 
4872 	bfqd->bfq_user_max_budget = __data;
4873 
4874 	return ret;
4875 }
4876 
4877 /*
4878  * Leaving this name to preserve name compatibility with cfq
4879  * parameters, but this timeout is used for both sync and async.
4880  */
4881 static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
4882 				      const char *page, size_t count)
4883 {
4884 	struct bfq_data *bfqd = e->elevator_data;
4885 	unsigned long uninitialized_var(__data);
4886 	int ret = bfq_var_store(&__data, (page), count);
4887 
4888 	if (__data < 1)
4889 		__data = 1;
4890 	else if (__data > INT_MAX)
4891 		__data = INT_MAX;
4892 
4893 	bfqd->bfq_timeout = msecs_to_jiffies(__data);
4894 	if (bfqd->bfq_user_max_budget == 0)
4895 		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4896 
4897 	return ret;
4898 }
4899 
4900 static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
4901 				     const char *page, size_t count)
4902 {
4903 	struct bfq_data *bfqd = e->elevator_data;
4904 	unsigned long uninitialized_var(__data);
4905 	int ret = bfq_var_store(&__data, (page), count);
4906 
4907 	if (__data > 1)
4908 		__data = 1;
4909 	if (!bfqd->strict_guarantees && __data == 1
4910 	    && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
4911 		bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
4912 
4913 	bfqd->strict_guarantees = __data;
4914 
4915 	return ret;
4916 }
4917 
4918 static ssize_t bfq_low_latency_store(struct elevator_queue *e,
4919 				     const char *page, size_t count)
4920 {
4921 	struct bfq_data *bfqd = e->elevator_data;
4922 	unsigned long uninitialized_var(__data);
4923 	int ret = bfq_var_store(&__data, (page), count);
4924 
4925 	if (__data > 1)
4926 		__data = 1;
4927 	if (__data == 0 && bfqd->low_latency != 0)
4928 		bfq_end_wr(bfqd);
4929 	bfqd->low_latency = __data;
4930 
4931 	return ret;
4932 }
4933 
4934 #define BFQ_ATTR(name) \
4935 	__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
4936 
4937 static struct elv_fs_entry bfq_attrs[] = {
4938 	BFQ_ATTR(fifo_expire_sync),
4939 	BFQ_ATTR(fifo_expire_async),
4940 	BFQ_ATTR(back_seek_max),
4941 	BFQ_ATTR(back_seek_penalty),
4942 	BFQ_ATTR(slice_idle),
4943 	BFQ_ATTR(slice_idle_us),
4944 	BFQ_ATTR(max_budget),
4945 	BFQ_ATTR(timeout_sync),
4946 	BFQ_ATTR(strict_guarantees),
4947 	BFQ_ATTR(low_latency),
4948 	__ATTR_NULL
4949 };
4950 
4951 static struct elevator_type iosched_bfq_mq = {
4952 	.ops.mq = {
4953 		.get_rq_priv		= bfq_get_rq_private,
4954 		.put_rq_priv		= bfq_put_rq_private,
4955 		.exit_icq		= bfq_exit_icq,
4956 		.insert_requests	= bfq_insert_requests,
4957 		.dispatch_request	= bfq_dispatch_request,
4958 		.next_request		= elv_rb_latter_request,
4959 		.former_request		= elv_rb_former_request,
4960 		.allow_merge		= bfq_allow_bio_merge,
4961 		.bio_merge		= bfq_bio_merge,
4962 		.request_merge		= bfq_request_merge,
4963 		.requests_merged	= bfq_requests_merged,
4964 		.request_merged		= bfq_request_merged,
4965 		.has_work		= bfq_has_work,
4966 		.init_sched		= bfq_init_queue,
4967 		.exit_sched		= bfq_exit_queue,
4968 	},
4969 
4970 	.uses_mq =		true,
4971 	.icq_size =		sizeof(struct bfq_io_cq),
4972 	.icq_align =		__alignof__(struct bfq_io_cq),
4973 	.elevator_attrs =	bfq_attrs,
4974 	.elevator_name =	"bfq",
4975 	.elevator_owner =	THIS_MODULE,
4976 };
4977 
4978 static int __init bfq_init(void)
4979 {
4980 	int ret;
4981 
4982 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4983 	ret = blkcg_policy_register(&blkcg_policy_bfq);
4984 	if (ret)
4985 		return ret;
4986 #endif
4987 
4988 	ret = -ENOMEM;
4989 	if (bfq_slab_setup())
4990 		goto err_pol_unreg;
4991 
4992 	/*
4993 	 * Times to load large popular applications for the typical
4994 	 * systems installed on the reference devices (see the
4995 	 * comments before the definitions of the next two
4996 	 * arrays). Actually, we use slightly slower values, as the
4997 	 * estimated peak rate tends to be smaller than the actual
4998 	 * peak rate.  The reason for this last fact is that estimates
4999 	 * are computed over much shorter time intervals than the long
5000 	 * intervals typically used for benchmarking. Why? First, to
5001 	 * adapt more quickly to variations. Second, because an I/O
5002 	 * scheduler cannot rely on a peak-rate-evaluation workload to
5003 	 * be run for a long time.
5004 	 */
5005 	T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5006 	T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5007 	T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5008 	T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5009 
5010 	/*
5011 	 * Thresholds that determine the switch between speed classes
5012 	 * (see the comments before the definition of the array
5013 	 * device_speed_thresh). These thresholds are biased towards
5014 	 * transitions to the fast class. This is safer than the
5015 	 * opposite bias. In fact, a wrong transition to the slow
5016 	 * class results in short weight-raising periods, because the
5017 	 * speed of the device then tends to be higher that the
5018 	 * reference peak rate. On the opposite end, a wrong
5019 	 * transition to the fast class tends to increase
5020 	 * weight-raising periods, because of the opposite reason.
5021 	 */
5022 	device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5023 	device_speed_thresh[1] = (4 * R_slow[1]) / 3;
5024 
5025 	ret = elv_register(&iosched_bfq_mq);
5026 	if (ret)
5027 		goto err_pol_unreg;
5028 
5029 	return 0;
5030 
5031 err_pol_unreg:
5032 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5033 	blkcg_policy_unregister(&blkcg_policy_bfq);
5034 #endif
5035 	return ret;
5036 }
5037 
5038 static void __exit bfq_exit(void)
5039 {
5040 	elv_unregister(&iosched_bfq_mq);
5041 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5042 	blkcg_policy_unregister(&blkcg_policy_bfq);
5043 #endif
5044 	bfq_slab_kill();
5045 }
5046 
5047 module_init(bfq_init);
5048 module_exit(bfq_exit);
5049 
5050 MODULE_AUTHOR("Paolo Valente");
5051 MODULE_LICENSE("GPL");
5052 MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
5053