/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. This feature enables
 * BFQ to provide applications in these classes with a very low
 * latency. Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], which also contains a reference to the
 * initial, more theoretical paper on BFQ. The interested reader can
 * find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties. With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * one that guarantees a low latency to soft real-time applications,
 * and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#include "blk-wbt.h"

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}

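/*
 * For illustration, BFQ_BFQQ_FNS(busy) below expands to the triple:
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__set_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__clear_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq)
 *	{
 *		return test_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *
 * The same mark/clear/test triple is generated for each flag listed
 * next.
 */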
BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

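/*
 * Seek-detection thresholds (a reading aid, derived from how the
 * macros below are used elsewhere in this file): roughly, a request
 * is sampled as "far" if it lies more than BFQQ_SEEK_THR (800)
 * sectors away from the previous one; bfqq->seek_history keeps one
 * bit per sample in a 32-bit sliding window, and BFQQ_SEEKY() deems a
 * queue seeky when more than 32/8 = 4 of the last 32 samples were
 * far.
 */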
#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 32/8)

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT		16

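/*
 * Illustration of the fixed-point convention above, as a sketch only
 * (the helper below has a hypothetical name and is not used by the
 * scheduler): a peak rate of 100 sectors/usec is stored as
 * 100 << BFQ_RATE_SHIFT; dropping the fractional bits converts the
 * value back to plain sectors/usec.
 */
static inline u64 bfq_rate_to_sectors_per_usec(u64 shifted_rate)
{
	return shifted_rate >> BFQ_RATE_SHIFT; /* discard 16 fractional bits */
}
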
/*
 * By default, BFQ computes the duration of the weight raising for
 * interactive applications automatically, using the following formula:
 * duration = (R / r) * T, where r is the peak rate of the device, and
 * R and T are two reference parameters.
 * In particular, R is the peak rate of the reference device (see below),
 * and T is a reference time: given the systems that are likely to be
 * installed on the reference device according to its speed class, T is
 * about the maximum time needed, under BFQ and while reading two files in
 * parallel, to load typical large applications on these systems.
 * In practice, the slower/faster the device at hand is, the more/less it
 * takes to load applications with respect to the reference device.
 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
 * applications.
 *
 * BFQ uses four different reference pairs (R, T), depending on:
 * . whether the device is rotational or non-rotational;
 * . whether the device is slow, such as old or portable HDDs, as well as
 *   SD cards, or fast, such as newer HDDs and SSDs.
 *
 * The device's speed class is dynamically (re)detected in
 * bfq_update_peak_rate() every time the estimated peak rate is updated.
 *
 * In the following definitions, R_slow[0]/R_fast[0] and
 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
 * rotational device, whereas R_slow[1]/R_fast[1] and
 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
 * non-rotational device. Finally, device_speed_thresh are the
 * thresholds used to switch between speed classes. The reference
 * rates are not the actual peak rates of the devices used as a
 * reference, but slightly lower values. The reason for using these
 * slightly lower values is that the peak-rate estimator tends to
 * yield slightly lower values than the actual peak rate (it can yield
 * the actual peak rate only if there is only one process doing I/O,
 * and the process does sequential I/O).
 *
 * Both the reference peak rates and the thresholds are measured in
 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
 */
static int R_slow[2] = {1000, 10700};
static int R_fast[2] = {14000, 33000};
/*
 * To improve readability, a conversion function is used to initialize
 * the following arrays, which entails that they can be initialized
 * only in a function.
 */
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];

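/*
 * A sketch of the initialization mentioned above (the exact values
 * here are illustrative assumptions; in this file the arrays are
 * actually filled in at initialization time, where the T_* entries
 * are converted from milliseconds with msecs_to_jiffies() and the
 * speed-class thresholds are derived from the slow reference rates):
 *
 *	T_slow[0] = msecs_to_jiffies(3500);
 *	device_speed_thresh[0] = (4 * R_slow[0]) / 3;
 */
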
#define RQ_BIC(rq)		icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search @ioc for a bfq_io_cq associated with @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(q->queue_lock, flags);

		return icq;
	}

	return NULL;
}

/*
 * Schedule a run of the queue if there are requests pending and no
 * one in the driver will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
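/*
 * Worked example with illustrative numbers: with the head at sector
 * 1000 and the default bfq_back_max (16384 KiB, i.e., 32768 sectors)
 * and bfq_back_penalty (2), a request at sector 1200 gets distance
 * d1 = 200, while a request at sector 900 lies behind the head but
 * within back_max, so it gets the penalized distance
 * d2 = (1000 - 900) * 2 = 200; the tie is then broken in favour of
 * the request with the higher sector, i.e., the one at 1200.
 */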
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
	/*
	 * For weights to differ, at least one of the trees must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
		(bfqd->group_weights_tree.rb_node->rb_left ||
		 bfqd->group_weights_tree.rb_node->rb_right)
#endif
	       );
}

/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_bfqq_may_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_differentiated_weights(bfqd);
}

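/*
 * For intuition, an illustrative example: with four active queues,
 * all with weight 100 and none weight-raised, the queue weights tree
 * contains a single counter node (with num_active == 4), so the
 * scenario above is deemed symmetric. If one queue's weight is then
 * changed to 300, a second node appears and the scenario becomes
 * asymmetric.
 */
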
/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the entity is already associated with a
	 * counter, which happens if:
	 *   1) the entity is associated with a queue,
	 *   2) a request arrival has caused the queue to become both
	 *      non-weight-raised, and hence change its weight, and
	 *      backlogged; in this respect, each of the two events
	 *      causes an invocation of this function,
	 *   3) this is the invocation of this function caused by the
	 *      second event. This second invocation is actually useless,
	 *      and we handle this fact by exiting immediately. More
	 *      efficient or clearer solutions might possibly be adopted.
	 */
	if (entity->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			entity->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of entity to not be
	 * considered in bfq_differentiated_weights, which, in turn,
	 * causes the scenario to be deemed wrongly symmetric in case
	 * entity's weight would have been the only weight making the
	 * scenario asymmetric. On the bright side, no unbalance will
	 * however occur when entity becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of entity). In fact, bfq_weights_tree_remove does nothing
	 * if !entity->weight_counter.
	 */
	if (unlikely(!entity->weight_counter))
		return;

	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
	entity->weight_counter->num_active++;
}

/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
			     struct rb_root *root)
{
	if (!entity->weight_counter)
		return;

	entity->weight_counter->num_active--;
	if (entity->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&entity->weight_counter->weights_node, root);
	kfree(entity->weight_counter);

reset_entity_pointer:
	entity->weight_counter = NULL;
}

/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}

static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}

/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	/*
	 * If there are no weight-raised queues, then amplify service
	 * by just the async charge factor; otherwise amplify service
	 * by twice the async charge factor, to further reduce latency
	 * for weight-raised queues.
	 */
	if (bfqq->bfqd->wr_busy_queues == 0)
		return blk_rq_sectors(rq) * bfq_async_charge_factor;

	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
}

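/*
 * Worked example: an async request of 8 sectors, issued while no
 * queue is weight-raised, is charged 8 * 10 = 80 sectors of budget;
 * with at least one weight-raised queue in the system the charge
 * doubles, to 160 sectors. A request of a sync or weight-raised
 * queue is always charged its actual size, 8 sectors here.
 */
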
/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown).  We do this because if the queue does not have
 * enough budget for its first request, it has to go through two
 * dispatch rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
	}
}

static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
	u64 dur;

	if (bfqd->bfq_wr_max_time > 0)
		return bfqd->bfq_wr_max_time;

	dur = bfqd->RT_prod;
	do_div(dur, bfqd->peak_rate);

	/*
	 * Limit duration between 3 and 13 seconds. Tests show that
	 * values higher than 13 seconds often yield the opposite of
	 * the desired result, i.e., worsen responsiveness by letting
	 * non-interactive and non-soft-real-time applications
	 * preserve weight raising for too long a time interval.
	 *
	 * On the other hand, values lower than 3 seconds make it
	 * difficult for most interactive tasks to complete their jobs
	 * before weight-raising finishes.
	 */
	if (dur > msecs_to_jiffies(13000))
		dur = msecs_to_jiffies(13000);
	else if (dur < msecs_to_jiffies(3000))
		dur = msecs_to_jiffies(3000);

	return dur;
}

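/*
 * Worked example with illustrative numbers: for a device whose
 * estimated peak rate r is half the reference rate R of its speed
 * class, the raising lasts duration = (R / r) * T = 2 * T; with
 * T = 5 seconds this gives 10 seconds, within the [3, 13] second
 * clamp applied above.
 */
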
/* switch back from soft real-time to interactive weight raising */
static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
					  struct bfq_data *bfqd)
{
	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
}

static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
		      struct bfq_io_cq *bic, bool bfq_already_existing)
{
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

	if (bic->saved_has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
		    !bfq_bfqq_in_large_burst(bfqq) &&
		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
					     bfq_wr_duration(bfqd))) {
			switch_back_to_interactive_wr(bfqq, bfqd);
		} else {
			bfqq->wr_coeff = 1;
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "resume state: switching off wr");
		}
	}

	/* make sure weight will be updated, however we got here */
	bfqq->entity.prio_changed = 1;

	if (likely(!busy))
		return;

	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
		bfqd->wr_busy_queues--;
}

static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}

/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}

/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}

/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we simply call these bursts 'large'. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at
 * hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (keeping temporarily track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after, the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If bfqq is already in the burst list or is part of a large
	 * burst, or finally has just been split, then there is
	 * nothing else to do.
	 */
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
	    bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_after_eq_jiffies(bfqq->split_time +
				     msecs_to_jiffies(10)))
		return;

	/*
	 * If bfqq's creation happens late enough, or bfqq belongs to
	 * a different group than the burst group, then the current
	 * burst is finished, and related data structures must be
	 * reset.
	 *
	 * In this respect, consider the special case where bfqq is
	 * the very first queue created after BFQ is selected for this
	 * device. In this case, last_ins_in_burst and
	 * burst_parent_entity are not yet significant when we get
	 * here. But it is easy to verify that, whether or not the
	 * following condition is true, bfqq will end up being
	 * inserted into the burst list. In particular the list will
	 * happen to contain only bfqq. And this is exactly what has
	 * to happen, as bfqq may be the first queue of the first
	 * burst.
	 */
	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
	    bfqd->bfq_burst_interval) ||
	    bfqq->entity.parent != bfqd->burst_parent_entity) {
		bfqd->large_burst = false;
		bfq_reset_burst_list(bfqd, bfqq);
		goto end;
	}

	/*
	 * If we get here, then bfqq is being activated shortly after the
	 * last queue. So, if the current burst is also large, we can mark
	 * bfqq as belonging to this large burst immediately.
	 */
	if (bfqd->large_burst) {
		bfq_mark_bfqq_in_large_burst(bfqq);
		goto end;
	}

	/*
	 * If we get here, then a large-burst state has not yet been
	 * reached, but bfqq is being activated shortly after the last
	 * queue. Then we add bfqq to the burst.
	 */
	bfq_add_to_burst(bfqd, bfqq);
end:
	/*
	 * At this point, bfqq either has been added to the current
	 * burst or has caused the current burst to terminate and a
	 * possible new burst to start. In particular, in the second
	 * case, bfqq has become the first queue in the possible new
	 * burst.  In both cases last_ins_in_burst needs to be moved
	 * forward.
	 */
	bfqd->last_ins_in_burst = jiffies;
}

static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	return entity->budget - entity->service;
}

/*
 * If enough samples have been computed, return the current max budget
 * stored in bfqd, which is dynamically updated according to the
 * estimated disk peak rate; otherwise return the default max budget.
 */
static int bfq_max_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget;
	else
		return bfqd->bfq_max_budget;
}

/*
 * Return min budget, which is a fraction of the current or default
 * max budget (trying with 1/32).
 */
static int bfq_min_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget / 32;
	else
		return bfqd->bfq_max_budget / 32;
}

/*
 * The next function, invoked after the input queue bfqq switches from
 * idle to busy, updates the budget of bfqq. The function also tells
 * whether the in-service queue should be expired, by returning
 * true. The purpose of expiring the in-service queue is to give bfqq
 * the chance to possibly preempt the in-service queue, and the reason
 * for preempting the in-service queue is to achieve one of the two
 * goals below.
 *
 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
 * expired because it has remained idle. In particular, bfqq may have
 * expired for one of the following two reasons:
 *
 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
 *   and did not make it to issue a new request before its last
 *   request was served;
 *
 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
 *   a new request before the expiration of the idling-time.
 *
 * Even if bfqq has expired for one of the above reasons, the process
 * associated with the queue may be however issuing requests greedily,
 * and thus be sensitive to the bandwidth it receives (bfqq may have
 * remained idle for other reasons: CPU high load, bfqq not enjoying
 * idling, I/O throttling somewhere in the path from the process to
 * the I/O scheduler, ...). But if, after every expiration for one of
 * the above two reasons, bfqq has to wait for the service of at least
 * one full budget of another queue before being served again, then
 * bfqq is likely to get a much lower bandwidth or resource time than
 * its reserved ones. To address this issue, two countermeasures need
 * to be taken.
 *
 * First, the budget and the timestamps of bfqq need to be updated in
 * a special way on bfqq reactivation: they need to be updated as if
 * bfqq did not remain idle and did not expire. In fact, if they are
 * computed as if bfqq expired and remained idle until reactivation,
 * then the process associated with bfqq is treated as if, instead of
 * being greedy, it stopped issuing requests when bfqq remained idle,
 * and restarts issuing requests only on this reactivation. In other
 * words, the scheduler does not help the process recover the "service
 * hole" between bfqq expiration and reactivation. As a consequence,
 * the process receives a lower bandwidth than its reserved one. In
 * contrast, to recover this hole, the budget must be updated as if
 * bfqq was not expired at all before this reactivation, i.e., it must
 * be set to the value of the remaining budget when bfqq was
 * expired. Along the same line, timestamps need to be assigned the
 * value they had the last time bfqq was selected for service, i.e.,
 * before last expiration. Thus timestamps need to be back-shifted
 * with respect to their normal computation (see [1] for more details
 * on this tricky aspect).
 *
 * Secondly, to allow the process to recover the hole, the in-service
 * queue must be expired too, to give bfqq the chance to preempt it
 * immediately. In fact, if bfqq has to wait for a full budget of the
 * in-service queue to be completed, then it may become impossible to
 * let the process recover the hole, even if the back-shifted
 * timestamps of bfqq are lower than those of the in-service queue. If
 * this happens for most or all of the holes, then the process may not
 * receive its reserved bandwidth. In this respect, it is worth noting
 * that, since the service of outstanding requests is not preemptible,
 * a small fraction of the holes may however be unrecoverable, thereby
 * causing a small loss of bandwidth.
 *
 * The last important point is detecting whether bfqq does need this
 * bandwidth recovery. In this respect, the next function deems the
 * process associated with bfqq greedy, and thus allows it to recover
 * the hole, if: 1) the process is waiting for the arrival of a new
 * request (which implies that bfqq expired for one of the above two
 * reasons), and 2) such a request has arrived soon. The first
 * condition is controlled through the flag non_blocking_wait_rq,
 * while the second through the flag arrived_in_time. If both
 * conditions hold, then the function computes the budget in the
 * above-described special way, and signals that the in-service queue
 * should be expired. Timestamp back-shifting is done later in
 * __bfq_activate_entity.
 *
 * 2. Reduce latency. Even if timestamps are not backshifted to let
 * the process associated with bfqq recover a service hole, bfqq may
 * however happen to have, after being (re)activated, a lower finish
 * timestamp than the in-service queue. That is, the next budget of
 * bfqq may have to be completed before the one of the in-service
 * queue. If this is the case, then preempting the in-service queue
 * allows this goal to be achieved, apart from the unpreemptible,
 * outstanding requests mentioned above.
 *
 * Unfortunately, regardless of which of the above two goals one wants
 * to achieve, service trees need first to be updated to know whether
 * the in-service queue must be preempted. To have service trees
 * correctly updated, the in-service queue must be expired and
 * rescheduled, and bfqq must be scheduled too. This is one of the
 * most costly operations (in future versions, the scheduling
 * mechanism may be re-designed in such a way to make it possible to
 * know whether preemption is needed without needing to update service
 * trees). In addition, queue preemptions almost always cause random
 * I/O, and thus loss of throughput. Because of these facts, the next
 * function adopts the following simple scheme to avoid both costly
 * operations and too frequent preemptions: it requests the expiration
 * of the in-service queue (unconditionally) only for queues that need
 * to recover a hole, or that either are weight-raised or deserve to
 * be weight-raised.
 */
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
						struct bfq_queue *bfqq,
						bool arrived_in_time,
						bool wr_or_deserves_wr)
{
	struct bfq_entity *entity = &bfqq->entity;

	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
		/*
		 * We do not clear the flag non_blocking_wait_rq here, as
		 * the latter is used in bfq_activate_bfqq to signal
		 * that timestamps need to be back-shifted (and is
		 * cleared right after).
		 */

		/*
		 * In the next assignment we rely on the fact that
		 * neither entity->service nor entity->budget is
		 * updated on expiration if bfqq is empty (see
		 * __bfq_bfqq_recalc_budget). Thus both quantities
		 * remain unchanged after such an expiration, and the
		 * following statement therefore assigns to
		 * entity->budget the remaining budget on such an
		 * expiration. For clarity, entity->service is not
		 * updated on expiration in any case, and, in normal
		 * operation, is reset only when bfqq is selected for
		 * service (see bfq_get_next_queue).
		 */
		entity->budget = min_t(unsigned long,
				       bfq_bfqq_budget_left(bfqq),
				       bfqq->max_budget);

		return true;
	}

	entity->budget = max_t(unsigned long, bfqq->max_budget,
			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
	return wr_or_deserves_wr;
}

/*
 * Return the farthest future time instant according to jiffies
 * macros.
 */
static unsigned long bfq_greatest_from_now(void)
{
	return jiffies + MAX_JIFFY_OFFSET;
}

/*
 * Return the farthest past time instant according to jiffies
 * macros.
 */
static unsigned long bfq_smallest_from_now(void)
{
	return jiffies - MAX_JIFFY_OFFSET;
}

static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     unsigned int old_wr_coeff,
					     bool wr_or_deserves_wr,
					     bool interactive,
					     bool in_burst,
					     bool soft_rt)
{
	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
		/* start a weight-raising period */
		if (interactive) {
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else {
			/*
			 * No interactive weight raising in progress
			 * here: assign minus infinity to
			 * wr_start_at_switch_to_srt, to make sure
			 * that, at the end of the soft-real-time
			 * weight raising period that is starting
			 * now, no interactive weight-raising period
			 * may be wrongly considered as still in
			 * progress (and thus actually started by
			 * mistake).
			 */
			bfqq->wr_start_at_switch_to_srt =
				bfq_smallest_from_now();
			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
				BFQ_SOFTRT_WEIGHT_FACTOR;
			bfqq->wr_cur_max_time =
				bfqd->bfq_wr_rt_max_time;
		}

		/*
		 * If needed, further reduce budget to make sure it is
		 * close to bfqq's backlog, so as to reduce the
		 * scheduling-error component due to a too large
		 * budget. Do not care about throughput consequences,
		 * but only about latency. Finally, do not assign a
		 * too small budget either, to avoid increasing
		 * latency by causing too frequent expirations.
		 */
		bfqq->entity.budget = min_t(unsigned long,
					    bfqq->entity.budget,
					    2 * bfq_min_budget(bfqd));
	} else if (old_wr_coeff > 1) {
		if (interactive) { /* update wr coeff and duration */
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else if (in_burst)
			bfqq->wr_coeff = 1;
		else if (soft_rt) {
			/*
			 * The application is now or still meeting the
			 * requirements for being deemed soft rt.  We
			 * can then correctly and safely (re)charge
			 * the weight-raising duration for the
			 * application with the weight-raising
			 * duration for soft rt applications.
			 *
			 * In particular, doing this recharge now, i.e.,
			 * before the weight-raising period for the
			 * application finishes, reduces the probability
			 * of the following negative scenario:
			 * 1) the weight of a soft rt application is
			 *    raised at startup (as for any newly
			 *    created application),
			 * 2) since the application is not interactive,
			 *    at a certain time weight-raising is
			 *    stopped for the application,
			 * 3) at that time the application happens to
			 *    still have pending requests, and hence
			 *    is destined to not have a chance to be
			 *    deemed soft rt before these requests are
			 *    completed (see the comments to the
			 *    function bfq_bfqq_softrt_next_start()
			 *    for details on soft rt detection),
			 * 4) these pending requests experience a high
			 *    latency because the application is not
			 *    weight-raised while they are pending.
			 */
			if (bfqq->wr_cur_max_time !=
				bfqd->bfq_wr_rt_max_time) {
				bfqq->wr_start_at_switch_to_srt =
					bfqq->last_wr_start_finish;

				bfqq->wr_cur_max_time =
					bfqd->bfq_wr_rt_max_time;
				bfqq->wr_coeff = bfqd->bfq_wr_coeff *
					BFQ_SOFTRT_WEIGHT_FACTOR;
			}
			bfqq->last_wr_start_finish = jiffies;
		}
	}
}

static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
					struct bfq_queue *bfqq)
{
	return bfqq->dispatched == 0 &&
		time_is_before_jiffies(
			bfqq->budget_timeout +
			bfqd->bfq_wr_min_idle_time);
}

static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     int old_wr_coeff,
					     struct request *rq,
					     bool *interactive)
{
	bool soft_rt, in_burst, wr_or_deserves_wr,
		bfqq_wants_to_preempt,
		idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
		/*
		 * See the comments on
		 * bfq_bfqq_update_budg_for_activation for
		 * details on the usage of the next variable.
		 */
		arrived_in_time = ktime_get_ns() <=
			bfqq->ttime.last_end_request +
			bfqd->bfq_slice_idle * 3;

	/*
	 * bfqq deserves to be weight-raised if:
	 * - it is sync,
	 * - it does not belong to a large burst,
	 * - it has been idle for enough time or is soft real-time,
	 * - it is linked to a bfq_io_cq (it is not shared in any sense).
	 */
	in_burst = bfq_bfqq_in_large_burst(bfqq);
	soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
		!in_burst &&
		time_is_before_jiffies(bfqq->soft_rt_next_start);
	*interactive = !in_burst && idle_for_long_time;
	wr_or_deserves_wr = bfqd->low_latency &&
		(bfqq->wr_coeff > 1 ||
		 (bfq_bfqq_sync(bfqq) &&
		  bfqq->bic && (*interactive || soft_rt)));

	/*
	 * Using the last flag, update budget and check whether bfqq
	 * may want to preempt the in-service queue.
	 */
	bfqq_wants_to_preempt =
		bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
						    arrived_in_time,
						    wr_or_deserves_wr);

	/*
	 * If bfqq happened to be activated in a burst, but has been
	 * idle for much more than an interactive queue, then we
	 * assume that, in the overall I/O initiated in the burst, the
	 * I/O associated with bfqq is finished. So bfqq does not need
	 * to be treated as a queue belonging to a burst
	 * anymore. Accordingly, we reset bfqq's in_large_burst flag
	 * if set, and remove bfqq from the burst list if it's
	 * there. We do not decrement burst_size, because the fact
	 * that bfqq does not need to belong to the burst list any
	 * more does not invalidate the fact that bfqq was created in
	 * a burst.
	 */
	if (likely(!bfq_bfqq_just_created(bfqq)) &&
	    idle_for_long_time &&
	    time_is_before_jiffies(
		    bfqq->budget_timeout +
		    msecs_to_jiffies(10000))) {
		hlist_del_init(&bfqq->burst_list_node);
		bfq_clear_bfqq_in_large_burst(bfqq);
	}

	bfq_clear_bfqq_just_created(bfqq);

	if (!bfq_bfqq_IO_bound(bfqq)) {
		if (arrived_in_time) {
			bfqq->requests_within_timer++;
			if (bfqq->requests_within_timer >=
			    bfqd->bfq_requests_within_timer)
				bfq_mark_bfqq_IO_bound(bfqq);
		} else
			bfqq->requests_within_timer = 0;
	}

	if (bfqd->low_latency) {
		if (unlikely(time_is_after_jiffies(bfqq->split_time)))
			/* wraparound */
			bfqq->split_time =
				jiffies - bfqd->bfq_wr_min_idle_time - 1;

		if (time_is_before_jiffies(bfqq->split_time +
					   bfqd->bfq_wr_min_idle_time)) {
			bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
							 old_wr_coeff,
							 wr_or_deserves_wr,
							 *interactive,
							 in_burst,
							 soft_rt);

			if (old_wr_coeff != bfqq->wr_coeff)
				bfqq->entity.prio_changed = 1;
		}
	}

	bfqq->last_idle_bklogged = jiffies;
	bfqq->service_from_backlogged = 0;
	bfq_clear_bfqq_softrt_update(bfqq);

	bfq_add_bfqq_busy(bfqd, bfqq);

	/*
	 * Expire in-service queue only if preemption may be needed
	 * for guarantees. In this respect, the function
	 * next_queue_may_preempt just checks a simple, necessary
	 * condition, and not a sufficient condition based on
	 * timestamps. In fact, for the latter condition to be
	 * evaluated, timestamps would need first to be updated, and
	 * this operation is quite costly (see the comments on the
	 * function bfq_bfqq_update_budg_for_activation).
	 */
	if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
	    bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
	    next_queue_may_preempt(bfqd))
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);
}

1467 static void bfq_add_request(struct request *rq)
1468 {
1469 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
1470 	struct bfq_data *bfqd = bfqq->bfqd;
1471 	struct request *next_rq, *prev;
1472 	unsigned int old_wr_coeff = bfqq->wr_coeff;
1473 	bool interactive = false;
1474 
1475 	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1476 	bfqq->queued[rq_is_sync(rq)]++;
1477 	bfqd->queued++;
1478 
1479 	elv_rb_add(&bfqq->sort_list, rq);
1480 
1481 	/*
1482 	 * Check if this request is a better next-serve candidate.
1483 	 */
1484 	prev = bfqq->next_rq;
1485 	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1486 	bfqq->next_rq = next_rq;
1487 
1488 	/*
1489 	 * Adjust priority tree position, if next_rq changes.
1490 	 */
1491 	if (prev != bfqq->next_rq)
1492 		bfq_pos_tree_add_move(bfqd, bfqq);
1493 
1494 	if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
1495 		bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1496 						 rq, &interactive);
1497 	else {
1498 		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1499 		    time_is_before_jiffies(
1500 				bfqq->last_wr_start_finish +
1501 				bfqd->bfq_wr_min_inter_arr_async)) {
1502 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1503 			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1504 
1505 			bfqd->wr_busy_queues++;
1506 			bfqq->entity.prio_changed = 1;
1507 		}
1508 		if (prev != bfqq->next_rq)
1509 			bfq_updated_next_req(bfqd, bfqq);
1510 	}
1511 
1512 	/*
1513 	 * Assign jiffies to last_wr_start_finish in the following
1514 	 * cases:
1515 	 *
1516 	 * . if bfqq is not going to be weight-raised, because, for
1517 	 *   non weight-raised queues, last_wr_start_finish stores the
1518 	 *   arrival time of the last request; as of now, this piece
1519 	 *   of information is used only for deciding whether to
1520 	 *   weight-raise async queues
1521 	 *
1522 	 * . if bfqq is not weight-raised, because, if bfqq is now
1523 	 *   switching to weight-raised, then last_wr_start_finish
1524 	 *   stores the time when weight-raising starts
1525 	 *
1526 	 * . if bfqq is interactive, because, regardless of whether
1527 	 *   bfqq is currently weight-raised, the weight-raising
1528 	 *   period must start or restart (this case is considered
1529 	 *   separately because it is not detected by the above
1530 	 *   conditions, if bfqq is already weight-raised)
1531 	 *
1532 	 * last_wr_start_finish has to be updated also if bfqq is soft
1533 	 * real-time, because the weight-raising period is constantly
1534 	 * restarted on idle-to-busy transitions for these queues, but
1535 	 * this is already done in bfq_bfqq_handle_idle_busy_switch if
1536 	 * needed.
1537 	 */
1538 	if (bfqd->low_latency &&
1539 		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1540 		bfqq->last_wr_start_finish = jiffies;
1541 }
1542 
1543 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1544 					  struct bio *bio,
1545 					  struct request_queue *q)
1546 {
1547 	struct bfq_queue *bfqq = bfqd->bio_bfqq;
1548 
1550 	if (bfqq)
1551 		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1552 
1553 	return NULL;
1554 }
1555 
1556 static sector_t get_sdist(sector_t last_pos, struct request *rq)
1557 {
1558 	if (last_pos)
1559 		return abs(blk_rq_pos(rq) - last_pos);
1560 
1561 	return 0;
1562 }
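
/*
 * Example of how the distance computed by get_sdist() is typically
 * consumed (see bfq_update_peak_rate() below): a dispatch counts as
 * sequential if the new request starts close enough to the last
 * dispatched position, as in
 *
 *	get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR
 */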
1563 
1564 #if 0 /* Still not clear if we can do without next two functions */
1565 static void bfq_activate_request(struct request_queue *q, struct request *rq)
1566 {
1567 	struct bfq_data *bfqd = q->elevator->elevator_data;
1568 
1569 	bfqd->rq_in_driver++;
1570 }
1571 
1572 static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1573 {
1574 	struct bfq_data *bfqd = q->elevator->elevator_data;
1575 
1576 	bfqd->rq_in_driver--;
1577 }
1578 #endif
1579 
1580 static void bfq_remove_request(struct request_queue *q,
1581 			       struct request *rq)
1582 {
1583 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
1584 	struct bfq_data *bfqd = bfqq->bfqd;
1585 	const int sync = rq_is_sync(rq);
1586 
1587 	if (bfqq->next_rq == rq) {
1588 		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1589 		bfq_updated_next_req(bfqd, bfqq);
1590 	}
1591 
1592 	if (rq->queuelist.prev != &rq->queuelist)
1593 		list_del_init(&rq->queuelist);
1594 	bfqq->queued[sync]--;
1595 	bfqd->queued--;
1596 	elv_rb_del(&bfqq->sort_list, rq);
1597 
1598 	elv_rqhash_del(q, rq);
1599 	if (q->last_merge == rq)
1600 		q->last_merge = NULL;
1601 
1602 	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1603 		bfqq->next_rq = NULL;
1604 
1605 		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
1606 			bfq_del_bfqq_busy(bfqd, bfqq, false);
1607 			/*
1608 			 * bfqq emptied. In normal operation, when
1609 			 * bfqq is empty, bfqq->entity.service and
1610 			 * bfqq->entity.budget must contain,
1611 			 * respectively, the service received and the
1612 			 * budget used last time bfqq emptied. These
1613 			 * facts do not hold in this case, as at least
1614 			 * this last removal occurred while bfqq is
1615 			 * not in service. To avoid inconsistencies,
1616 			 * reset both bfqq->entity.service and
1617 			 * bfqq->entity.budget, if bfqq has still a
1618 			 * process that may issue I/O requests to it.
1619 			 */
1620 			bfqq->entity.budget = bfqq->entity.service = 0;
1621 		}
1622 
1623 		/*
1624 		 * Remove queue from request-position tree as it is empty.
1625 		 */
1626 		if (bfqq->pos_root) {
1627 			rb_erase(&bfqq->pos_node, bfqq->pos_root);
1628 			bfqq->pos_root = NULL;
1629 		}
1630 	}
1631 
1632 	if (rq->cmd_flags & REQ_META)
1633 		bfqq->meta_pending--;
1635 }
1636 
1637 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
1638 {
1639 	struct request_queue *q = hctx->queue;
1640 	struct bfq_data *bfqd = q->elevator->elevator_data;
1641 	struct request *free = NULL;
1642 	/*
1643 	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
1644 	 * store its return value for later use, to avoid nesting
1645 	 * queue_lock inside the bfqd->lock. We assume that the bic
1646 	 * returned by bfq_bic_lookup does not go away before
1647 	 * bfqd->lock is taken.
1648 	 */
1649 	struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
1650 	bool ret;
1651 
1652 	spin_lock_irq(&bfqd->lock);
1653 
1654 	if (bic)
1655 		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1656 	else
1657 		bfqd->bio_bfqq = NULL;
1658 	bfqd->bio_bic = bic;
1659 
1660 	ret = blk_mq_sched_try_merge(q, bio, &free);
1661 
1662 	if (free)
1663 		blk_mq_free_request(free);
1664 	spin_unlock_irq(&bfqd->lock);
1665 
1666 	return ret;
1667 }
1668 
1669 static int bfq_request_merge(struct request_queue *q, struct request **req,
1670 			     struct bio *bio)
1671 {
1672 	struct bfq_data *bfqd = q->elevator->elevator_data;
1673 	struct request *__rq;
1674 
1675 	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
1676 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
1677 		*req = __rq;
1678 		return ELEVATOR_FRONT_MERGE;
1679 	}
1680 
1681 	return ELEVATOR_NO_MERGE;
1682 }
1683 
1684 static void bfq_request_merged(struct request_queue *q, struct request *req,
1685 			       enum elv_merge type)
1686 {
1687 	if (type == ELEVATOR_FRONT_MERGE &&
1688 	    rb_prev(&req->rb_node) &&
1689 	    blk_rq_pos(req) <
1690 	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
1691 				    struct request, rb_node))) {
1692 		struct bfq_queue *bfqq = RQ_BFQQ(req);
1693 		struct bfq_data *bfqd = bfqq->bfqd;
1694 		struct request *prev, *next_rq;
1695 
1696 		/* Reposition request in its sort_list */
1697 		elv_rb_del(&bfqq->sort_list, req);
1698 		elv_rb_add(&bfqq->sort_list, req);
1699 
1700 		/* Choose next request to be served for bfqq */
1701 		prev = bfqq->next_rq;
1702 		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1703 					 bfqd->last_position);
1704 		bfqq->next_rq = next_rq;
1705 		/*
1706 		 * If next_rq changes, update both the queue's budget to
1707 		 * fit the new request and the queue's position in its
1708 		 * rq_pos_tree.
1709 		 */
1710 		if (prev != bfqq->next_rq) {
1711 			bfq_updated_next_req(bfqd, bfqq);
1712 			bfq_pos_tree_add_move(bfqd, bfqq);
1713 		}
1714 	}
1715 }
1716 
1717 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1718 				struct request *next)
1719 {
1720 	struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1721 
1722 	if (!RB_EMPTY_NODE(&rq->rb_node))
1723 		goto end;
1724 	spin_lock_irq(&bfqq->bfqd->lock);
1725 
1726 	/*
1727 	 * If next and rq belong to the same bfq_queue and next is older
1728 	 * than rq, then reposition rq in the fifo (by substituting next
1729 	 * with rq). Otherwise, if next and rq belong to different
1730 	 * bfq_queues, never reposition rq: in fact, we would have to
1731 	 * reposition it with respect to next's position in its own fifo,
1732 	 * which would most certainly be too expensive with respect to
1733 	 * the benefits.
1734 	 */
1735 	if (bfqq == next_bfqq &&
1736 	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1737 	    next->fifo_time < rq->fifo_time) {
1738 		list_del_init(&rq->queuelist);
1739 		list_replace_init(&next->queuelist, &rq->queuelist);
1740 		rq->fifo_time = next->fifo_time;
1741 	}
1742 
1743 	if (bfqq->next_rq == next)
1744 		bfqq->next_rq = rq;
1745 
1746 	bfq_remove_request(q, next);
1747 	bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
1748 
1749 	spin_unlock_irq(&bfqq->bfqd->lock);
1750 end:
1751 	bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
1752 }
1753 
1754 /* Must be called with bfqq != NULL */
1755 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1756 {
1757 	if (bfq_bfqq_busy(bfqq))
1758 		bfqq->bfqd->wr_busy_queues--;
1759 	bfqq->wr_coeff = 1;
1760 	bfqq->wr_cur_max_time = 0;
1761 	bfqq->last_wr_start_finish = jiffies;
1762 	/*
1763 	 * Trigger a weight change on the next invocation of
1764 	 * __bfq_entity_update_weight_prio.
1765 	 */
1766 	bfqq->entity.prio_changed = 1;
1767 }
1768 
1769 void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1770 			     struct bfq_group *bfqg)
1771 {
1772 	int i, j;
1773 
1774 	for (i = 0; i < 2; i++)
1775 		for (j = 0; j < IOPRIO_BE_NR; j++)
1776 			if (bfqg->async_bfqq[i][j])
1777 				bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1778 	if (bfqg->async_idle_bfqq)
1779 		bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1780 }
1781 
1782 static void bfq_end_wr(struct bfq_data *bfqd)
1783 {
1784 	struct bfq_queue *bfqq;
1785 
1786 	spin_lock_irq(&bfqd->lock);
1787 
1788 	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1789 		bfq_bfqq_end_wr(bfqq);
1790 	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1791 		bfq_bfqq_end_wr(bfqq);
1792 	bfq_end_wr_async(bfqd);
1793 
1794 	spin_unlock_irq(&bfqd->lock);
1795 }
1796 
1797 static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1798 {
1799 	if (request)
1800 		return blk_rq_pos(io_struct);
1801 	else
1802 		return ((struct bio *)io_struct)->bi_iter.bi_sector;
1803 }
1804 
1805 static int bfq_rq_close_to_sector(void *io_struct, bool request,
1806 				  sector_t sector)
1807 {
1808 	return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1809 	       BFQQ_CLOSE_THR;
1810 }
1811 
1812 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1813 					 struct bfq_queue *bfqq,
1814 					 sector_t sector)
1815 {
1816 	struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1817 	struct rb_node *parent, *node;
1818 	struct bfq_queue *__bfqq;
1819 
1820 	if (RB_EMPTY_ROOT(root))
1821 		return NULL;
1822 
1823 	/*
1824 	 * First, if we find a request starting at the end of the last
1825 	 * request, choose it.
1826 	 */
1827 	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1828 	if (__bfqq)
1829 		return __bfqq;
1830 
1831 	/*
1832 	 * If the exact sector wasn't found, the parent of the NULL leaf
1833 	 * will contain the closest sector (rq_pos_tree sorted by
1834 	 * next_request position).
1835 	 */
1836 	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1837 	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1838 		return __bfqq;
1839 
1840 	if (blk_rq_pos(__bfqq->next_rq) < sector)
1841 		node = rb_next(&__bfqq->pos_node);
1842 	else
1843 		node = rb_prev(&__bfqq->pos_node);
1844 	if (!node)
1845 		return NULL;
1846 
1847 	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
1848 	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1849 		return __bfqq;
1850 
1851 	return NULL;
1852 }
1853 
1854 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1855 						   struct bfq_queue *cur_bfqq,
1856 						   sector_t sector)
1857 {
1858 	struct bfq_queue *bfqq;
1859 
1860 	/*
1861 	 * We shall notice if some of the queues are cooperating,
1862 	 * e.g., working closely on the same area of the device. In
1863 	 * that case, we can group them together and: 1) don't waste
1864 	 * time idling, and 2) serve the union of their requests in
1865 	 * the best possible order for throughput.
1866 	 */
1867 	bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1868 	if (!bfqq || bfqq == cur_bfqq)
1869 		return NULL;
1870 
1871 	return bfqq;
1872 }
1873 
1874 static struct bfq_queue *
1875 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1876 {
1877 	int process_refs, new_process_refs;
1878 	struct bfq_queue *__bfqq;
1879 
1880 	/*
1881 	 * If there are no process references on the new_bfqq, then it is
1882 	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1883 	 * may have dropped their last reference (not just their last process
1884 	 * reference).
1885 	 */
1886 	if (!bfqq_process_refs(new_bfqq))
1887 		return NULL;
1888 
1889 	/* Avoid a circular list and skip interim queue merges. */
1890 	while ((__bfqq = new_bfqq->new_bfqq)) {
1891 		if (__bfqq == bfqq)
1892 			return NULL;
1893 		new_bfqq = __bfqq;
1894 	}
1895 
1896 	process_refs = bfqq_process_refs(bfqq);
1897 	new_process_refs = bfqq_process_refs(new_bfqq);
1898 	/*
1899 	 * If the process for the bfqq has gone away, there is no
1900 	 * sense in merging the queues.
1901 	 */
1902 	if (process_refs == 0 || new_process_refs == 0)
1903 		return NULL;
1904 
1905 	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1906 		new_bfqq->pid);
1907 
1908 	/*
1909 	 * Merging is just a redirection: the requests of the process
1910 	 * owning one of the two queues are redirected to the other queue.
1911 	 * The latter queue, in its turn, is set as shared if this is the
1912 	 * first time that the requests of some process are redirected to
1913 	 * it.
1914 	 *
1915 	 * We redirect bfqq to new_bfqq and not the opposite, because
1916 	 * we are in the context of the process owning bfqq, thus we
1917 	 * have the io_cq of this process. So we can immediately
1918 	 * configure this io_cq to redirect the requests of the
1919 	 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1920 	 * not available any more (new_bfqq->bic == NULL).
1921 	 *
1922 	 * Anyway, even in case new_bfqq coincides with the in-service
1923 	 * queue, redirecting requests to the in-service queue is the
1924 	 * best option, as we feed the in-service queue with new
1925 	 * requests close to the last request served and, by doing so,
1926 	 * are likely to increase the throughput.
1927 	 */
1928 	bfqq->new_bfqq = new_bfqq;
1929 	new_bfqq->ref += process_refs;
1930 	return new_bfqq;
1931 }
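
/*
 * Note on the chain walk above: if Q1->new_bfqq == Q2 and
 * Q2->new_bfqq == Q3, then scheduling a merge of bfqq with Q1
 * actually records Q3 as the merge target, so requests are
 * redirected to the end of the chain in one step. If the chain
 * loops back to bfqq itself, no merge is scheduled.
 */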
1932 
1933 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1934 					struct bfq_queue *new_bfqq)
1935 {
1936 	if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1937 	    (bfqq->ioprio_class != new_bfqq->ioprio_class))
1938 		return false;
1939 
1940 	/*
1941 	 * If either of the queues has already been detected as seeky,
1942 	 * then merging it with the other queue is unlikely to lead to
1943 	 * sequential I/O.
1944 	 */
1945 	if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1946 		return false;
1947 
1948 	/*
1949 	 * Interleaved I/O is known to be done by (some) applications
1950 	 * only for reads, so it does not make sense to merge async
1951 	 * queues.
1952 	 */
1953 	if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1954 		return false;
1955 
1956 	return true;
1957 }
1958 
1959 /*
1960  * If this function returns true, then bfqq cannot be merged. The idea
1961  * is that true cooperation happens very early after processes start
1962  * to do I/O. Usually, late cooperations are just accidental false
1963  * positives. In case bfqq is weight-raised, such false positives
1964  * would evidently degrade latency guarantees for bfqq.
1965  */
1966 static bool wr_from_too_long(struct bfq_queue *bfqq)
1967 {
1968 	return bfqq->wr_coeff > 1 &&
1969 		time_is_before_jiffies(bfqq->last_wr_start_finish +
1970 				       msecs_to_jiffies(100));
1971 }
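
/*
 * For example, a queue whose weight-raising period started 50 ms ago
 * may still be merged (the function returns false), whereas one
 * raised more than 100 ms ago may not: a cooperation detected that
 * late is assumed to be an accidental false positive.
 */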
1972 
1973 /*
1974  * Attempt to schedule a merge of bfqq with the currently in-service
1975  * queue or with a close queue among the scheduled queues.  Return
1976  * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1977  * structure otherwise.
1978  *
1979  * The OOM queue is not allowed to participate in cooperation: in fact, since
1980  * the requests temporarily redirected to the OOM queue could be redirected
1981  * again to dedicated queues at any time, the state needed to correctly
1982  * handle merging with the OOM queue would be quite complex and expensive
1983  * to maintain. Besides, in such a critical condition as an out of memory,
1984  * the benefits of queue merging may be of little relevance, or even negligible.
1985  *
1986  * Weight-raised queues can be merged only if their weight-raising
1987  * period has just started. In fact cooperating processes are usually
1988  * started together. Thus, with this filter we avoid false positives
1989  * that would jeopardize low-latency guarantees.
1990  *
1991  * WARNING: queue merging may impair fairness among non-weight-raised
1992  * queues, for at least two reasons: 1) the original weight of a
1993  * merged queue may change during the merged state, 2) even if the
1994  * weight stays the same, a merged queue may be bloated with many more
1995  * requests than the ones produced by its originally-associated
1996  * process.
1997  */
1998 static struct bfq_queue *
1999 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2000 		     void *io_struct, bool request)
2001 {
2002 	struct bfq_queue *in_service_bfqq, *new_bfqq;
2003 
2004 	if (bfqq->new_bfqq)
2005 		return bfqq->new_bfqq;
2006 
2007 	if (!io_struct ||
2008 	    wr_from_too_long(bfqq) ||
2009 	    unlikely(bfqq == &bfqd->oom_bfqq))
2010 		return NULL;
2011 
2012 	/* If there is only one backlogged queue, don't search. */
2013 	if (bfqd->busy_queues == 1)
2014 		return NULL;
2015 
2016 	in_service_bfqq = bfqd->in_service_queue;
2017 
2018 	if (!in_service_bfqq || in_service_bfqq == bfqq ||
2019 	    wr_from_too_long(in_service_bfqq) ||
2020 	    unlikely(in_service_bfqq == &bfqd->oom_bfqq))
2021 		goto check_scheduled;
2022 
2023 	if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
2024 	    bfqq->entity.parent == in_service_bfqq->entity.parent &&
2025 	    bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
2026 		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
2027 		if (new_bfqq)
2028 			return new_bfqq;
2029 	}
2030 	/*
2031 	 * Check whether there is a cooperator among currently scheduled
2032 	 * queues. The only thing we need is that the bio/request is not
2033 	 * NULL, as we need it to establish whether a cooperator exists.
2034 	 */
2035 check_scheduled:
2036 	new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2037 			bfq_io_struct_pos(io_struct, request));
2038 
2039 	if (new_bfqq && !wr_from_too_long(new_bfqq) &&
2040 	    likely(new_bfqq != &bfqd->oom_bfqq) &&
2041 	    bfq_may_be_close_cooperator(bfqq, new_bfqq))
2042 		return bfq_setup_merge(bfqq, new_bfqq);
2043 
2044 	return NULL;
2045 }
2046 
2047 static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2048 {
2049 	struct bfq_io_cq *bic = bfqq->bic;
2050 
2051 	/*
2052 	 * If !bfqq->bic, the queue is already shared or its requests
2053 	 * have already been redirected to a shared queue; both idle window
2054 	 * and weight raising state have already been saved. Do nothing.
2055 	 */
2056 	if (!bic)
2057 		return;
2058 
2059 	bic->saved_ttime = bfqq->ttime;
2060 	bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
2061 	bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
2062 	bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2063 	bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
2064 	if (unlikely(bfq_bfqq_just_created(bfqq) &&
2065 		     !bfq_bfqq_in_large_burst(bfqq))) {
2066 		/*
2067 		 * bfqq being merged right after being created: bfqq
2068 		 * would have deserved interactive weight raising, but
2069 		 * did not make it to be set in a weight-raised state,
2070 		 * because of this early merge.	Store directly the
2071 		 * weight-raising state that would have been assigned
2072 		 * to bfqq, so as to avoid bfqq unjustly failing
2073 		 * to enjoy weight raising if split soon.
2074 		 */
2075 		bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
2076 		bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
2077 		bic->saved_last_wr_start_finish = jiffies;
2078 	} else {
2079 		bic->saved_wr_coeff = bfqq->wr_coeff;
2080 		bic->saved_wr_start_at_switch_to_srt =
2081 			bfqq->wr_start_at_switch_to_srt;
2082 		bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2083 		bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2084 	}
2085 }
2086 
2087 static void
2088 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2089 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2090 {
2091 	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2092 		(unsigned long)new_bfqq->pid);
2093 	/* Save weight raising and idle window of the merged queues */
2094 	bfq_bfqq_save_state(bfqq);
2095 	bfq_bfqq_save_state(new_bfqq);
2096 	if (bfq_bfqq_IO_bound(bfqq))
2097 		bfq_mark_bfqq_IO_bound(new_bfqq);
2098 	bfq_clear_bfqq_IO_bound(bfqq);
2099 
2100 	/*
2101 	 * If bfqq is weight-raised, then let new_bfqq inherit
2102 	 * weight-raising. To reduce false positives, neglect the case
2103 	 * where bfqq has just been created, but has not yet made it
2104 	 * to be weight-raised (which may happen because EQM may merge
2105 	 * bfqq even before bfq_add_request is executed for the first
2106 	 * time for bfqq). Handling this case would however be very
2107 	 * easy, thanks to the flag just_created.
2108 	 */
2109 	if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2110 		new_bfqq->wr_coeff = bfqq->wr_coeff;
2111 		new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2112 		new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2113 		new_bfqq->wr_start_at_switch_to_srt =
2114 			bfqq->wr_start_at_switch_to_srt;
2115 		if (bfq_bfqq_busy(new_bfqq))
2116 			bfqd->wr_busy_queues++;
2117 		new_bfqq->entity.prio_changed = 1;
2118 	}
2119 
2120 	if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2121 		bfqq->wr_coeff = 1;
2122 		bfqq->entity.prio_changed = 1;
2123 		if (bfq_bfqq_busy(bfqq))
2124 			bfqd->wr_busy_queues--;
2125 	}
2126 
2127 	bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2128 		     bfqd->wr_busy_queues);
2129 
2130 	/*
2131 	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2132 	 */
2133 	bic_set_bfqq(bic, new_bfqq, 1);
2134 	bfq_mark_bfqq_coop(new_bfqq);
2135 	/*
2136 	 * new_bfqq now belongs to at least two bics (it is a shared queue):
2137 	 * set new_bfqq->bic to NULL. bfqq either:
2138 	 * - does not belong to any bic any more, and hence bfqq->bic must
2139 	 *   be set to NULL, or
2140 	 * - is a queue whose owning bics have already been redirected to a
2141 	 *   different queue, hence the queue is destined to not belong to
2142 	 *   any bic soon and bfqq->bic is already NULL (therefore the next
2143 	 *   assignment causes no harm).
2144 	 */
2145 	new_bfqq->bic = NULL;
2146 	bfqq->bic = NULL;
2147 	/* release process reference to bfqq */
2148 	bfq_put_queue(bfqq);
2149 }
2150 
2151 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2152 				struct bio *bio)
2153 {
2154 	struct bfq_data *bfqd = q->elevator->elevator_data;
2155 	bool is_sync = op_is_sync(bio->bi_opf);
2156 	struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2157 
2158 	/*
2159 	 * Disallow merge of a sync bio into an async request.
2160 	 */
2161 	if (is_sync && !rq_is_sync(rq))
2162 		return false;
2163 
2164 	/*
2165 	 * Lookup the bfqq that this bio will be queued with. Allow
2166 	 * merge only if rq is queued there.
2167 	 */
2168 	if (!bfqq)
2169 		return false;
2170 
2171 	/*
2172 	 * We take advantage of this function to perform an early merge
2173 	 * of the queues of possible cooperating processes.
2174 	 */
2175 	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2176 	if (new_bfqq) {
2177 		/*
2178 		 * bic still points to bfqq, which means that it has not
2179 		 * yet been redirected to some other bfq_queue, and a queue
2180 		 * merge between bfqq and new_bfqq can be safely
2181 		 * fulfilled, i.e., bic can be redirected to new_bfqq
2182 		 * and bfqq can be put.
2183 		 */
2184 		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2185 				new_bfqq);
2186 		/*
2187 		 * If we get here, bio will be queued into new_queue,
2188 		 * so use new_bfqq to decide whether bio and rq can be
2189 		 * merged.
2190 		 */
2191 		bfqq = new_bfqq;
2192 
2193 		/*
2194 		 * Change also bfqd->bio_bfqq, as
2195 		 * bfqd->bio_bic now points to new_bfqq, and
2196 		 * this function may be invoked again (and then may
2197 		 * use again bfqd->bio_bfqq).
2198 		 */
2199 		bfqd->bio_bfqq = bfqq;
2200 	}
2201 
2202 	return bfqq == RQ_BFQQ(rq);
2203 }
2204 
2205 /*
2206  * Set the maximum time for the in-service queue to consume its
2207  * budget. This prevents seeky processes from lowering the throughput.
2208  * In practice, a time-slice service scheme is used with seeky
2209  * processes.
2210  */
2211 static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2212 				   struct bfq_queue *bfqq)
2213 {
2214 	unsigned int timeout_coeff;
2215 
2216 	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2217 		timeout_coeff = 1;
2218 	else
2219 		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2220 
2221 	bfqd->last_budget_start = ktime_get();
2222 
2223 	bfqq->budget_timeout = jiffies +
2224 		bfqd->bfq_timeout * timeout_coeff;
2225 }
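
/*
 * Numerical sketch, assuming the default 125 ms bfq_timeout: a
 * non-raised queue has entity.weight == entity.orig_weight, so
 * timeout_coeff = 1 and the budget must be consumed within ~125 ms.
 * For an interactive weight-raised queue, entity.weight equals
 * orig_weight times the raising coefficient, so the allowed time
 * scales accordingly; soft real-time queues instead always get
 * timeout_coeff = 1, which keeps their service slots short.
 */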
2226 
2227 static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2228 				       struct bfq_queue *bfqq)
2229 {
2230 	if (bfqq) {
2231 		bfq_clear_bfqq_fifo_expire(bfqq);
2232 
2233 		bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2234 
2235 		if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2236 		    bfqq->wr_coeff > 1 &&
2237 		    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2238 		    time_is_before_jiffies(bfqq->budget_timeout)) {
2239 			/*
2240 			 * For soft real-time queues, move the start
2241 			 * of the weight-raising period forward by the
2242 			 * time the queue has not received any
2243 			 * service. Otherwise, a relatively long
2244 			 * service delay is likely to cause the
2245 			 * weight-raising period of the queue to end,
2246 			 * because of the short duration of the
2247 			 * weight-raising period of a soft real-time
2248 			 * queue.  It is worth noting that this move
2249 			 * is not so dangerous for the other queues,
2250 			 * because soft real-time queues are not
2251 			 * greedy.
2252 			 *
2253 			 * To avoid adding a further variable, we use the
2254 			 * overloaded field budget_timeout to
2255 			 * determine for how long the queue has not
2256 			 * received service, i.e., how much time has
2257 			 * elapsed since the queue expired. However,
2258 			 * this is a little imprecise, because
2259 			 * budget_timeout is set to jiffies only when
2260 			 * bfqq expires while also having no pending
2261 			 * request.
2262 			 */
2263 			if (time_after(bfqq->budget_timeout,
2264 				       bfqq->last_wr_start_finish))
2265 				bfqq->last_wr_start_finish +=
2266 					jiffies - bfqq->budget_timeout;
2267 			else
2268 				bfqq->last_wr_start_finish = jiffies;
2269 		}
2270 
2271 		bfq_set_budget_timeout(bfqd, bfqq);
2272 		bfq_log_bfqq(bfqd, bfqq,
2273 			     "set_in_service_queue, cur-budget = %d",
2274 			     bfqq->entity.budget);
2275 	}
2276 
2277 	bfqd->in_service_queue = bfqq;
2278 }
2279 
2280 /*
2281  * Get and set a new queue for service.
2282  */
2283 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2284 {
2285 	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2286 
2287 	__bfq_set_in_service_queue(bfqd, bfqq);
2288 	return bfqq;
2289 }
2290 
2291 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2292 {
2293 	struct bfq_queue *bfqq = bfqd->in_service_queue;
2294 	u32 sl;
2295 
2296 	bfq_mark_bfqq_wait_request(bfqq);
2297 
2298 	/*
2299 	 * We don't want to idle for seeks, but we do want to allow
2300 	 * fair distribution of slice time for a process doing back-to-back
2301 	 * seeks. So allow a little bit of time for it to submit a new rq.
2302 	 */
2303 	sl = bfqd->bfq_slice_idle;
2304 	/*
2305 	 * Unless the queue is being weight-raised or the scenario is
2306 	 * asymmetric, grant only minimum idle time if the queue
2307 	 * is seeky. A long idling is preserved for a weight-raised
2308 	 * queue, or, more in general, in an asymmetric scenario,
2309 	 * because a long idling is needed for guaranteeing to a queue
2310 	 * its reserved share of the throughput (in particular, it is
2311 	 * needed if the queue has a higher weight than some other
2312 	 * queue).
2313 	 */
2314 	if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2315 	    bfq_symmetric_scenario(bfqd))
2316 		sl = min_t(u64, sl, BFQ_MIN_TT);
2317 
2318 	bfqd->last_idling_start = ktime_get();
2319 	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2320 		      HRTIMER_MODE_REL);
2321 	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
2322 }
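
/*
 * Numerical sketch, assuming the default 8 ms slice_idle and a 2 ms
 * BFQ_MIN_TT: a weight-raised queue, or any queue in an asymmetric
 * scenario, idles for the full 8 ms, whereas a seeky, non-raised
 * queue in a symmetric scenario gets only min(8 ms, 2 ms) = 2 ms
 * before the idle timer fires.
 */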
2323 
2324 /*
2325  * In autotuning mode, max_budget is dynamically recomputed as the
2326  * amount of sectors transferred in timeout at the estimated peak
2327  * rate. This enables BFQ to utilize a full timeslice with a full
2328  * budget, even if the in-service queue is served at peak rate. And
2329  * this maximises throughput with sequential workloads.
2330  */
2331 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2332 {
2333 	return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2334 		jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
2335 }
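
/*
 * Worked example, assuming BFQ_RATE_SHIFT is 16 and the default
 * 125 ms bfq_timeout: a device measured at ~100 MB/s transfers about
 * 0.195 sectors/usec, so peak_rate stores roughly 0.195 << 16 ~= 12800.
 * Then
 *
 *	max_budget = 12800 * 1000 * 125 >> 16 ~= 24400 sectors,
 *
 * i.e., ~12.5 MB: the amount of data the device can transfer at peak
 * rate during one budget timeout.
 */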
2336 
2337 /*
2338  * Update parameters related to throughput and responsiveness, as a
2339  * function of the estimated peak rate. See comments on
2340  * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2341  */
2342 static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2343 {
2344 	int dev_type = blk_queue_nonrot(bfqd->queue);
2345 
2346 	if (bfqd->bfq_user_max_budget == 0)
2347 		bfqd->bfq_max_budget =
2348 			bfq_calc_max_budget(bfqd);
2349 
2350 	if (bfqd->device_speed == BFQ_BFQD_FAST &&
2351 	    bfqd->peak_rate < device_speed_thresh[dev_type]) {
2352 		bfqd->device_speed = BFQ_BFQD_SLOW;
2353 		bfqd->RT_prod = R_slow[dev_type] *
2354 			T_slow[dev_type];
2355 	} else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2356 		   bfqd->peak_rate > device_speed_thresh[dev_type]) {
2357 		bfqd->device_speed = BFQ_BFQD_FAST;
2358 		bfqd->RT_prod = R_fast[dev_type] *
2359 			T_fast[dev_type];
2360 	}
2361 
2362 	bfq_log(bfqd,
2363 "dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu sects/sec",
2364 		dev_type == 0 ? "ROT" : "NONROT",
2365 		bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2366 		bfqd->device_speed == BFQ_BFQD_FAST ?
2367 		(USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2368 		(USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2369 		(USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2370 		BFQ_RATE_SHIFT);
2371 }
2372 
2373 static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2374 				       struct request *rq)
2375 {
2376 	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2377 		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2378 		bfqd->peak_rate_samples = 1;
2379 		bfqd->sequential_samples = 0;
2380 		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2381 			blk_rq_sectors(rq);
2382 	} else /* no new rq dispatched, just reset the number of samples */
2383 		bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2384 
2385 	bfq_log(bfqd,
2386 		"reset_rate_computation at end, sample %u/%u tot_sects %llu",
2387 		bfqd->peak_rate_samples, bfqd->sequential_samples,
2388 		bfqd->tot_sectors_dispatched);
2389 }
2390 
2391 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2392 {
2393 	u32 rate, weight, divisor;
2394 
2395 	/*
2396 	 * For the convergence property to hold (see comments on
2397 	 * bfq_update_peak_rate()) and for the assessment to be
2398 	 * reliable, a minimum number of samples must be present, and
2399 	 * a minimum amount of time must have elapsed. If not so, do
2400 	 * not compute new rate. Just reset parameters, to get ready
2401 	 * for a new evaluation attempt.
2402 	 */
2403 	if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2404 	    bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2405 		goto reset_computation;
2406 
2407 	/*
2408 	 * If a new request completion has occurred after last
2409 	 * dispatch, then, to approximate the rate at which requests
2410 	 * have been served by the device, it is more precise to
2411 	 * extend the observation interval to the last completion.
2412 	 */
2413 	bfqd->delta_from_first =
2414 		max_t(u64, bfqd->delta_from_first,
2415 		      bfqd->last_completion - bfqd->first_dispatch);
2416 
2417 	/*
2418 	 * Rate computed in sects/usec, and not sects/nsec, for
2419 	 * precision issues.
2420 	 */
2421 	rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2422 			div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2423 
2424 	/*
2425 	 * Peak rate not updated if:
2426 	 * - the percentage of sequential dispatches is below 3/4 of the
2427 	 *   total, and rate is below the current estimated peak rate
2428 	 * - rate is unreasonably high (> 20M sectors/sec)
2429 	 */
2430 	if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2431 	     rate <= bfqd->peak_rate) ||
2432 		rate > 20<<BFQ_RATE_SHIFT)
2433 		goto reset_computation;
2434 
2435 	/*
2436 	 * We have to update the peak rate, at last! To this purpose,
2437 	 * we use a low-pass filter. We compute the smoothing constant
2438 	 * of the filter as a function of the 'weight' of the new
2439 	 * measured rate.
2440 	 *
2441 	 * As can be seen in the next formulas, we define this weight as a
2442 	 * quantity proportional to how sequential the workload is,
2443 	 * and to how long the observation time interval is.
2444 	 *
2445 	 * The weight runs from 0 to 8. The maximum value of the
2446 	 * weight, 8, yields the minimum value for the smoothing
2447 	 * constant. At this minimum value for the smoothing constant,
2448 	 * the measured rate contributes for half of the next value of
2449 	 * the estimated peak rate.
2450 	 *
2451 	 * So, the first step is to compute the weight as a function
2452 	 * of how sequential the workload is. Note that the weight
2453 	 * cannot reach 9, because bfqd->sequential_samples cannot
2454 	 * become equal to bfqd->peak_rate_samples, which, in its
2455 	 * turn, holds true because bfqd->sequential_samples is not
2456 	 * incremented for the first sample.
2457 	 */
2458 	weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2459 
2460 	/*
2461 	 * Second step: further refine the weight as a function of the
2462 	 * duration of the observation interval.
2463 	 */
2464 	weight = min_t(u32, 8,
2465 		       div_u64(weight * bfqd->delta_from_first,
2466 			       BFQ_RATE_REF_INTERVAL));
2467 
2468 	/*
2469 	 * Divisor ranging from 10, for minimum weight, to 2, for
2470 	 * maximum weight.
2471 	 */
2472 	divisor = 10 - weight;
2473 
2474 	/*
2475 	 * Finally, update peak rate:
2476 	 *
2477 	 * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor
2478 	 */
2479 	bfqd->peak_rate *= divisor-1;
2480 	bfqd->peak_rate /= divisor;
2481 	rate /= divisor; /* smoothing constant alpha = 1/divisor */
2482 
2483 	bfqd->peak_rate += rate;
2484 	update_thr_responsiveness_params(bfqd);
2485 
2486 reset_computation:
2487 	bfq_reset_rate_computation(bfqd, rq);
2488 }
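
#if 0 /* Illustrative sketch of the low-pass filter above, in isolation */
/*
 * Not part of the scheduler: this helper only reproduces the
 * smoothing step
 *
 *	new_est = old_est * (div - 1) / div + sample / div
 *
 * With fully sequential samples over a whole reference interval,
 * weight = 8 and div = 2, so the new sample contributes half of the
 * updated estimate; with mostly random samples over a short
 * interval, weight tends to 0 and div to 10, so the sample
 * contributes only one tenth.
 */
static u32 bfq_lowpass_sketch(u32 old_est, u32 sample, u32 div)
{
	return old_est * (div - 1) / div + sample / div;
}
#endif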
2489 
2490 /*
2491  * Update the read/write peak rate (the main quantity used for
2492  * auto-tuning, see update_thr_responsiveness_params()).
2493  *
2494  * It is not trivial to estimate the peak rate (correctly): because of
2495  * the presence of sw and hw queues between the scheduler and the
2496  * device components that finally serve I/O requests, it is hard to
2497  * say exactly when a given dispatched request is served inside the
2498  * device, and for how long. As a consequence, it is hard to know
2499  * precisely at what rate a given set of requests is actually served
2500  * by the device.
2501  *
2502  * On the opposite end, the dispatch time of any request is trivially
2503  * available, and, from this piece of information, the "dispatch rate"
2504  * of requests can be immediately computed. So, the idea in the next
2505  * function is to use what is known, namely request dispatch times
2506  * (plus, when useful, request completion times), to estimate what is
2507  * unknown, namely in-device request service rate.
2508  *
2509  * The main issue is that, because of the above facts, the rate at
2510  * which a certain set of requests is dispatched over a certain time
2511  * interval can vary greatly with respect to the rate at which the
2512  * same requests are then served. But, since the size of any
2513  * intermediate queue is limited, and the service scheme is lossless
2514  * (no request is silently dropped), the following obvious convergence
2515  * property holds: the number of requests dispatched MUST become
2516  * closer and closer to the number of requests completed as the
2517  * observation interval grows. This is the key property used in
2518  * the next function to estimate the peak service rate as a function
2519  * of the observed dispatch rate. The function assumes to be invoked
2520  * on every request dispatch.
2521  */
2522 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2523 {
2524 	u64 now_ns = ktime_get_ns();
2525 
2526 	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2527 		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2528 			bfqd->peak_rate_samples);
2529 		bfq_reset_rate_computation(bfqd, rq);
2530 		goto update_last_values; /* will add one sample */
2531 	}
2532 
2533 	/*
2534 	 * Device idle for very long: the observation interval lasting
2535 	 * up to this dispatch cannot be a valid observation interval
2536 	 * for computing a new peak rate (similarly to the late-
2537 	 * completion event in bfq_completed_request()). Go to
2538 	 * update_rate_and_reset to have the following three steps
2539 	 * taken:
2540 	 * - close the observation interval at the last (previous)
2541 	 *   request dispatch or completion
2542 	 * - compute rate, if possible, for that observation interval
2543 	 * - start a new observation interval with this dispatch
2544 	 */
2545 	if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2546 	    bfqd->rq_in_driver == 0)
2547 		goto update_rate_and_reset;
2548 
2549 	/* Update sampling information */
2550 	bfqd->peak_rate_samples++;
2551 
2552 	if ((bfqd->rq_in_driver > 0 ||
2553 	     now_ns - bfqd->last_completion < BFQ_MIN_TT) &&
2554 	    get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2555 		bfqd->sequential_samples++;
2556 
2557 	bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2558 
2559 	/* Reset max observed rq size every 32 dispatches */
2560 	if (likely(bfqd->peak_rate_samples % 32))
2561 		bfqd->last_rq_max_size =
2562 			max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2563 	else
2564 		bfqd->last_rq_max_size = blk_rq_sectors(rq);
2565 
2566 	bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2567 
2568 	/* Target observation interval not yet reached, go on sampling */
2569 	if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2570 		goto update_last_values;
2571 
2572 update_rate_and_reset:
2573 	bfq_update_rate_reset(bfqd, rq);
2574 update_last_values:
2575 	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2576 	bfqd->last_dispatch = now_ns;
2577 }
2578 
2579 /*
2580  * Remove request from internal lists.
2581  */
2582 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2583 {
2584 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
2585 
2586 	/*
2587 	 * For consistency, the next instruction should have been
2588 	 * executed after removing the request from the queue and
2589 	 * dispatching it.  We execute instead this instruction before
2590 	 * bfq_remove_request() (and hence introduce a temporary
2591 	 * inconsistency), for efficiency.  In fact, should this
2592 	 * dispatch occur for a non-in-service bfqq, this anticipated
2593 	 * increment prevents two counters related to bfqq->dispatched
2594 	 * from first being uselessly decremented, and then
2595 	 * incremented again when the (new) value of bfqq->dispatched
2596 	 * happens to be taken into account.
2597 	 */
2598 	bfqq->dispatched++;
2599 	bfq_update_peak_rate(q->elevator->elevator_data, rq);
2600 
2601 	bfq_remove_request(q, rq);
2602 }
2603 
2604 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2605 {
2606 	/*
2607 	 * If this bfqq is shared between multiple processes, check
2608 	 * to make sure that those processes are still issuing I/Os
2609 	 * within the mean seek distance. If not, it may be time to
2610 	 * break the queues apart again.
2611 	 */
2612 	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2613 		bfq_mark_bfqq_split_coop(bfqq);
2614 
2615 	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2616 		if (bfqq->dispatched == 0)
2617 			/*
2618 			 * Overloading budget_timeout field to store
2619 			 * the time at which the queue remains with no
2620 			 * backlog and no outstanding request; used by
2621 			 * the weight-raising mechanism.
2622 			 */
2623 			bfqq->budget_timeout = jiffies;
2624 
2625 		bfq_del_bfqq_busy(bfqd, bfqq, true);
2626 	} else {
2627 		bfq_requeue_bfqq(bfqd, bfqq, true);
2628 		/*
2629 		 * Resort priority tree of potential close cooperators.
2630 		 */
2631 		bfq_pos_tree_add_move(bfqd, bfqq);
2632 	}
2633 
2634 	/*
2635 	 * All in-service entities must have been properly deactivated
2636 	 * or requeued before executing the next function, which
2637 	 * resets all in-service entities as no longer in service.
2638 	 */
2639 	__bfq_bfqd_reset_in_service(bfqd);
2640 }
2641 
2642 /**
2643  * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2644  * @bfqd: device data.
2645  * @bfqq: queue to update.
2646  * @reason: reason for expiration.
2647  *
2648  * Handle the feedback on @bfqq budget at queue expiration.
2649  * See the body for detailed comments.
2650  */
2651 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2652 				     struct bfq_queue *bfqq,
2653 				     enum bfqq_expiration reason)
2654 {
2655 	struct request *next_rq;
2656 	int budget, min_budget;
2657 
2658 	min_budget = bfq_min_budget(bfqd);
2659 
2660 	if (bfqq->wr_coeff == 1)
2661 		budget = bfqq->max_budget;
2662 	else /*
2663 	      * Use a constant, low budget for weight-raised queues,
2664 	      * to help achieve a low latency. Keep it slightly higher
2665 	      * than the minimum possible budget, to cause a little
2666 	      * bit fewer expirations.
2667 	      */
2668 		budget = 2 * min_budget;
2669 
2670 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2671 		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2672 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2673 		budget, bfq_min_budget(bfqd));
2674 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2675 		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2676 
2677 	if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
2678 		switch (reason) {
2679 		/*
2680 		 * Caveat: in all the following cases we trade latency
2681 		 * for throughput.
2682 		 */
2683 		case BFQQE_TOO_IDLE:
2684 			/*
2685 			 * This is the only case where we may reduce
2686 			 * the budget: if there is no request of the
2687 			 * process still waiting for completion, then
2688 			 * we assume (tentatively) that the timer has
2689 			 * expired because the batch of requests of
2690 			 * the process could have been served with a
2691 			 * smaller budget.  Hence, betting that
2692 			 * process will behave in the same way when it
2693 			 * becomes backlogged again, we reduce its
2694 			 * next budget.  As long as we guess right,
2695 			 * this budget cut reduces the latency
2696 			 * experienced by the process.
2697 			 *
2698 			 * However, if there are still outstanding
2699 			 * requests, then the process may have not yet
2700 			 * issued its next request just because it is
2701 			 * still waiting for the completion of some of
2702 			 * the still outstanding ones.  So in this
2703 			 * subcase we do not reduce its budget, on the
2704 			 * contrary we increase it to possibly boost
2705 			 * the throughput, as discussed in the
2706 			 * comments to the BUDGET_TIMEOUT case.
2707 			 */
2708 			if (bfqq->dispatched > 0) /* still outstanding reqs */
2709 				budget = min(budget * 2, bfqd->bfq_max_budget);
2710 			else {
2711 				if (budget > 5 * min_budget)
2712 					budget -= 4 * min_budget;
2713 				else
2714 					budget = min_budget;
2715 			}
2716 			break;
2717 		case BFQQE_BUDGET_TIMEOUT:
2718 			/*
2719 			 * We double the budget here because it gives
2720 			 * the chance to boost the throughput if this
2721 			 * is not a seeky process (and has bumped into
2722 			 * this timeout because of, e.g., ZBR).
2723 			 */
2724 			budget = min(budget * 2, bfqd->bfq_max_budget);
2725 			break;
2726 		case BFQQE_BUDGET_EXHAUSTED:
2727 			/*
2728 			 * The process still has backlog, and did not
2729 			 * let either the budget timeout or the disk
2730 			 * idling timeout expire. Hence it is not
2731 			 * seeky, has a short thinktime and may be
2732 			 * happy with a higher budget too. So
2733 			 * definitely increase the budget of this good
2734 			 * candidate to boost the disk throughput.
2735 			 */
2736 			budget = min(budget * 4, bfqd->bfq_max_budget);
2737 			break;
2738 		case BFQQE_NO_MORE_REQUESTS:
2739 			/*
2740 			 * For queues that expire for this reason, it
2741 			 * is particularly important to keep the
2742 			 * budget close to the actual service they
2743 			 * need. Doing so reduces the timestamp
2744 			 * misalignment problem described in the
2745 			 * comments in the body of
2746 			 * __bfq_activate_entity. In fact, suppose
2747 			 * that a queue systematically expires for
2748 			 * BFQQE_NO_MORE_REQUESTS and presents a
2749 			 * new request in time to enjoy timestamp
2750 			 * back-shifting. The larger the budget of the
2751 			 * queue is with respect to the service the
2752 			 * queue actually requests in each service
2753 			 * slot, the more times the queue can be
2754 			 * reactivated with the same virtual finish
2755 			 * time. It follows that, even if this finish
2756 			 * time is pushed to the system virtual time
2757 			 * to reduce the consequent timestamp
2758 			 * misalignment, the queue unjustly enjoys for
2759 			 * many re-activations a lower finish time
2760 			 * than all newly activated queues.
2761 			 *
2762 			 * The service needed by bfqq is measured
2763 			 * quite precisely by bfqq->entity.service.
2764 			 * Since bfqq does not enjoy device idling,
2765 			 * bfqq->entity.service is equal to the number
2766 			 * of sectors that the process associated with
2767 			 * bfqq requested to read/write before waiting
2768 			 * for request completions, or blocking for
2769 			 * other reasons.
2770 			 */
2771 			budget = max_t(int, bfqq->entity.service, min_budget);
2772 			break;
2773 		default:
2774 			return;
2775 		}
2776 	} else if (!bfq_bfqq_sync(bfqq)) {
2777 		/*
2778 		 * Async queues get always the maximum possible
2779 		 * budget, as for them we do not care about latency
2780 		 * (in addition, their ability to dispatch is limited
2781 		 * by the charging factor).
2782 		 */
2783 		budget = bfqd->bfq_max_budget;
2784 	}
2785 
2786 	bfqq->max_budget = budget;
2787 
2788 	if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2789 	    !bfqd->bfq_user_max_budget)
2790 		bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2791 
2792 	/*
2793 	 * If there is still backlog, then assign a new budget, making
2794 	 * sure that it is large enough for the next request.  Since
2795 	 * the finish time of bfqq must be kept in sync with the
2796 	 * budget, be sure to call __bfq_bfqq_expire() *after* this
2797 	 * update.
2798 	 *
2799 	 * If there is no backlog, then no need to update the budget;
2800 	 * it will be updated on the arrival of a new request.
2801 	 */
2802 	next_rq = bfqq->next_rq;
2803 	if (next_rq)
2804 		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2805 					    bfq_serv_to_charge(next_rq, bfqq));
2806 
2807 	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2808 			next_rq ? blk_rq_sectors(next_rq) : 0,
2809 			bfqq->entity.budget);
2810 }
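
/*
 * Numerical sketch of the feedback above, with, say, min_budget = 256
 * and bfq_max_budget = 16384: a queue expiring for
 * BFQQE_BUDGET_EXHAUSTED with max_budget = 2048 gets
 * min(2048 * 4, 16384) = 8192, whereas one expiring for
 * BFQQE_TOO_IDLE with no outstanding requests and the same
 * max_budget gets 2048 - 4 * 256 = 1024.
 */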
2811 
2812 /*
2813  * Return true if the process associated with bfqq is "slow". The slow
2814  * flag is used, in addition to the budget timeout, to reduce the
2815  * amount of service provided to seeky processes, and thus reduce
2816  * their chances to lower the throughput. More details in the comments
2817  * on the function bfq_bfqq_expire().
2818  *
2819  * An important observation is in order: as discussed in the comments
2820  * on the function bfq_update_peak_rate(), with devices with internal
2821  * queues, it is hard if ever possible to know when and for how long
2822  * an I/O request is processed by the device (apart from the trivial
2823  * I/O pattern where a new request is dispatched only after the
2824  * previous one has been completed). This makes it hard to evaluate
2825  * the real rate at which the I/O requests of each bfq_queue are
2826  * served.  In fact, for an I/O scheduler like BFQ, serving a
2827  * bfq_queue means just dispatching its requests during its service
2828  * slot (i.e., until the budget of the queue is exhausted, or the
2829  * queue remains idle, or, finally, a timeout fires). But, during the
2830  * service slot of a bfq_queue, around 100 ms at most, the device may
2831  * be even still processing requests of bfq_queues served in previous
2832  * service slots. On the opposite end, the requests of the in-service
2833  * bfq_queue may be completed after the service slot of the queue
2834  * finishes.
2835  *
2836  * Anyway, unless more sophisticated solutions are used
2837  * (where possible), the sum of the sizes of the requests dispatched
2838  * during the service slot of a bfq_queue is probably the only
2839  * approximation available for the service received by the bfq_queue
2840  * during its service slot. And this sum is the quantity used in this
2841  * function to evaluate the I/O speed of a process.
2842  */
2843 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2844 				 bool compensate, enum bfqq_expiration reason,
2845 				 unsigned long *delta_ms)
2846 {
2847 	ktime_t delta_ktime;
2848 	u32 delta_usecs;
2849 	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
2850 
2851 	if (!bfq_bfqq_sync(bfqq))
2852 		return false;
2853 
2854 	if (compensate)
2855 		delta_ktime = bfqd->last_idling_start;
2856 	else
2857 		delta_ktime = ktime_get();
2858 	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2859 	delta_usecs = ktime_to_us(delta_ktime);
2860 
2861 	/* don't use too short time intervals */
2862 	if (delta_usecs < 1000) {
2863 		if (blk_queue_nonrot(bfqd->queue))
2864 			 /*
2865 			  * give same worst-case guarantees as idling
2866 			  * for seeky
2867 			  */
2868 			*delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2869 		else /* charge at least one seek */
2870 			*delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2871 
2872 		return slow;
2873 	}
2874 
2875 	*delta_ms = delta_usecs / USEC_PER_MSEC;
2876 
2877 	/*
2878 	 * Use only long (> 20ms) intervals to filter out excessive
2879 	 * spikes in service rate estimation.
2880 	 */
2881 	if (delta_usecs > 20000) {
2882 		/*
2883 		 * Caveat for rotational devices: processes doing I/O
2884 		 * in the slower disk zones tend to be slow(er) even
2885 		 * if not seeky. In this respect, the estimated peak
2886 		 * rate is likely to be an average over the disk
2887 		 * surface. Accordingly, to avoid being too harsh with
2888 		 * unlucky processes, a process is deemed slow only if
2889 		 * its rate has been lower than half of the estimated
2890 		 * peak rate.
2891 		 */
2892 		slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
2893 	}
2894 
2895 	bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
2896 
2897 	return slow;
2898 }
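
/*
 * Numerical sketch of the criterion above: with an estimated
 * bfq_max_budget of 16384 sectors, a sync queue that received only
 * 1000 sectors of service over a sufficiently long interval
 * satisfies 1000 < 16384 / 2 and is deemed slow, whereas one that
 * received 10000 sectors is not.
 */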
2899 
2900 /*
2901  * To be deemed as soft real-time, an application must meet two
2902  * requirements. First, the application must not require an average
2903  * bandwidth higher than the approximate bandwidth required to play back or
2904  * record a compressed high-definition video.
2905  * The next function is invoked on the completion of the last request of a
2906  * batch, to compute the next-start time instant, soft_rt_next_start, such
2907  * that, if the next request of the application does not arrive before
2908  * soft_rt_next_start, then the above requirement on the bandwidth is met.
2909  *
2910  * The second requirement is that the request pattern of the application is
2911  * isochronous, i.e., that, after issuing a request or a batch of requests,
2912  * the application stops issuing new requests until all its pending requests
2913  * have been completed. After that, the application may issue a new batch,
2914  * and so on.
2915  * For this reason the next function is invoked to compute
2916  * soft_rt_next_start only for applications that meet this requirement,
2917  * whereas soft_rt_next_start is set to infinity for applications that do
2918  * not.
2919  *
2920  * Unfortunately, even a greedy application may happen to behave in an
2921  * isochronous way if the CPU load is high. In fact, the application may
2922  * stop issuing requests while the CPUs are busy serving other processes,
2923  * then restart, then stop again for a while, and so on. In addition, if
2924  * the disk achieves a low enough throughput with the request pattern
2925  * issued by the application (e.g., because the request pattern is random
2926  * and/or the device is slow), then the application may meet the above
2927  * bandwidth requirement too. To prevent such a greedy application from
2928  * being deemed soft real-time, a further rule is used in the computation of
2929  * soft_rt_next_start: soft_rt_next_start must be higher than the current
2930  * time plus the maximum time for which the arrival of a request is waited
2931  * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2932  * This filters out greedy applications, as the latter issue instead their
2933  * next request as soon as possible after the last one has been completed
2934  * (in contrast, when a batch of requests is completed, a soft real-time
2935  * application spends some time processing data).
2936  *
2937  * Unfortunately, the last filter may easily generate false positives if
2938  * only bfqd->bfq_slice_idle is used as a reference time interval and one
2939  * or both the following cases occur:
2940  * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2941  *    than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
2942  *    HZ=100.
2943  * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2944  *    for a while, then suddenly 'jump' by several units to recover the lost
2945  *    increments. This seems to happen, e.g., inside virtual machines.
2946  * To address this issue, we do not use as a reference time interval just
2947  * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2948  * particular we add the minimum number of jiffies for which the filter
2949  * seems to be quite precise even in embedded systems and KVM/QEMU virtual
2950  * machines.
2951  */
2952 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2953 						struct bfq_queue *bfqq)
2954 {
2955 	return max(bfqq->last_idle_bklogged +
2956 		   HZ * bfqq->service_from_backlogged /
2957 		   bfqd->bfq_wr_max_softrt_rate,
2958 		   jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
2959 }
2960 
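/*
 * A worked instance of the formula above, with hypothetical values
 * (7000 sectors/sec approximates the HD-video rate used as the default
 * bfq_wr_max_softrt_rate; the other numbers are made up): with HZ=250,
 * a queue that received 2000 sectors of service while backlogged gets
 * soft_rt_next_start = last_idle_bklogged + (250 * 2000) / 7000, about
 * last_idle_bklogged + 71 jiffies, unless this value is lower than the
 * anti-greedy bound jiffies + slice-idle-in-jiffies + 4, in which case
 * the latter prevails.
 */
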
2961 /**
2962  * bfq_bfqq_expire - expire a queue.
2963  * @bfqd: device owning the queue.
2964  * @bfqq: the queue to expire.
2965  * @compensate: if true, compensate for the time spent idling.
2966  * @reason: the reason causing the expiration.
2967  *
2968  * If the process associated with bfqq does slow I/O (e.g., because it
2969  * issues random requests), we charge bfqq with the time it has been
2970  * in service instead of the service it has received (see
2971  * bfq_bfqq_charge_time for details on how this goal is achieved). As
2972  * a consequence, bfqq will typically get higher timestamps upon
2973  * reactivation, and hence it will be rescheduled as if it had
2974  * received more service than what it has actually received. In the
2975  * end, bfqq receives less service in proportion to how slowly its
2976  * associated process consumes its budgets (and hence how seriously it
2977  * tends to lower the throughput). In addition, this time-charging
2978  * strategy guarantees time fairness among slow processes. In
2979  * contrast, if the process associated with bfqq is not slow, we
2980  * charge bfqq exactly with the service it has received.
2981  *
2982  * Charging time to the first type of queues and the exact service to
2983  * the other has the effect of using the WF2Q+ policy to schedule the
2984  * former on a timeslice basis, without violating service domain
2985  * guarantees among the latter.
2986  */
2987 void bfq_bfqq_expire(struct bfq_data *bfqd,
2988 		     struct bfq_queue *bfqq,
2989 		     bool compensate,
2990 		     enum bfqq_expiration reason)
2991 {
2992 	bool slow;
2993 	unsigned long delta = 0;
2994 	struct bfq_entity *entity = &bfqq->entity;
2995 	int ref;
2996 
2997 	/*
2998 	 * Check whether the process is slow (see bfq_bfqq_is_slow).
2999 	 */
3000 	slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
3001 
3002 	/*
3003 	 * Increase service_from_backlogged before next statement,
3004 	 * because the possible next invocation of
3005 	 * bfq_bfqq_charge_time would likely inflate
3006 	 * entity->service. In contrast, service_from_backlogged must
3007 	 * contain real service, to enable the soft real-time
3008 	 * heuristic to correctly compute the bandwidth consumed by
3009 	 * bfqq.
3010 	 */
3011 	bfqq->service_from_backlogged += entity->service;
3012 
3013 	/*
3014 	 * As above explained, charge slow (typically seeky) and
3015 	 * timed-out queues with the time and not the service
3016 	 * received, to favor sequential workloads.
3017 	 *
3018 	 * Processes doing I/O in the slower disk zones will tend to
3019 	 * be slow(er) even if not seeky. Therefore, since the
3020 	 * estimated peak rate is actually an average over the disk
3021 	 * surface, these processes may timeout just for bad luck. To
3022 	 * avoid punishing them, do not charge time to processes that
3023 	 * succeeded in consuming at least 2/3 of their budget. This
3024 	 * allows BFQ to preserve enough elasticity to still perform
3025 	 * bandwidth, and not time, distribution with little unlucky
3026 	 * or quasi-sequential processes.
3027 	 */
3028 	if (bfqq->wr_coeff == 1 &&
3029 	    (slow ||
3030 	     (reason == BFQQE_BUDGET_TIMEOUT &&
3031 	      bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
3032 		bfq_bfqq_charge_time(bfqd, bfqq, delta);
3033 
3034 	if (reason == BFQQE_TOO_IDLE &&
3035 	    entity->service <= 2 * entity->budget / 10)
3036 		bfq_clear_bfqq_IO_bound(bfqq);
3037 
3038 	if (bfqd->low_latency && bfqq->wr_coeff == 1)
3039 		bfqq->last_wr_start_finish = jiffies;
3040 
3041 	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
3042 	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
3043 		/*
3044 		 * If we get here, and there are no outstanding
3045 		 * requests, then the request pattern is isochronous
3046 		 * (see the comments on the function
3047 		 * bfq_bfqq_softrt_next_start()). Thus we can compute
3048 		 * soft_rt_next_start. If, instead, the queue still
3049 		 * has outstanding requests, then we have to wait for
3050 		 * the completion of all the outstanding requests to
3051 		 * discover whether the request pattern is actually
3052 		 * isochronous.
3053 		 */
3054 		if (bfqq->dispatched == 0)
3055 			bfqq->soft_rt_next_start =
3056 				bfq_bfqq_softrt_next_start(bfqd, bfqq);
3057 		else {
3058 			/*
3059 			 * The application is still waiting for the
3060 			 * completion of one or more requests:
3061 			 * prevent it from possibly being incorrectly
3062 			 * deemed as soft real-time by setting its
3063 			 * soft_rt_next_start to infinity. In fact,
3064 			 * without this assignment, the application
3065 			 * would be incorrectly deemed as soft
3066 			 * real-time if:
3067 			 * 1) it issued a new request before the
3068 			 *    completion of all its in-flight
3069 			 *    requests, and
3070 			 * 2) at that time, its soft_rt_next_start
3071 			 *    happened to be in the past.
3072 			 */
3073 			bfqq->soft_rt_next_start =
3074 				bfq_greatest_from_now();
3075 			/*
3076 			 * Schedule an update of soft_rt_next_start to when
3077 			 * the task may be discovered to be isochronous.
3078 			 */
3079 			bfq_mark_bfqq_softrt_update(bfqq);
3080 		}
3081 	}
3082 
3083 	bfq_log_bfqq(bfqd, bfqq,
3084 		"expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
3085 		slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
3086 
3087 	/*
3088 	 * Increase, decrease or leave budget unchanged according to
3089 	 * reason.
3090 	 */
3091 	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3092 	ref = bfqq->ref;
3093 	__bfq_bfqq_expire(bfqd, bfqq);
3094 
3095 	/* mark bfqq as waiting a request only if a bic still points to it */
3096 	if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3097 	    reason != BFQQE_BUDGET_TIMEOUT &&
3098 	    reason != BFQQE_BUDGET_EXHAUSTED)
3099 		bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3100 }
3101 
3102 /*
3103  * Budget timeout is not implemented through a dedicated timer, but
3104  * just checked on request arrivals and completions, as well as on
3105  * idle timer expirations.
3106  */
3107 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3108 {
3109 	return time_is_before_eq_jiffies(bfqq->budget_timeout);
3110 }
3111 
3112 /*
3113  * If we expire a queue that is actively waiting (i.e., with the
3114  * device idled) for the arrival of a new request, then we may incur
3115  * the timestamp misalignment problem described in the body of the
3116  * function __bfq_activate_entity. Hence we return true only if this
3117  * condition does not hold, or if the queue is slow enough to deserve
3118  * only to be kicked off for preserving a high throughput.
3119  */
3120 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3121 {
3122 	bfq_log_bfqq(bfqq->bfqd, bfqq,
3123 		"may_budget_timeout: wait_request %d left %d timeout %d",
3124 		bfq_bfqq_wait_request(bfqq),
3125 		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
3126 		bfq_bfqq_budget_timeout(bfqq));
3127 
3128 	return (!bfq_bfqq_wait_request(bfqq) ||
3129 		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
3130 		&&
3131 		bfq_bfqq_budget_timeout(bfqq);
3132 }
3133 
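/*
 * Equivalently: a budget-timeout expiration is allowed only when the
 * queue is not idling for a new request, or when it has consumed at
 * most 2/3 of its budget (bfq_bfqq_budget_left >= budget/3), i.e.,
 * when it is slow enough that keeping it in service would mainly hurt
 * throughput.
 */
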
3134 /*
3135  * For a queue that becomes empty, device idling is allowed only if
3136  * this function returns true for the queue. As a consequence, since
3137  * device idling plays a critical role in both throughput boosting and
3138  * service guarantees, the return value of this function plays a
3139  * critical role in both these aspects as well.
3140  *
3141  * In a nutshell, this function returns true only if idling is
3142  * beneficial for throughput or, even if detrimental for throughput,
3143  * idling is however necessary to preserve service guarantees (low
3144  * latency, desired throughput distribution, ...). In particular, on
3145  * NCQ-capable devices, this function tries to return false, so as to
3146  * help keep the drives' internal queues full, whenever this helps the
3147  * device boost the throughput without causing any service-guarantee
3148  * issue.
3149  *
3150  * In more detail, the return value of this function is obtained by,
3151  * first, computing a number of boolean variables that take into
3152  * account throughput and service-guarantee issues, and, then,
3153  * combining these variables in a logical expression. Most of the
3154  * issues taken into account are not trivial. We discuss these issues
3155  * individually while introducing the variables.
3156  */
3157 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3158 {
3159 	struct bfq_data *bfqd = bfqq->bfqd;
3160 	bool rot_without_queueing =
3161 		!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
3162 		bfqq_sequential_and_IO_bound,
3163 		idling_boosts_thr, idling_boosts_thr_without_issues,
3164 		idling_needed_for_service_guarantees,
3165 		asymmetric_scenario;
3166 
3167 	if (bfqd->strict_guarantees)
3168 		return true;
3169 
3170 	/*
3171 	 * Idling is performed only if slice_idle > 0. In addition, we
3172 	 * do not idle if
3173 	 * (a) bfqq is async
3174 	 * (b) bfqq is in the idle io prio class: in this case we do
3175 	 * not idle because we want to minimize the bandwidth that
3176 	 * queues in this class can steal from higher-priority queues
3177 	 */
3178 	if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
3179 	    bfq_class_idle(bfqq))
3180 		return false;
3181 
3182 	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
3183 		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
3184 
3185 	/*
3186 	 * The next variable takes into account the cases where idling
3187 	 * boosts the throughput.
3188 	 *
3189 	 * The value of the variable is computed considering, first, that
3190 	 * idling is virtually always beneficial for the throughput if:
3191 	 * (a) the device is not NCQ-capable and rotational, or
3192 	 * (b) regardless of the presence of NCQ, the device is rotational and
3193 	 *     the request pattern for bfqq is I/O-bound and sequential, or
3194 	 * (c) regardless of whether it is rotational, the device is
3195 	 *     not NCQ-capable and the request pattern for bfqq is
3196 	 *     I/O-bound and sequential.
3197 	 *
3198 	 * Secondly, and in contrast to the above item (b), idling an
3199 	 * NCQ-capable flash-based device would not boost the
3200 	 * throughput even with sequential I/O; rather it would lower
3201 	 * the throughput in proportion to how fast the device
3202 	 * is. Accordingly, the next variable is true if any of the
3203 	 * above conditions (a), (b) or (c) is true, and, in
3204 	 * particular, happens to be false if bfqd is an NCQ-capable
3205 	 * flash-based device.
3206 	 */
3207 	idling_boosts_thr = rot_without_queueing ||
3208 		((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
3209 		 bfqq_sequential_and_IO_bound);
3210 
3211 	/*
3212 	 * The value of the next variable,
3213 	 * idling_boosts_thr_without_issues, is equal to that of
3214 	 * idling_boosts_thr, unless a special case holds. In this
3215 	 * special case, described below, idling may cause problems to
3216 	 * weight-raised queues.
3217 	 *
3218 	 * When the request pool is saturated (e.g., in the presence
3219 	 * of write hogs), if the processes associated with
3220 	 * non-weight-raised queues ask for requests at a lower rate,
3221 	 * then processes associated with weight-raised queues have a
3222 	 * higher probability to get a request from the pool
3223 	 * immediately (or at least soon) when they need one. Thus
3224 	 * they have a higher probability to actually get a fraction
3225 	 * of the device throughput proportional to their high
3226 	 * weight. This is especially true with NCQ-capable drives,
3227 	 * which enqueue several requests in advance, and further
3228 	 * reorder internally-queued requests.
3229 	 *
3230 	 * For this reason, we force to false the value of
3231 	 * idling_boosts_thr_without_issues if there are weight-raised
3232 	 * busy queues. In this case, and if bfqq is not weight-raised,
3233 	 * this guarantees that the device is not idled for bfqq (if,
3234 	 * instead, bfqq is weight-raised, then idling will be
3235 	 * guaranteed by another variable, see below). Combined with
3236 	 * the timestamping rules of BFQ (see [1] for details), this
3237 	 * behavior causes bfqq, and hence any sync non-weight-raised
3238 	 * queue, to get a lower number of requests served, and thus
3239 	 * to ask for a lower number of requests from the request
3240 	 * pool, before the busy weight-raised queues get served
3241 	 * again. This often mitigates starvation problems in the
3242 	 * presence of heavy write workloads and NCQ, thereby
3243 	 * guaranteeing a higher application and system responsiveness
3244 	 * in these hostile scenarios.
3245 	 */
3246 	idling_boosts_thr_without_issues = idling_boosts_thr &&
3247 		bfqd->wr_busy_queues == 0;
3248 
3249 	/*
3250 	 * There is then a case where idling must be performed not
3251 	 * for throughput concerns, but to preserve service
3252 	 * guarantees.
3253 	 *
3254 	 * To introduce this case, we can note that allowing the drive
3255 	 * to enqueue more than one request at a time, and hence
3256 	 * delegating de facto final scheduling decisions to the
3257 	 * drive's internal scheduler, entails loss of control on the
3258 	 * actual request service order. In particular, the critical
3259 	 * situation is when requests from different processes happen
3260 	 * to be present, at the same time, in the internal queue(s)
3261 	 * of the drive. In such a situation, the drive, by deciding
3262 	 * the service order of the internally-queued requests, does
3263 	 * determine also the actual throughput distribution among
3264 	 * these processes. But the drive typically has no notion or
3265 	 * concern about per-process throughput distribution, and
3266 	 * makes its decisions only on a per-request basis. Therefore,
3267 	 * the service distribution enforced by the drive's internal
3268 	 * scheduler is likely to coincide with the desired
3269 	 * device-throughput distribution only in a completely
3270 	 * symmetric scenario where:
3271 	 * (i)  each of these processes must get the same throughput as
3272 	 *      the others;
3273 	 * (ii) all these processes have the same I/O pattern
3274 	 *      (either sequential or random).
3275 	 * In fact, in such a scenario, the drive will tend to treat
3276 	 * the requests of each of these processes in about the same
3277 	 * way as the requests of the others, and thus to provide
3278 	 * each of these processes with about the same throughput
3279 	 * (which is exactly the desired throughput distribution). In
3280 	 * contrast, in any asymmetric scenario, device idling is
3281 	 * certainly needed to guarantee that bfqq receives its
3282 	 * assigned fraction of the device throughput (see [1] for
3283 	 * details).
3284 	 *
3285 	 * We address this issue by controlling, actually, only the
3286 	 * symmetry sub-condition (i), i.e., provided that
3287 	 * sub-condition (i) holds, idling is not performed,
3288 	 * regardless of whether sub-condition (ii) holds. In other
3289 	 * words, only if sub-condition (i) holds, then idling is
3290 	 * allowed, and the device tends to be prevented from queueing
3291 	 * many requests, possibly of several processes. The reason
3292 	 * for not controlling also sub-condition (ii) is that we
3293 	 * exploit preemption to preserve guarantees in case of
3294 	 * symmetric scenarios, even if (ii) does not hold, as
3295 	 * explained in the next two paragraphs.
3296 	 *
3297 	 * Even if a queue, say Q, is expired when it remains idle, Q
3298 	 * can still preempt the new in-service queue if the next
3299 	 * request of Q arrives soon (see the comments on
3300 	 * bfq_bfqq_update_budg_for_activation). If all queues and
3301 	 * groups have the same weight, this form of preemption,
3302 	 * combined with the hole-recovery heuristic described in the
3303 	 * comments on function bfq_bfqq_update_budg_for_activation,
3304 	 * are enough to preserve a correct bandwidth distribution in
3305 	 * the mid term, even without idling. In fact, even if not
3306 	 * idling allows the internal queues of the device to contain
3307 	 * many requests, and thus to reorder requests, we can rather
3308 	 * safely assume that the internal scheduler still preserves a
3309 	 * minimum of mid-term fairness. The motivation for using
3310 	 * preemption instead of idling is that, by not idling,
3311 	 * service guarantees are preserved without minimally
3312 	 * sacrificing throughput. In other words, both a high
3313 	 * throughput and its desired distribution are obtained.
3314 	 *
3315 	 * More precisely, this preemption-based, idleless approach
3316 	 * provides fairness in terms of IOPS, and not sectors per
3317 	 * second. This can be seen with a simple example. Suppose
3318 	 * that there are two queues with the same weight, but that
3319 	 * the first queue receives requests of 8 sectors, while the
3320 	 * second queue receives requests of 1024 sectors. In
3321 	 * addition, suppose that each of the two queues contains at
3322 	 * most one request at a time, which implies that each queue
3323 	 * always remains idle after it is served. Finally, after
3324 	 * remaining idle, each queue receives very quickly a new
3325 	 * request. It follows that the two queues are served
3326 	 * alternatively, preempting each other if needed. This
3327 	 * implies that, although both queues have the same weight,
3328 	 * the queue with large requests receives a service that is
3329 	 * 1024/8 times as high as the service received by the other
3330 	 * queue.
3331 	 *
3332 	 * On the other hand, device idling is performed, and thus
3333 	 * pure sector-domain guarantees are provided, for the
3334 	 * following queues, which are likely to need stronger
3335 	 * throughput guarantees: weight-raised queues, and queues
3336 	 * with a higher weight than other queues. When such queues
3337 	 * are active, sub-condition (i) is false, which triggers
3338 	 * device idling.
3339 	 *
3340 	 * According to the above considerations, the next variable is
3341 	 * true (only) if sub-condition (i) holds. To compute the
3342 	 * value of this variable, we not only use the return value of
3343 	 * the function bfq_symmetric_scenario(), but also check
3344 	 * whether bfqq is being weight-raised, because
3345 	 * bfq_symmetric_scenario() does not also take into account
3346 	 * weight-raised queues (see comments on
3347 	 * bfq_weights_tree_add()).
3348 	 *
3349 	 * As a side note, it is worth considering that the above
3350 	 * device-idling countermeasures may however fail in the
3351 	 * following unlucky scenario: if idling is (correctly)
3352 	 * disabled in a time period during which all symmetry
3353 	 * sub-conditions hold, and hence the device is allowed to
3354 	 * enqueue many requests, but at some later point in time some
3355 	 * sub-condition ceases to hold, then it may become impossible
3356 	 * to let requests be served in the desired order until all
3357 	 * the requests already queued in the device have been served.
3358 	 */
3359 	asymmetric_scenario = bfqq->wr_coeff > 1 ||
3360 		!bfq_symmetric_scenario(bfqd);
3361 
3362 	/*
3363 	 * Finally, there is a case where maximizing throughput is the
3364 	 * best choice even if it may cause unfairness toward
3365 	 * bfqq. Such a case is when bfqq became active in a burst of
3366 	 * queue activations. Queues that became active during a large
3367 	 * burst benefit only from throughput, as discussed in the
3368 	 * comments on bfq_handle_burst. Thus, if bfqq became active
3369 	 * in a burst and not idling the device maximizes throughput,
3370 	 * then the device must not be idled, because not idling the
3371 	 * device provides bfqq and all other queues in the burst with
3372 	 * maximum benefit. Combining this and the above case, we can
3373 	 * now establish when idling is actually needed to preserve
3374 	 * service guarantees.
3375 	 */
3376 	idling_needed_for_service_guarantees =
3377 		asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3378 
3379 	/*
3380 	 * We have now all the components we need to compute the
3381 	 * return value of the function, which is true only if idling
3382 	 * either boosts the throughput (without issues), or is
3383 	 * necessary to preserve service guarantees.
3384 	 */
3385 	return idling_boosts_thr_without_issues ||
3386 		idling_needed_for_service_guarantees;
3387 }
3388 
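/*
 * Compact recap of the logic above: writing B for idling_boosts_thr,
 * W for the presence of weight-raised busy queues, A for the
 * asymmetry condition and L for membership in a large burst, the
 * function returns
 *	(B && !W) || (A && !L)
 * apart from the two early exits (strict_guarantees forces true; a
 * zero bfq_slice_idle, an async queue or an idle-class queue forces
 * false).
 */
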
3389 /*
3390  * If the in-service queue is empty but the function bfq_bfqq_may_idle
3391  * returns true, then:
3392  * 1) the queue must remain in service and cannot be expired, and
3393  * 2) the device must be idled to wait for the possible arrival of a new
3394  *    request for the queue.
3395  * See the comments on the function bfq_bfqq_may_idle for the reasons
3396  * why performing device idling is the best choice to boost the throughput
3397  * and preserve service guarantees when bfq_bfqq_may_idle itself
3398  * returns true.
3399  */
3400 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3401 {
3402 	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
3403 }
3404 
3405 /*
3406  * Select a queue for service.  If we have a current queue in service,
3407  * check whether to continue servicing it, or retrieve and set a new one.
3408  */
3409 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3410 {
3411 	struct bfq_queue *bfqq;
3412 	struct request *next_rq;
3413 	enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3414 
3415 	bfqq = bfqd->in_service_queue;
3416 	if (!bfqq)
3417 		goto new_queue;
3418 
3419 	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3420 
3421 	if (bfq_may_expire_for_budg_timeout(bfqq) &&
3422 	    !bfq_bfqq_wait_request(bfqq) &&
3423 	    !bfq_bfqq_must_idle(bfqq))
3424 		goto expire;
3425 
3426 check_queue:
3427 	/*
3428 	 * This loop is rarely executed more than once. Even when it
3429 	 * happens, it is much more convenient to re-execute this loop
3430 	 * than to return NULL and trigger a new dispatch to get a
3431 	 * request served.
3432 	 */
3433 	next_rq = bfqq->next_rq;
3434 	/*
3435 	 * If bfqq has requests queued and it has enough budget left to
3436 	 * serve them, keep the queue, otherwise expire it.
3437 	 */
3438 	if (next_rq) {
3439 		if (bfq_serv_to_charge(next_rq, bfqq) >
3440 			bfq_bfqq_budget_left(bfqq)) {
3441 			/*
3442 			 * Expire the queue for budget exhaustion,
3443 			 * which makes sure that the next budget is
3444 			 * enough to serve the next request, even if
3445 			 * it comes from the fifo expired path.
3446 			 */
3447 			reason = BFQQE_BUDGET_EXHAUSTED;
3448 			goto expire;
3449 		} else {
3450 			/*
3451 			 * The idle timer may be pending because we may
3452 			 * not disable disk idling even when a new request
3453 			 * arrives.
3454 			 */
3455 			if (bfq_bfqq_wait_request(bfqq)) {
3456 				/*
3457 				 * If we get here: 1) at least one new request
3458 				 * has arrived but we have not disabled the
3459 				 * timer because the request was too small,
3460 				 * 2) then the block layer has unplugged
3461 				 * the device, causing the dispatch to be
3462 				 * invoked.
3463 				 *
3464 				 * Since the device is unplugged, now the
3465 				 * requests are probably large enough to
3466 				 * provide a reasonable throughput.
3467 				 * So we disable idling.
3468 				 */
3469 				bfq_clear_bfqq_wait_request(bfqq);
3470 				hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
3471 			}
3472 			goto keep_queue;
3473 		}
3474 	}
3475 
3476 	/*
3477 	 * No requests pending. However, if the in-service queue is idling
3478 	 * for a new request, or has requests waiting for a completion and
3479 	 * may idle after their completion, then keep it anyway.
3480 	 */
3481 	if (bfq_bfqq_wait_request(bfqq) ||
3482 	    (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3483 		bfqq = NULL;
3484 		goto keep_queue;
3485 	}
3486 
3487 	reason = BFQQE_NO_MORE_REQUESTS;
3488 expire:
3489 	bfq_bfqq_expire(bfqd, bfqq, false, reason);
3490 new_queue:
3491 	bfqq = bfq_set_in_service_queue(bfqd);
3492 	if (bfqq) {
3493 		bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3494 		goto check_queue;
3495 	}
3496 keep_queue:
3497 	if (bfqq)
3498 		bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3499 	else
3500 		bfq_log(bfqd, "select_queue: no queue returned");
3501 
3502 	return bfqq;
3503 }
3504 
3505 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3506 {
3507 	struct bfq_entity *entity = &bfqq->entity;
3508 
3509 	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3510 		bfq_log_bfqq(bfqd, bfqq,
3511 			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3512 			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3513 			jiffies_to_msecs(bfqq->wr_cur_max_time),
3514 			bfqq->wr_coeff,
3515 			bfqq->entity.weight, bfqq->entity.orig_weight);
3516 
3517 		if (entity->prio_changed)
3518 			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3519 
3520 		/*
3521 		 * If the queue was activated in a burst, or too much
3522 		 * time has elapsed from the beginning of this
3523 		 * weight-raising period, then end weight raising.
3524 		 */
3525 		if (bfq_bfqq_in_large_burst(bfqq))
3526 			bfq_bfqq_end_wr(bfqq);
3527 		else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3528 						bfqq->wr_cur_max_time)) {
3529 			if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3530 			time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
3531 					       bfq_wr_duration(bfqd)))
3532 				bfq_bfqq_end_wr(bfqq);
3533 			else {
3534 				switch_back_to_interactive_wr(bfqq, bfqd);
3535 				bfqq->entity.prio_changed = 1;
3536 			}
3537 		}
3538 	}
3539 	/*
3540 	 * To improve latency (for this or other queues), immediately
3541 	 * update weight both if it must be raised and if it must be
3542 	 * lowered. Since entity may be on some active tree here, and
3543 	 * might have a pending change of its ioprio class, invoke the
3544 	 * next function with the last parameter unset (see the
3545 	 * comments on the function).
3546 	 */
3547 	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
3548 		__bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
3549 						entity, false);
3550 }
3551 
3552 /*
3553  * Dispatch next request from bfqq.
3554  */
3555 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3556 						 struct bfq_queue *bfqq)
3557 {
3558 	struct request *rq = bfqq->next_rq;
3559 	unsigned long service_to_charge;
3560 
3561 	service_to_charge = bfq_serv_to_charge(rq, bfqq);
3562 
3563 	bfq_bfqq_served(bfqq, service_to_charge);
3564 
3565 	bfq_dispatch_remove(bfqd->queue, rq);
3566 
3567 	/*
3568 	 * If weight raising has to terminate for bfqq, then the next
3569 	 * function causes an immediate update of bfqq's weight,
3570 	 * without waiting for the next activation. As a consequence, on
3571 	 * expiration, bfqq will be timestamped as if it had never been
3572 	 * weight-raised during this service slot, even if it has
3573 	 * received part or even most of the service as a
3574 	 * weight-raised queue. This inflates bfqq's timestamps, which
3575 	 * is beneficial, as bfqq is then more willing to leave the
3576 	 * device immediately to possible other weight-raised queues.
3577 	 */
3578 	bfq_update_wr_data(bfqd, bfqq);
3579 
3580 	/*
3581 	 * Expire bfqq, pretending that its budget expired, if bfqq
3582 	 * belongs to CLASS_IDLE and other queues are waiting for
3583 	 * service.
3584 	 */
3585 	if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3586 		goto expire;
3587 
3588 	return rq;
3589 
3590 expire:
3591 	bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3592 	return rq;
3593 }
3594 
3595 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3596 {
3597 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3598 
3599 	/*
3600 	 * Avoiding lock: a race on bfqd->busy_queues should at most
3601 	 * cause a useless call to dispatch.
3602 	 */
3603 	return !list_empty_careful(&bfqd->dispatch) ||
3604 		bfqd->busy_queues > 0;
3605 }
3606 
3607 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3608 {
3609 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3610 	struct request *rq = NULL;
3611 	struct bfq_queue *bfqq = NULL;
3612 
3613 	if (!list_empty(&bfqd->dispatch)) {
3614 		rq = list_first_entry(&bfqd->dispatch, struct request,
3615 				      queuelist);
3616 		list_del_init(&rq->queuelist);
3617 
3618 		bfqq = RQ_BFQQ(rq);
3619 
3620 		if (bfqq) {
3621 			/*
3622 			 * Increment counters here, because this
3623 			 * dispatch does not follow the standard
3624 			 * dispatch flow (where counters are
3625 			 * incremented)
3626 			 */
3627 			bfqq->dispatched++;
3628 
3629 			goto inc_in_driver_start_rq;
3630 		}
3631 
3632 		/*
3633 		 * We exploit the put_rq_private hook to decrement
3634 		 * rq_in_driver, but put_rq_private will not be
3635 		 * invoked on this request. So, to avoid an imbalance,
3636 		 * just start this request, without incrementing
3637 		 * rq_in_driver. As a negative consequence,
3638 		 * rq_in_driver is deceptively lower than it should be
3639 		 * while this request is in service. This may cause
3640 		 * bfq_schedule_dispatch to be invoked uselessly.
3641 		 *
3642 		 * As for implementing an exact solution, the
3643 		 * put_request hook, if defined, is probably invoked
3644 		 * also on this request. So, by exploiting this hook,
3645 		 * we could 1) increment rq_in_driver here, and 2)
3646 		 * decrement it in put_request. Such a solution would
3647 		 * let the value of the counter be always accurate,
3648 		 * but it would entail using an extra interface
3649 		 * function. This cost seems higher than the benefit,
3650 		 * given the very low frequency of
3651 		 * non-elevator-private requests.
3652 		 */
3653 		goto start_rq;
3654 	}
3655 
3656 	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3657 
3658 	if (bfqd->busy_queues == 0)
3659 		goto exit;
3660 
3661 	/*
3662 	 * Force device to serve one request at a time if
3663 	 * strict_guarantees is true. Forcing this service scheme is
3664 	 * currently the ONLY way to guarantee that the request
3665 	 * service order enforced by the scheduler is respected by a
3666 	 * queueing device. Otherwise the device is free even to make
3667 	 * some unlucky request wait for as long as the device
3668 	 * wishes.
3669 	 *
3670 	 * Of course, serving one request at a time may cause loss of
3671 	 * throughput.
3672 	 */
3673 	if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3674 		goto exit;
3675 
3676 	bfqq = bfq_select_queue(bfqd);
3677 	if (!bfqq)
3678 		goto exit;
3679 
3680 	rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3681 
3682 	if (rq) {
3683 inc_in_driver_start_rq:
3684 		bfqd->rq_in_driver++;
3685 start_rq:
3686 		rq->rq_flags |= RQF_STARTED;
3687 	}
3688 exit:
3689 	return rq;
3690 }
3691 
3692 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3693 {
3694 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3695 	struct request *rq;
3696 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
3697 	struct bfq_queue *in_serv_queue, *bfqq;
3698 	bool waiting_rq, idle_timer_disabled;
3699 #endif
3700 
3701 	spin_lock_irq(&bfqd->lock);
3702 
3703 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
3704 	in_serv_queue = bfqd->in_service_queue;
3705 	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
3706 
3707 	rq = __bfq_dispatch_request(hctx);
3708 
3709 	idle_timer_disabled =
3710 		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
3711 
3712 #else
3713 	rq = __bfq_dispatch_request(hctx);
3714 #endif
3715 	spin_unlock_irq(&bfqd->lock);
3716 
3717 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
3718 	bfqq = rq ? RQ_BFQQ(rq) : NULL;
3719 	if (!idle_timer_disabled && !bfqq)
3720 		return rq;
3721 
3722 	/*
3723 	 * rq and bfqq are guaranteed to exist until this function
3724 	 * ends, for the following reasons. First, rq can be
3725 	 * dispatched to the device, and then can be completed and
3726 	 * freed, only after this function ends. Second, rq cannot be
3727 	 * merged (and thus freed because of a merge) any longer,
3728 	 * because it has already started. Thus rq cannot be freed
3729 	 * before this function ends, and, since rq has a reference to
3730 	 * bfqq, the same guarantee holds for bfqq too.
3731 	 *
3732 	 * In addition, the following queue lock guarantees that
3733 	 * bfqq_group(bfqq) exists as well.
3734 	 */
3735 	spin_lock_irq(hctx->queue->queue_lock);
3736 	if (idle_timer_disabled)
3737 		/*
3738 		 * Since the idle timer has been disabled,
3739 		 * in_serv_queue contained some request when
3740 		 * __bfq_dispatch_request was invoked above, which
3741 		 * implies that rq was picked exactly from
3742 		 * in_serv_queue. Thus in_serv_queue == bfqq, and is
3743 		 * therefore guaranteed to exist because of the above
3744 		 * arguments.
3745 		 */
3746 		bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
3747 	if (bfqq) {
3748 		struct bfq_group *bfqg = bfqq_group(bfqq);
3749 
3750 		bfqg_stats_update_avg_queue_size(bfqg);
3751 		bfqg_stats_set_start_empty_time(bfqg);
3752 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
3753 	}
3754 	spin_unlock_irq(hctx->queue->queue_lock);
3755 #endif
3756 
3757 	return rq;
3758 }
3759 
3760 /*
3761  * Task holds one reference to the queue, dropped when task exits.  Each rq
3762  * in-flight on this queue also holds a reference, dropped when rq is freed.
3763  *
3764  * Scheduler lock must be held here. Recall not to use bfqq after calling
3765  * this function on it.
3766  */
3767 void bfq_put_queue(struct bfq_queue *bfqq)
3768 {
3769 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3770 	struct bfq_group *bfqg = bfqq_group(bfqq);
3771 #endif
3772 
3773 	if (bfqq->bfqd)
3774 		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3775 			     bfqq, bfqq->ref);
3776 
3777 	bfqq->ref--;
3778 	if (bfqq->ref)
3779 		return;
3780 
3781 	if (!hlist_unhashed(&bfqq->burst_list_node)) {
3782 		hlist_del_init(&bfqq->burst_list_node);
3783 		/*
3784 		 * Also decrement burst_size after the removal, if the
3785 		 * process associated with bfqq is exiting, and thus
3786 		 * does not contribute to the burst any longer. This
3787 		 * decrement helps filter out false positives of large
3788 		 * bursts, when some short-lived process (often due to
3789 		 * the execution of commands by some service) happens
3790 		 * to start and exit while a complex application is
3791 		 * starting, and thus spawning several processes that
3792 		 * do I/O (and that *must not* be treated as a large
3793 		 * burst, see comments on bfq_handle_burst).
3794 		 *
3795 		 * In particular, the decrement is performed only if:
3796 		 * 1) bfqq is not a merged queue, because, if it is,
3797 		 * then this free of bfqq is not triggered by the exit
3798 		 * of the process bfqq is associated with, but exactly
3799 		 * by the fact that bfqq has just been merged.
3800 		 * 2) burst_size is greater than 0, to handle
3801 		 * unbalanced decrements. Unbalanced decrements may
3802 		 * happen in the following case: bfqq is inserted into
3803 		 * the current burst list--without incrementing
3804 		 * burst_size--because of a split, but the current
3805 		 * burst list is not the burst list bfqq belonged to
3806 		 * (see comments on the case of a split in
3807 		 * bfq_set_request).
3808 		 */
3809 		if (bfqq->bic && bfqq->bfqd->burst_size > 0)
3810 			bfqq->bfqd->burst_size--;
3811 	}
3812 
3813 	kmem_cache_free(bfq_pool, bfqq);
3814 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3815 	bfqg_and_blkg_put(bfqg);
3816 #endif
3817 }
3818 
3819 static void bfq_put_cooperator(struct bfq_queue *bfqq)
3820 {
3821 	struct bfq_queue *__bfqq, *next;
3822 
3823 	/*
3824 	 * If this queue was scheduled to merge with another queue, be
3825 	 * sure to drop the reference taken on that queue (and others in
3826 	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3827 	 */
3828 	__bfqq = bfqq->new_bfqq;
3829 	while (__bfqq) {
3830 		if (__bfqq == bfqq)
3831 			break;
3832 		next = __bfqq->new_bfqq;
3833 		bfq_put_queue(__bfqq);
3834 		__bfqq = next;
3835 	}
3836 }
3837 
3838 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3839 {
3840 	if (bfqq == bfqd->in_service_queue) {
3841 		__bfq_bfqq_expire(bfqd, bfqq);
3842 		bfq_schedule_dispatch(bfqd);
3843 	}
3844 
3845 	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3846 
3847 	bfq_put_cooperator(bfqq);
3848 
3849 	bfq_put_queue(bfqq); /* release process reference */
3850 }
3851 
3852 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3853 {
3854 	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3855 	struct bfq_data *bfqd;
3856 
3857 	if (bfqq)
3858 		bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3859 
3860 	if (bfqq && bfqd) {
3861 		unsigned long flags;
3862 
3863 		spin_lock_irqsave(&bfqd->lock, flags);
3864 		bfq_exit_bfqq(bfqd, bfqq);
3865 		bic_set_bfqq(bic, NULL, is_sync);
3866 		spin_unlock_irqrestore(&bfqd->lock, flags);
3867 	}
3868 }
3869 
3870 static void bfq_exit_icq(struct io_cq *icq)
3871 {
3872 	struct bfq_io_cq *bic = icq_to_bic(icq);
3873 
3874 	bfq_exit_icq_bfqq(bic, true);
3875 	bfq_exit_icq_bfqq(bic, false);
3876 }
3877 
3878 /*
3879  * Update the entity prio values; note that the new values will not
3880  * be used until the next (re)activation.
3881  */
3882 static void
3883 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3884 {
3885 	struct task_struct *tsk = current;
3886 	int ioprio_class;
3887 	struct bfq_data *bfqd = bfqq->bfqd;
3888 
3889 	if (!bfqd)
3890 		return;
3891 
3892 	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3893 	switch (ioprio_class) {
3894 	default:
3895 		dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3896 			"bfq: bad prio class %d\n", ioprio_class);
3897 		/* fall through */
3898 	case IOPRIO_CLASS_NONE:
3899 		/*
3900 		 * No prio set, inherit CPU scheduling settings.
3901 		 */
3902 		bfqq->new_ioprio = task_nice_ioprio(tsk);
3903 		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3904 		break;
3905 	case IOPRIO_CLASS_RT:
3906 		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3907 		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3908 		break;
3909 	case IOPRIO_CLASS_BE:
3910 		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3911 		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3912 		break;
3913 	case IOPRIO_CLASS_IDLE:
3914 		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3915 		bfqq->new_ioprio = 7;
3916 		break;
3917 	}
3918 
3919 	if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3920 		pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3921 			bfqq->new_ioprio);
3922 		bfqq->new_ioprio = IOPRIO_BE_NR - 1;
3923 	}
3924 
3925 	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3926 	bfqq->entity.prio_changed = 1;
3927 }
3928 
3929 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3930 				       struct bio *bio, bool is_sync,
3931 				       struct bfq_io_cq *bic);
3932 
3933 static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3934 {
3935 	struct bfq_data *bfqd = bic_to_bfqd(bic);
3936 	struct bfq_queue *bfqq;
3937 	int ioprio = bic->icq.ioc->ioprio;
3938 
3939 	/*
3940 	 * This condition may trigger on a newly created bic; be sure to
3941 	 * drop the lock before returning.
3942 	 */
3943 	if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3944 		return;
3945 
3946 	bic->ioprio = ioprio;
3947 
3948 	bfqq = bic_to_bfqq(bic, false);
3949 	if (bfqq) {
3950 		/* release process reference on this queue */
3951 		bfq_put_queue(bfqq);
3952 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3953 		bic_set_bfqq(bic, bfqq, false);
3954 	}
3955 
3956 	bfqq = bic_to_bfqq(bic, true);
3957 	if (bfqq)
3958 		bfq_set_next_ioprio_data(bfqq, bic);
3959 }
3960 
3961 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3962 			  struct bfq_io_cq *bic, pid_t pid, int is_sync)
3963 {
3964 	RB_CLEAR_NODE(&bfqq->entity.rb_node);
3965 	INIT_LIST_HEAD(&bfqq->fifo);
3966 	INIT_HLIST_NODE(&bfqq->burst_list_node);
3967 
3968 	bfqq->ref = 0;
3969 	bfqq->bfqd = bfqd;
3970 
3971 	if (bic)
3972 		bfq_set_next_ioprio_data(bfqq, bic);
3973 
3974 	if (is_sync) {
3975 		/*
3976 		 * No need to mark as has_short_ttime if in
3977 		 * idle_class, because no device idling is performed
3978 		 * for queues in idle class
3979 		 */
3980 		if (!bfq_class_idle(bfqq))
3981 			/* tentatively mark as has_short_ttime */
3982 			bfq_mark_bfqq_has_short_ttime(bfqq);
3983 		bfq_mark_bfqq_sync(bfqq);
3984 		bfq_mark_bfqq_just_created(bfqq);
3985 	} else
3986 		bfq_clear_bfqq_sync(bfqq);
3987 
3988 	/* set end request to minus infinity from now */
3989 	bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3990 
3991 	bfq_mark_bfqq_IO_bound(bfqq);
3992 
3993 	bfqq->pid = pid;
3994 
3995 	/* Tentative initial value to trade off between thr and lat */
3996 	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
3997 	bfqq->budget_timeout = bfq_smallest_from_now();
3998 
3999 	bfqq->wr_coeff = 1;
4000 	bfqq->last_wr_start_finish = jiffies;
4001 	bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
4002 	bfqq->split_time = bfq_smallest_from_now();
4003 
4004 	/*
4005 	 * Set to the value for which bfqq will not be deemed as
4006 	 * soft rt when it becomes backlogged.
4007 	 */
4008 	bfqq->soft_rt_next_start = bfq_greatest_from_now();
4009 
4010 	/* first request is almost certainly seeky */
4011 	bfqq->seek_history = 1;
4012 }
4013 
4014 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
4015 					       struct bfq_group *bfqg,
4016 					       int ioprio_class, int ioprio)
4017 {
4018 	switch (ioprio_class) {
4019 	case IOPRIO_CLASS_RT:
4020 		return &bfqg->async_bfqq[0][ioprio];
4021 	case IOPRIO_CLASS_NONE:
4022 		ioprio = IOPRIO_NORM;
4023 		/* fall through */
4024 	case IOPRIO_CLASS_BE:
4025 		return &bfqg->async_bfqq[1][ioprio];
4026 	case IOPRIO_CLASS_IDLE:
4027 		return &bfqg->async_idle_bfqq;
4028 	default:
4029 		return NULL;
4030 	}
4031 }
4032 
4033 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
4034 				       struct bio *bio, bool is_sync,
4035 				       struct bfq_io_cq *bic)
4036 {
4037 	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
4038 	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
4039 	struct bfq_queue **async_bfqq = NULL;
4040 	struct bfq_queue *bfqq;
4041 	struct bfq_group *bfqg;
4042 
4043 	rcu_read_lock();
4044 
4045 	bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
4046 	if (!bfqg) {
4047 		bfqq = &bfqd->oom_bfqq;
4048 		goto out;
4049 	}
4050 
4051 	if (!is_sync) {
4052 		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
4053 						  ioprio);
4054 		bfqq = *async_bfqq;
4055 		if (bfqq)
4056 			goto out;
4057 	}
4058 
4059 	bfqq = kmem_cache_alloc_node(bfq_pool,
4060 				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
4061 				     bfqd->queue->node);
4062 
4063 	if (bfqq) {
4064 		bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
4065 			      is_sync);
4066 		bfq_init_entity(&bfqq->entity, bfqg);
4067 		bfq_log_bfqq(bfqd, bfqq, "allocated");
4068 	} else {
4069 		bfqq = &bfqd->oom_bfqq;
4070 		bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
4071 		goto out;
4072 	}
4073 
4074 	/*
4075 	 * Pin the queue now that it's allocated; scheduler exit will
4076 	 * prune it.
4077 	 */
4078 	if (async_bfqq) {
4079 		bfqq->ref++; /*
4080 			      * Extra group reference, w.r.t. sync
4081 			      * queue. This extra reference is removed
4082 			      * only if bfqq->bfqg disappears, to
4083 			      * guarantee that this queue is not freed
4084 			      * until its group goes away.
4085 			      */
4086 		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
4087 			     bfqq, bfqq->ref);
4088 		*async_bfqq = bfqq;
4089 	}
4090 
4091 out:
4092 	bfqq->ref++; /* get a process reference to this queue */
4093 	bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
4094 	rcu_read_unlock();
4095 	return bfqq;
4096 }
4097 
4098 static void bfq_update_io_thinktime(struct bfq_data *bfqd,
4099 				    struct bfq_queue *bfqq)
4100 {
4101 	struct bfq_ttime *ttime = &bfqq->ttime;
4102 	u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
4103 
4104 	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
4105 
4106 	ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
4107 	ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
4108 	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
4109 				     ttime->ttime_samples);
4110 }
4111 
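/*
 * The three updates above implement a fixed-point exponentially
 * weighted moving average with weight 7/8 for past samples and a
 * scaling factor of 256: ttime_samples converges to 256 from below,
 * ttime_total converges to 256 times the weighted mean of the elapsed
 * times (each capped above to twice bfq_slice_idle), and ttime_mean is
 * their rounded ratio, i.e., the estimated average think time.
 */
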
4112 static void
4113 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4114 		       struct request *rq)
4115 {
4116 	bfqq->seek_history <<= 1;
4117 	bfqq->seek_history |=
4118 		get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
4119 		(!blk_queue_nonrot(bfqd->queue) ||
4120 		 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
4121 }
4122 
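/*
 * seek_history thus acts as a shift register of boolean samples: each
 * new request contributes one bit, set if its distance from the
 * previous request exceeds BFQQ_SEEK_THR, except that, on
 * non-rotational devices, the bit is set only if the request is also
 * smaller than BFQQ_SECT_THR_NONROT (a long seek costs little there,
 * and large requests keep the throughput high anyway). BFQQ_SEEKY(),
 * defined elsewhere in this file, then classifies the queue,
 * essentially through a threshold on the number of set bits in this
 * recent history.
 */
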
4123 static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
4124 				       struct bfq_queue *bfqq,
4125 				       struct bfq_io_cq *bic)
4126 {
4127 	bool has_short_ttime = true;
4128 
4129 	/*
4130 	 * No need to update has_short_ttime if bfqq is async or in
4131 	 * idle io prio class, or if bfq_slice_idle is zero, because
4132 	 * no device idling is performed for bfqq in this case.
4133 	 */
4134 	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
4135 	    bfqd->bfq_slice_idle == 0)
4136 		return;
4137 
4138 	/* Idle window just restored, statistics are meaningless. */
4139 	if (time_is_after_eq_jiffies(bfqq->split_time +
4140 				     bfqd->bfq_wr_min_idle_time))
4141 		return;
4142 
4143 	/* Think time is infinite if no process is linked to
4144 	 * bfqq. Otherwise check the average think time to
4145 	 * decide whether to mark bfqq as has_short_ttime.
4146 	 */
4147 	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
4148 	    (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
4149 	     bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
4150 		has_short_ttime = false;
4151 
4152 	bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
4153 		     has_short_ttime);
4154 
4155 	if (has_short_ttime)
4156 		bfq_mark_bfqq_has_short_ttime(bfqq);
4157 	else
4158 		bfq_clear_bfqq_has_short_ttime(bfqq);
4159 }
4160 
4161 /*
4162  * Called when a new fs request (rq) is added to bfqq.  Check if there's
4163  * something we should do about it.
4164  */
4165 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4166 			    struct request *rq)
4167 {
4168 	struct bfq_io_cq *bic = RQ_BIC(rq);
4169 
4170 	if (rq->cmd_flags & REQ_META)
4171 		bfqq->meta_pending++;
4172 
4173 	bfq_update_io_thinktime(bfqd, bfqq);
4174 	bfq_update_has_short_ttime(bfqd, bfqq, bic);
4175 	bfq_update_io_seektime(bfqd, bfqq, rq);
4176 
4177 	bfq_log_bfqq(bfqd, bfqq,
4178 		     "rq_enqueued: has_short_ttime=%d (seeky %d)",
4179 		     bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
4180 
4181 	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4182 
4183 	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4184 		bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4185 				 blk_rq_sectors(rq) < 32;
4186 		bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4187 
4188 		/*
4189 		 * There is just this request queued: if the request
4190 		 * is small and the queue is not to be expired, then
4191 		 * just exit.
4192 		 *
4193 		 * In this way, if the device is being idled to wait
4194 		 * for a new request from the in-service queue, we
4195 		 * avoid unplugging the device and committing the
4196 		 * device to serve just a small request. On the
4197 		 * contrary, we wait for the block layer to decide
4198 		 * when to unplug the device: hopefully, new requests
4199 		 * will be merged to this one quickly, then the device
4200 		 * will be unplugged and larger requests will be
4201 		 * dispatched.
4202 		 */
4203 		if (small_req && !budget_timeout)
4204 			return;
4205 
4206 		/*
4207 		 * A large enough request arrived, or the queue is to
4208 		 * be expired: in both cases disk idling is to be
4209 		 * stopped, so clear wait_request flag and reset
4210 		 * timer.
4211 		 */
4212 		bfq_clear_bfqq_wait_request(bfqq);
4213 		hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4214 
4215 		/*
4216 		 * The queue is not empty, because a new request just
4217 		 * arrived. Hence we can safely expire the queue, in
4218 		 * case of budget timeout, without risking that the
4219 		 * timestamps of the queue are not updated correctly.
4220 		 * See [1] for more details.
4221 		 */
4222 		if (budget_timeout)
4223 			bfq_bfqq_expire(bfqd, bfqq, false,
4224 					BFQQE_BUDGET_TIMEOUT);
4225 	}
4226 }
4227 
4228 /* returns true if it causes the idle timer to be disabled */
4229 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4230 {
4231 	struct bfq_queue *bfqq = RQ_BFQQ(rq),
4232 		*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4233 	bool waiting, idle_timer_disabled = false;
4234 
4235 	if (new_bfqq) {
4236 		if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4237 			new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4238 		/*
4239 		 * Release the request's reference to the old bfqq
4240 		 * and make sure one is taken to the shared queue.
4241 		 */
4242 		new_bfqq->allocated++;
4243 		bfqq->allocated--;
4244 		new_bfqq->ref++;
4245 		/*
4246 		 * If the bic associated with the process
4247 		 * issuing this request still points to bfqq
4248 		 * (and thus has not been already redirected
4249 		 * to new_bfqq or even some other bfq_queue),
4250 		 * then complete the merge and redirect it to
4251 		 * new_bfqq.
4252 		 */
4253 		if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4254 			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4255 					bfqq, new_bfqq);
4256 
4257 		bfq_clear_bfqq_just_created(bfqq);
4258 		/*
4259 		 * rq is about to be enqueued into new_bfqq,
4260 		 * release rq reference on bfqq
4261 		 */
4262 		bfq_put_queue(bfqq);
4263 		rq->elv.priv[1] = new_bfqq;
4264 		bfqq = new_bfqq;
4265 	}
4266 
4267 	waiting = bfqq && bfq_bfqq_wait_request(bfqq);
4268 	bfq_add_request(rq);
4269 	idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
4270 
4271 	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4272 	list_add_tail(&rq->queuelist, &bfqq->fifo);
4273 
4274 	bfq_rq_enqueued(bfqd, bfqq, rq);
4275 
4276 	return idle_timer_disabled;
4277 }
4278 
4279 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4280 			       bool at_head)
4281 {
4282 	struct request_queue *q = hctx->queue;
4283 	struct bfq_data *bfqd = q->elevator->elevator_data;
4284 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
4285 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
4286 	bool idle_timer_disabled = false;
4287 	unsigned int cmd_flags;
4288 #endif
4289 
4290 	spin_lock_irq(&bfqd->lock);
4291 	if (blk_mq_sched_try_insert_merge(q, rq)) {
4292 		spin_unlock_irq(&bfqd->lock);
4293 		return;
4294 	}
4295 
4296 	spin_unlock_irq(&bfqd->lock);
4297 
4298 	blk_mq_sched_request_inserted(rq);
4299 
4300 	spin_lock_irq(&bfqd->lock);
4301 	if (at_head || blk_rq_is_passthrough(rq)) {
4302 		if (at_head)
4303 			list_add(&rq->queuelist, &bfqd->dispatch);
4304 		else
4305 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
4306 	} else {
4307 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
4308 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
4309 		/*
4310 		 * Update bfqq, because, if a queue merge has occurred
4311 		 * in __bfq_insert_request, then rq has been
4312 		 * redirected into a new queue.
4313 		 */
4314 		bfqq = RQ_BFQQ(rq);
4315 #else
4316 		__bfq_insert_request(bfqd, rq);
4317 #endif
4318 
4319 		if (rq_mergeable(rq)) {
4320 			elv_rqhash_add(q, rq);
4321 			if (!q->last_merge)
4322 				q->last_merge = rq;
4323 		}
4324 	}
4325 
4326 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
4327 	/*
4328 	 * Cache cmd_flags before releasing scheduler lock, because rq
4329 	 * may disappear afterwards (for example, because of a request
4330 	 * merge).
4331 	 */
4332 	cmd_flags = rq->cmd_flags;
4333 #endif
4334 	spin_unlock_irq(&bfqd->lock);
4335 
4336 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
4337 	if (!bfqq)
4338 		return;
4339 	/*
4340 	 * bfqq still exists, because it can disappear only after
4341 	 * either it is merged with another queue, or the process it
4342 	 * is associated with exits. But both actions must be taken by
4343 	 * the same process currently executing this flow of
4344 	 * instruction.
4345 	 *
4346 	 * In addition, the following queue lock guarantees that
4347 	 * bfqq_group(bfqq) exists as well.
4348 	 */
4349 	spin_lock_irq(q->queue_lock);
4350 	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
4351 	if (idle_timer_disabled)
4352 		bfqg_stats_update_idle_time(bfqq_group(bfqq));
4353 	spin_unlock_irq(q->queue_lock);
4354 #endif
4355 }
4356 
4357 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4358 				struct list_head *list, bool at_head)
4359 {
4360 	while (!list_empty(list)) {
4361 		struct request *rq;
4362 
4363 		rq = list_first_entry(list, struct request, queuelist);
4364 		list_del_init(&rq->queuelist);
4365 		bfq_insert_request(hctx, rq, at_head);
4366 	}
4367 }
4368 
4369 static void bfq_update_hw_tag(struct bfq_data *bfqd)
4370 {
4371 	bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4372 				       bfqd->rq_in_driver);
4373 
4374 	if (bfqd->hw_tag == 1)
4375 		return;
4376 
4377 	/*
4378 	 * This sample is valid if the number of outstanding requests
4379 	 * is large enough to allow a queueing behavior.  Note that the
4380 	 * sum is not exact, as it's not taking into account deactivated
4381 	 * requests.
4382 	 */
4383 	if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4384 		return;
4385 
4386 	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4387 		return;
4388 
4389 	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4390 	bfqd->max_rq_in_driver = 0;
4391 	bfqd->hw_tag_samples = 0;
4392 }
4393 
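/*
 * Note the hysteresis in the detection above: once hw_tag becomes 1 it
 * is never recomputed (early return); otherwise it is recomputed only
 * after BFQ_HW_QUEUE_SAMPLES valid samples, where a sample is valid
 * only if enough requests are outstanding (rq_in_driver + queued not
 * below BFQ_HW_QUEUE_THRESHOLD). The device is then deemed to queue
 * internally if the maximum number of requests observed in the driver
 * exceeded that same threshold.
 */
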
4394 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4395 {
4396 	u64 now_ns;
4397 	u32 delta_us;
4398 
4399 	bfq_update_hw_tag(bfqd);
4400 
4401 	bfqd->rq_in_driver--;
4402 	bfqq->dispatched--;
4403 
4404 	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4405 		/*
4406 		 * Set budget_timeout (which we overload to store the
4407 		 * time at which the queue remains with no backlog and
4408 		 * no outstanding request; used by the weight-raising
4409 		 * mechanism).
4410 		 */
4411 		bfqq->budget_timeout = jiffies;
4412 
4413 		bfq_weights_tree_remove(bfqd, &bfqq->entity,
4414 					&bfqd->queue_weights_tree);
4415 	}
4416 
4417 	now_ns = ktime_get_ns();
4418 
4419 	bfqq->ttime.last_end_request = now_ns;
4420 
4421 	/*
4422 	 * Using us instead of ns, to get a reasonable precision in
4423 	 * computing the rate in the next check.
4424 	 */
4425 	delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4426 
4427 	/*
4428 	 * If the request took rather long to complete, and, according
4429 	 * to the maximum request size recorded, this completion latency
4430 	 * implies that the request was certainly served at a very low
4431 	 * rate (less than 1M sectors/sec), then the whole observation
4432 	 * interval that lasts up to this time instant cannot be a
4433 	 * valid time interval for computing a new peak rate.  Invoke
4434 	 * bfq_update_rate_reset to have the following three steps
4435 	 * taken:
4436 	 * - close the observation interval at the last (previous)
4437 	 *   request dispatch or completion
4438 	 * - compute rate, if possible, for that observation interval
4439 	 * - reset to zero samples, which will trigger a proper
4440 	 *   re-initialization of the observation interval on next
4441 	 *   dispatch
4442 	 */
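	/*
	 * Worked check, for reference: the left-hand side below is
	 * the observed rate in (sectors << BFQ_RATE_SHIFT) per usec,
	 * while the threshold is (1 << BFQ_RATE_SHIFT) / 1024, i.e.,
	 * 1/1024 sector per usec. Since 1 sector/usec corresponds to
	 * 10^6 sectors/sec, the check fires when the rate is below
	 * roughly 10^6/1024 ~= 1M sectors/sec, as stated above.
	 */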
4443 	if (delta_us > BFQ_MIN_TT / NSEC_PER_USEC &&
4444 	    (bfqd->last_rq_max_size << BFQ_RATE_SHIFT) / delta_us <
4445 			1UL << (BFQ_RATE_SHIFT - 10))
4446 		bfq_update_rate_reset(bfqd, NULL);
4447 	bfqd->last_completion = now_ns;
4448 
4449 	/*
4450 	 * If we are waiting to discover whether the request pattern
4451 	 * of the task associated with the queue is actually
4452 	 * isochronous, and both requisites for this condition to hold
4453 	 * are now satisfied, then compute soft_rt_next_start (see the
4454 	 * comments on the function bfq_bfqq_softrt_next_start()). We
4455 	 * schedule this delayed check when bfqq expires, if it still
4456 	 * has in-flight requests.
4457 	 */
4458 	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4459 	    RB_EMPTY_ROOT(&bfqq->sort_list))
4460 		bfqq->soft_rt_next_start =
4461 			bfq_bfqq_softrt_next_start(bfqd, bfqq);
4462 
4463 	/*
4464 	 * If this is the in-service queue, check if it needs to be expired,
4465 	 * or if we want to idle in case it has no pending requests.
4466 	 */
4467 	if (bfqd->in_service_queue == bfqq) {
4468 		if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
4469 			bfq_arm_slice_timer(bfqd);
4470 			return;
4471 		} else if (bfq_may_expire_for_budg_timeout(bfqq))
4472 			bfq_bfqq_expire(bfqd, bfqq, false,
4473 					BFQQE_BUDGET_TIMEOUT);
4474 		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4475 			 (bfqq->dispatched == 0 ||
4476 			  !bfq_bfqq_may_idle(bfqq)))
4477 			bfq_bfqq_expire(bfqd, bfqq, false,
4478 					BFQQE_NO_MORE_REQUESTS);
4479 	}
4480 
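	/*
	 * No request remains in the driver for the whole device:
	 * kick dispatching, so that queued requests, if any, get a
	 * chance to be dispatched now.
	 */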
4481 	if (!bfqd->rq_in_driver)
4482 		bfq_schedule_dispatch(bfqd);
4483 }
4484 
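/*
 * Release the resources that rq took on bfqq in bfq_prepare_request:
 * one allocated-request slot and one queue reference.
 */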
4485 static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4486 {
4487 	bfqq->allocated--;
4488 
4489 	bfq_put_queue(bfqq);
4490 }
4491 
4492 static void bfq_finish_request(struct request *rq)
4493 {
4494 	struct bfq_queue *bfqq;
4495 	struct bfq_data *bfqd;
4496 
4497 	if (!rq->elv.icq)
4498 		return;
4499 
4500 	bfqq = RQ_BFQQ(rq);
4501 	bfqd = bfqq->bfqd;
4502 
4503 	if (rq->rq_flags & RQF_STARTED)
4504 		bfqg_stats_update_completion(bfqq_group(bfqq),
4505 					     rq_start_time_ns(rq),
4506 					     rq_io_start_time_ns(rq),
4507 					     rq->cmd_flags);
4508 
4509 	if (likely(rq->rq_flags & RQF_STARTED)) {
4510 		unsigned long flags;
4511 
4512 		spin_lock_irqsave(&bfqd->lock, flags);
4513 
4514 		bfq_completed_request(bfqq, bfqd);
4515 		bfq_put_rq_priv_body(bfqq);
4516 
4517 		spin_unlock_irqrestore(&bfqd->lock, flags);
4518 	} else {
4519 		/*
4520 		 * Request rq may still (or already) be in the scheduler,
4521 		 * in which case we need to remove it. And we cannot
4522 		 * defer such a check and removal, to avoid
4523 		 * inconsistencies in the time interval from the end
4524 		 * of this function to the start of the deferred work.
4525 		 * This situation seems to occur only in process
4526 		 * context, as a consequence of a merge. In the
4527 		 * current version of the code, this implies that the
4528 		 * lock is held.
4529 		 */
4530 
4531 		if (!RB_EMPTY_NODE(&rq->rb_node)) {
4532 			bfq_remove_request(rq->q, rq);
4533 			bfqg_stats_update_io_remove(bfqq_group(bfqq),
4534 						    rq->cmd_flags);
4535 		}
4536 		bfq_put_rq_priv_body(bfqq);
4537 	}
4538 
4539 	rq->elv.priv[0] = NULL;
4540 	rq->elv.priv[1] = NULL;
4541 }
4542 
4543 /*
4544  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4545  * was the last process referring to that bfqq.
4546  */
4547 static struct bfq_queue *
4548 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4549 {
4550 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4551 
4552 	if (bfqq_process_refs(bfqq) == 1) {
4553 		bfqq->pid = current->pid;
4554 		bfq_clear_bfqq_coop(bfqq);
4555 		bfq_clear_bfqq_split_coop(bfqq);
4556 		return bfqq;
4557 	}
4558 
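	/*
	 * More than one process still refers to bfqq: detach the sync
	 * queue from the bic and drop our references (to the
	 * cooperating queues and to bfqq itself), so that the caller
	 * allocates a fresh queue for this process.
	 */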
4559 	bic_set_bfqq(bic, NULL, 1);
4560 
4561 	bfq_put_cooperator(bfqq);
4562 
4563 	bfq_put_queue(bfqq);
4564 	return NULL;
4565 }
4566 
4567 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4568 						   struct bfq_io_cq *bic,
4569 						   struct bio *bio,
4570 						   bool split, bool is_sync,
4571 						   bool *new_queue)
4572 {
4573 	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4574 
4575 	if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4576 		return bfqq;
4577 
4578 	if (new_queue)
4579 		*new_queue = true;
4580 
4581 	if (bfqq)
4582 		bfq_put_queue(bfqq);
4583 	bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4584 
4585 	bic_set_bfqq(bic, bfqq, is_sync);
4586 	if (split && is_sync) {
4587 		if ((bic->was_in_burst_list && bfqd->large_burst) ||
4588 		    bic->saved_in_large_burst)
4589 			bfq_mark_bfqq_in_large_burst(bfqq);
4590 		else {
4591 			bfq_clear_bfqq_in_large_burst(bfqq);
4592 			if (bic->was_in_burst_list)
4593 				/*
4594 				 * If bfqq was in the current
4595 				 * burst list before being
4596 				 * merged, then we have to add
4597 				 * it back. And we do not need
4598 				 * to increase burst_size, as
4599 				 * we did not decrement
4600 				 * burst_size when we removed
4601 				 * bfqq from the burst list as
4602 				 * a consequence of a merge
4603 				 * (see comments in
4604 				 * bfq_put_queue). In this
4605 				 * respect, it would be rather
4606 				 * costly to know whether the
4607 				 * current burst list is still
4608 				 * the same burst list from
4609 				 * which bfqq was removed on
4610 				 * the merge. To avoid this
4611 				 * cost, if bfqq was in a
4612 				 * burst list, then we add
4613 				 * bfqq to the current burst
4614 				 * list without any further
4615 				 * check. This can cause
4616 				 * inappropriate insertions,
4617 				 * but rarely enough to not
4618 				 * harm the detection of large
4619 				 * bursts significantly.
4620 				 */
4621 				hlist_add_head(&bfqq->burst_list_node,
4622 					       &bfqd->burst_list);
4623 		}
4624 		bfqq->split_time = jiffies;
4625 	}
4626 
4627 	return bfqq;
4628 }
4629 
4630 /*
4631  * Allocate bfq data structures associated with this request.
4632  */
4633 static void bfq_prepare_request(struct request *rq, struct bio *bio)
4634 {
4635 	struct request_queue *q = rq->q;
4636 	struct bfq_data *bfqd = q->elevator->elevator_data;
4637 	struct bfq_io_cq *bic;
4638 	const int is_sync = rq_is_sync(rq);
4639 	struct bfq_queue *bfqq;
4640 	bool new_queue = false;
4641 	bool bfqq_already_existing = false, split = false;
4642 
4643 	if (!rq->elv.icq)
4644 		return;
4645 	bic = icq_to_bic(rq->elv.icq);
4646 
4647 	spin_lock_irq(&bfqd->lock);
4648 
4649 	bfq_check_ioprio_change(bic, bio);
4650 
4651 	bfq_bic_update_cgroup(bic, bio);
4652 
4653 	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4654 					 &new_queue);
4655 
4656 	if (likely(!new_queue)) {
4657 		/* If the queue was seeky for too long, break it apart. */
4658 		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4659 			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
4660 
4661 			/* Update bic before losing reference to bfqq */
4662 			if (bfq_bfqq_in_large_burst(bfqq))
4663 				bic->saved_in_large_burst = true;
4664 
4665 			bfqq = bfq_split_bfqq(bic, bfqq);
4666 			split = true;
4667 
4668 			if (!bfqq)
4669 				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4670 								 true, is_sync,
4671 								 NULL);
4672 			else
4673 				bfqq_already_existing = true;
4674 		}
4675 	}
4676 
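	/*
	 * Account rq on bfqq: one more allocated request and one more
	 * queue reference, both released in bfq_finish_request (via
	 * bfq_put_rq_priv_body).
	 */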
4677 	bfqq->allocated++;
4678 	bfqq->ref++;
4679 	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4680 		     rq, bfqq, bfqq->ref);
4681 
4682 	rq->elv.priv[0] = bic;
4683 	rq->elv.priv[1] = bfqq;
4684 
4685 	/*
4686 	 * If a bfq_queue has only one process reference, it is owned
4687 	 * by only this bic: we can then set bfqq->bic = bic. in
4688 	 * by only this bic: we can then set bfqq->bic = bic. In
4689 	 * resume its state.
4690 	 */
4691 	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4692 		bfqq->bic = bic;
4693 		if (split) {
4694 			/*
4695 			 * The queue has just been split from a shared
4696 			 * queue: restore the idle window and the
4697 			 * possible weight raising period.
4698 			 */
4699 			bfq_bfqq_resume_state(bfqq, bfqd, bic,
4700 					      bfqq_already_existing);
4701 		}
4702 	}
4703 
4704 	if (unlikely(bfq_bfqq_just_created(bfqq)))
4705 		bfq_handle_burst(bfqd, bfqq);
4706 
4707 	spin_unlock_irq(&bfqd->lock);
4708 }
4709 
4710 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
4711 {
4712 	struct bfq_data *bfqd = bfqq->bfqd;
4713 	enum bfqq_expiration reason;
4714 	unsigned long flags;
4715 
4716 	spin_lock_irqsave(&bfqd->lock, flags);
4717 	bfq_clear_bfqq_wait_request(bfqq);
4718 
4719 	if (bfqq != bfqd->in_service_queue) {
4720 		spin_unlock_irqrestore(&bfqd->lock, flags);
4721 		return;
4722 	}
4723 
4724 	if (bfq_bfqq_budget_timeout(bfqq))
4725 		/*
4726 		 * Here too, the queue can safely be expired for
4727 		 * budget timeout without wasting service
4728 		 * guarantees.
4729 		 */
4730 		reason = BFQQE_BUDGET_TIMEOUT;
4731 	else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4732 		/*
4733 		 * The queue may not be empty upon timer expiration,
4734 		 * because we may not disable the timer when the
4735 		 * first request of the in-service queue arrives
4736 		 * during disk idling.
4737 		 */
4738 		reason = BFQQE_TOO_IDLE;
4739 	else
4740 		goto schedule_dispatch;
4741 
4742 	bfq_bfqq_expire(bfqd, bfqq, true, reason);
4743 
4744 schedule_dispatch:
4745 	spin_unlock_irqrestore(&bfqd->lock, flags);
4746 	bfq_schedule_dispatch(bfqd);
4747 }
4748 
4749 /*
4750  * Handler for the expiration of the timer that runs while the in-service
4751  * queue is idling inside its time slice.
4752  */
4753 static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4754 {
4755 	struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4756 					     idle_slice_timer);
4757 	struct bfq_queue *bfqq = bfqd->in_service_queue;
4758 
4759 	/*
4760 	 * Theoretical race here: the in-service queue can be NULL or
4761 	 * different from the queue that was idling if a new request
4762 	 * arrives for the current queue and there is a full dispatch
4763 	 * cycle that changes the in-service queue.  This is unlikely
4764 	 * to happen, but in the worst case we just expire a queue too
4765 	 * early.
4766 	 */
4767 	if (bfqq)
4768 		bfq_idle_slice_timer_body(bfqq);
4769 
4770 	return HRTIMER_NORESTART;
4771 }
4772 
4773 static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4774 				 struct bfq_queue **bfqq_ptr)
4775 {
4776 	struct bfq_queue *bfqq = *bfqq_ptr;
4777 
4778 	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4779 	if (bfqq) {
4780 		bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4781 
4782 		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4783 			     bfqq, bfqq->ref);
4784 		bfq_put_queue(bfqq);
4785 		*bfqq_ptr = NULL;
4786 	}
4787 }
4788 
4789 /*
4790  * Release all the bfqg references to its async queues.  If we are
4791  * deallocating the group, these queues may still contain requests, so
4792  * we reparent them to the root cgroup (i.e., the only one that will
4793  * exist for sure until all the requests on a device are gone).
4794  */
4795 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
4796 {
4797 	int i, j;
4798 
4799 	for (i = 0; i < 2; i++)
4800 		for (j = 0; j < IOPRIO_BE_NR; j++)
4801 			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
4802 
4803 	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
4804 }
4805 
4806 static void bfq_exit_queue(struct elevator_queue *e)
4807 {
4808 	struct bfq_data *bfqd = e->elevator_data;
4809 	struct bfq_queue *bfqq, *n;
4810 
4811 	hrtimer_cancel(&bfqd->idle_slice_timer);
4812 
4813 	spin_lock_irq(&bfqd->lock);
4814 	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
4815 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
4816 	spin_unlock_irq(&bfqd->lock);
4817 
4818 	hrtimer_cancel(&bfqd->idle_slice_timer);
4819 
4820 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4821 	blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4822 #else
4823 	spin_lock_irq(&bfqd->lock);
4824 	bfq_put_async_queues(bfqd, bfqd->root_group);
4825 	kfree(bfqd->root_group);
4826 	spin_unlock_irq(&bfqd->lock);
4827 #endif
4828 
4829 	kfree(bfqd);
4830 }
4831 
4832 static void bfq_init_root_group(struct bfq_group *root_group,
4833 				struct bfq_data *bfqd)
4834 {
4835 	int i;
4836 
4837 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4838 	root_group->entity.parent = NULL;
4839 	root_group->my_entity = NULL;
4840 	root_group->bfqd = bfqd;
4841 #endif
4842 	root_group->rq_pos_tree = RB_ROOT;
4843 	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4844 		root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4845 	root_group->sched_data.bfq_class_idle_last_service = jiffies;
4846 }
4847 
4848 static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4849 {
4850 	struct bfq_data *bfqd;
4851 	struct elevator_queue *eq;
4852 
4853 	eq = elevator_alloc(q, e);
4854 	if (!eq)
4855 		return -ENOMEM;
4856 
4857 	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4858 	if (!bfqd) {
4859 		kobject_put(&eq->kobj);
4860 		return -ENOMEM;
4861 	}
4862 	eq->elevator_data = bfqd;
4863 
4864 	spin_lock_irq(q->queue_lock);
4865 	q->elevator = eq;
4866 	spin_unlock_irq(q->queue_lock);
4867 
4868 	/*
4869 	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
4870 	 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
4871 	 * will not attempt to free it.
4872 	 */
4873 	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4874 	bfqd->oom_bfqq.ref++;
4875 	bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4876 	bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4877 	bfqd->oom_bfqq.entity.new_weight =
4878 		bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
4879 
4880 	/* oom_bfqq does not participate to bursts */
4881 	/* oom_bfqq does not participate in bursts */
4882 
4883 	/*
4884 	 * Trigger weight initialization, according to ioprio, at the
4885 	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4886 	 * class won't be changed any more.
4887 	 */
4888 	bfqd->oom_bfqq.entity.prio_changed = 1;
4889 
4890 	bfqd->queue = q;
4891 
4892 	INIT_LIST_HEAD(&bfqd->dispatch);
4893 
4894 	hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4895 		     HRTIMER_MODE_REL);
4896 	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4897 
4898 	bfqd->queue_weights_tree = RB_ROOT;
4899 	bfqd->group_weights_tree = RB_ROOT;
4900 
4901 	INIT_LIST_HEAD(&bfqd->active_list);
4902 	INIT_LIST_HEAD(&bfqd->idle_list);
4903 	INIT_HLIST_HEAD(&bfqd->burst_list);
4904 
4905 	bfqd->hw_tag = -1;
4906 
4907 	bfqd->bfq_max_budget = bfq_default_max_budget;
4908 
4909 	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4910 	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4911 	bfqd->bfq_back_max = bfq_back_max;
4912 	bfqd->bfq_back_penalty = bfq_back_penalty;
4913 	bfqd->bfq_slice_idle = bfq_slice_idle;
4914 	bfqd->bfq_timeout = bfq_timeout;
4915 
4916 	bfqd->bfq_requests_within_timer = 120;
4917 
4918 	bfqd->bfq_large_burst_thresh = 8;
4919 	bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4920 
4921 	bfqd->low_latency = true;
4922 
4923 	/*
4924 	 * Trade-off between responsiveness and fairness.
4925 	 */
4926 	bfqd->bfq_wr_coeff = 30;
4927 	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
4928 	bfqd->bfq_wr_max_time = 0;
4929 	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4930 	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
4931 	bfqd->bfq_wr_max_softrt_rate = 7000; /*
4932 					      * Approximate rate required
4933 					      * to play back or record a
4934 					      * high-definition compressed
4935 					      * video.
4936 					      */
4937 	bfqd->wr_busy_queues = 0;
4938 
4939 	/*
4940 	 * Begin by assuming, optimistically, that the device is a
4941 	 * high-speed one, and that its peak rate is equal to 2/3 of
4942 	 * the highest reference rate.
4943 	 */
4944 	bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4945 			T_fast[blk_queue_nonrot(bfqd->queue)];
4946 	bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4947 	bfqd->device_speed = BFQ_BFQD_FAST;
4948 
4949 	spin_lock_init(&bfqd->lock);
4950 
4951 	/*
4952 	 * The invocation of the next bfq_create_group_hierarchy
4953 	 * function is the head of a chain of function calls
4954 	 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4955 	 * blk_mq_freeze_queue) that may lead to the invocation of the
4956 	 * has_work hook function. For this reason,
4957 	 * bfq_create_group_hierarchy is invoked only after all
4958 	 * scheduler data has been initialized, apart from the fields
4959 	 * that can be initialized only after invoking
4960 	 * bfq_create_group_hierarchy. This, in particular, enables
4961 	 * has_work to correctly return false. Of course, to avoid
4962 	 * other inconsistencies, the blk-mq stack must then refrain
4963 	 * from invoking further scheduler hooks before this init
4964 	 * function is finished.
4965 	 */
4966 	bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4967 	if (!bfqd->root_group)
4968 		goto out_free;
4969 	bfq_init_root_group(bfqd->root_group, bfqd);
4970 	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4971 
4972 	wbt_disable_default(q);
4973 	return 0;
4974 
4975 out_free:
4976 	kfree(bfqd);
4977 	kobject_put(&eq->kobj);
4978 	return -ENOMEM;
4979 }
4980 
4981 static void bfq_slab_kill(void)
4982 {
4983 	kmem_cache_destroy(bfq_pool);
4984 }
4985 
4986 static int __init bfq_slab_setup(void)
4987 {
4988 	bfq_pool = KMEM_CACHE(bfq_queue, 0);
4989 	if (!bfq_pool)
4990 		return -ENOMEM;
4991 	return 0;
4992 }
4993 
4994 static ssize_t bfq_var_show(unsigned int var, char *page)
4995 {
4996 	return sprintf(page, "%u\n", var);
4997 }
4998 
4999 static int bfq_var_store(unsigned long *var, const char *page)
5000 {
5001 	unsigned long new_val;
5002 	int ret = kstrtoul(page, 10, &new_val);
5003 
5004 	if (ret)
5005 		return ret;
5006 	*var = new_val;
5007 	return 0;
5008 }
5009 
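/*
 * __CONV selects the conversion applied to the stored value before it
 * is shown: 0 = none, 1 = jiffies to msecs, 2 = nsecs to msecs.
 */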
5010 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
5011 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
5012 {									\
5013 	struct bfq_data *bfqd = e->elevator_data;			\
5014 	u64 __data = __VAR;						\
5015 	if (__CONV == 1)						\
5016 		__data = jiffies_to_msecs(__data);			\
5017 	else if (__CONV == 2)						\
5018 		__data = div_u64(__data, NSEC_PER_MSEC);		\
5019 	return bfq_var_show(__data, (page));				\
5020 }
5021 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
5022 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
5023 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
5024 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
5025 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
5026 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
5027 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
5028 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
5029 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
5030 #undef SHOW_FUNCTION
5031 
5032 #define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
5033 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
5034 {									\
5035 	struct bfq_data *bfqd = e->elevator_data;			\
5036 	u64 __data = __VAR;						\
5037 	__data = div_u64(__data, NSEC_PER_USEC);			\
5038 	return bfq_var_show(__data, (page));				\
5039 }
5040 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
5041 #undef USEC_SHOW_FUNCTION
5042 
5043 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
5044 static ssize_t								\
5045 __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
5046 {									\
5047 	struct bfq_data *bfqd = e->elevator_data;			\
5048 	unsigned long __data, __min = (MIN), __max = (MAX);		\
5049 	int ret;							\
5050 									\
5051 	ret = bfq_var_store(&__data, (page));				\
5052 	if (ret)							\
5053 		return ret;						\
5054 	if (__data < __min)						\
5055 		__data = __min;						\
5056 	else if (__data > __max)					\
5057 		__data = __max;						\
5058 	if (__CONV == 1)						\
5059 		*(__PTR) = msecs_to_jiffies(__data);			\
5060 	else if (__CONV == 2)						\
5061 		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
5062 	else								\
5063 		*(__PTR) = __data;					\
5064 	return count;							\
5065 }
5066 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
5067 		INT_MAX, 2);
5068 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
5069 		INT_MAX, 2);
5070 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
5071 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
5072 		INT_MAX, 0);
5073 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
5074 #undef STORE_FUNCTION
5075 
5076 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
5077 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
5078 {									\
5079 	struct bfq_data *bfqd = e->elevator_data;			\
5080 	unsigned long __data, __min = (MIN), __max = (MAX);		\
5081 	int ret;							\
5082 									\
5083 	ret = bfq_var_store(&__data, (page));				\
5084 	if (ret)							\
5085 		return ret;						\
5086 	if (__data < __min)						\
5087 		__data = __min;						\
5088 	else if (__data > __max)					\
5089 		__data = __max;						\
5090 	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
5091 	return count;							\
5092 }
5093 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
5094 		    UINT_MAX);
5095 #undef USEC_STORE_FUNCTION
5096 
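/*
 * Writing 0 re-enables auto-tuning: max_budget is then recomputed from
 * the estimated peak rate and the budget timeout (see
 * bfq_calc_max_budget), and updated again whenever those quantities
 * change (as, e.g., in bfq_timeout_sync_store below).
 */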
5097 static ssize_t bfq_max_budget_store(struct elevator_queue *e,
5098 				    const char *page, size_t count)
5099 {
5100 	struct bfq_data *bfqd = e->elevator_data;
5101 	unsigned long __data;
5102 	int ret;
5103 
5104 	ret = bfq_var_store(&__data, (page));
5105 	if (ret)
5106 		return ret;
5107 
5108 	if (__data == 0)
5109 		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
5110 	else {
5111 		if (__data > INT_MAX)
5112 			__data = INT_MAX;
5113 		bfqd->bfq_max_budget = __data;
5114 	}
5115 
5116 	bfqd->bfq_user_max_budget = __data;
5117 
5118 	return count;
5119 }
5120 
5121 /*
5122  * Keeping this name to preserve compatibility with cfq parameter
5123  * names, but this timeout is used for both sync and async requests.
5124  */
5125 static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
5126 				      const char *page, size_t count)
5127 {
5128 	struct bfq_data *bfqd = e->elevator_data;
5129 	unsigned long __data;
5130 	int ret;
5131 
5132 	ret = bfq_var_store(&__data, (page));
5133 	if (ret)
5134 		return ret;
5135 
5136 	if (__data < 1)
5137 		__data = 1;
5138 	else if (__data > INT_MAX)
5139 		__data = INT_MAX;
5140 
5141 	bfqd->bfq_timeout = msecs_to_jiffies(__data);
5142 	if (bfqd->bfq_user_max_budget == 0)
5143 		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
5144 
5145 	return count;
5146 }
5147 
5148 static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
5149 				     const char *page, size_t count)
5150 {
5151 	struct bfq_data *bfqd = e->elevator_data;
5152 	unsigned long __data;
5153 	int ret;
5154 
5155 	ret = bfq_var_store(&__data, (page));
5156 	if (ret)
5157 		return ret;
5158 
5159 	if (__data > 1)
5160 		__data = 1;
5161 	if (!bfqd->strict_guarantees && __data == 1
5162 	    && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
5163 		bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
5164 
5165 	bfqd->strict_guarantees = __data;
5166 
5167 	return count;
5168 }
5169 
5170 static ssize_t bfq_low_latency_store(struct elevator_queue *e,
5171 				     const char *page, size_t count)
5172 {
5173 	struct bfq_data *bfqd = e->elevator_data;
5174 	unsigned long __data;
5175 	int ret;
5176 
5177 	ret = bfq_var_store(&__data, (page));
5178 	if (ret)
5179 		return ret;
5180 
5181 	if (__data > 1)
5182 		__data = 1;
5183 	if (__data == 0 && bfqd->low_latency != 0)
5184 		bfq_end_wr(bfqd);
5185 	bfqd->low_latency = __data;
5186 
5187 	return count;
5188 }
5189 
5190 #define BFQ_ATTR(name) \
5191 	__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
5192 
5193 static struct elv_fs_entry bfq_attrs[] = {
5194 	BFQ_ATTR(fifo_expire_sync),
5195 	BFQ_ATTR(fifo_expire_async),
5196 	BFQ_ATTR(back_seek_max),
5197 	BFQ_ATTR(back_seek_penalty),
5198 	BFQ_ATTR(slice_idle),
5199 	BFQ_ATTR(slice_idle_us),
5200 	BFQ_ATTR(max_budget),
5201 	BFQ_ATTR(timeout_sync),
5202 	BFQ_ATTR(strict_guarantees),
5203 	BFQ_ATTR(low_latency),
5204 	__ATTR_NULL
5205 };
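
/*
 * The attributes above are exposed under the iosched directory of each
 * device using bfq. For example (the device name sda below is just an
 * illustration):
 *
 *   cat /sys/block/sda/queue/iosched/low_latency
 *   echo 0 > /sys/block/sda/queue/iosched/low_latency
 */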
5206 
5207 static struct elevator_type iosched_bfq_mq = {
5208 	.ops.mq = {
5209 		.prepare_request	= bfq_prepare_request,
5210 		.finish_request		= bfq_finish_request,
5211 		.exit_icq		= bfq_exit_icq,
5212 		.insert_requests	= bfq_insert_requests,
5213 		.dispatch_request	= bfq_dispatch_request,
5214 		.next_request		= elv_rb_latter_request,
5215 		.former_request		= elv_rb_former_request,
5216 		.allow_merge		= bfq_allow_bio_merge,
5217 		.bio_merge		= bfq_bio_merge,
5218 		.request_merge		= bfq_request_merge,
5219 		.requests_merged	= bfq_requests_merged,
5220 		.request_merged		= bfq_request_merged,
5221 		.has_work		= bfq_has_work,
5222 		.init_sched		= bfq_init_queue,
5223 		.exit_sched		= bfq_exit_queue,
5224 	},
5225 
5226 	.uses_mq =		true,
5227 	.icq_size =		sizeof(struct bfq_io_cq),
5228 	.icq_align =		__alignof__(struct bfq_io_cq),
5229 	.elevator_attrs =	bfq_attrs,
5230 	.elevator_name =	"bfq",
5231 	.elevator_owner =	THIS_MODULE,
5232 };
5233 MODULE_ALIAS("bfq-iosched");
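
/*
 * Once this scheduler is registered, it can be selected per device
 * through the scheduler attribute (again, sda is just an example):
 *
 *   echo bfq > /sys/block/sda/queue/scheduler
 */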
5234 
5235 static int __init bfq_init(void)
5236 {
5237 	int ret;
5238 
5239 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5240 	ret = blkcg_policy_register(&blkcg_policy_bfq);
5241 	if (ret)
5242 		return ret;
5243 #endif
5244 
5245 	ret = -ENOMEM;
5246 	if (bfq_slab_setup())
5247 		goto err_pol_unreg;
5248 
5249 	/*
5250 	 * Times to load large popular applications for the typical
5251 	 * systems installed on the reference devices (see the
5252 	 * comments before the definitions of the next two
5253 	 * arrays). Actually, we use slightly slower values, as the
5254 	 * estimated peak rate tends to be smaller than the actual
5255 	 * peak rate.  The reason for this last fact is that estimates
5256 	 * are computed over much shorter time intervals than the long
5257 	 * intervals typically used for benchmarking. Why? First, to
5258 	 * adapt more quickly to variations. Second, because an I/O
5259 	 * scheduler cannot rely on a peak-rate-evaluation workload to
5260 	 * be run for a long time.
5261 	 */
5262 	T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5263 	T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5264 	T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5265 	T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5266 
5267 	/*
5268 	 * Thresholds that determine the switch between speed classes
5269 	 * (see the comments before the definition of the array
5270 	 * device_speed_thresh). These thresholds are biased towards
5271 	 * transitions to the fast class. This is safer than the
5272 	 * opposite bias. In fact, a wrong transition to the slow
5273 	 * class results in short weight-raising periods, because the
5274 	 * speed of the device then tends to be higher than the
5275 	 * reference peak rate. On the opposite end, a wrong
5276 	 * transition to the fast class tends to lengthen
5277 	 * weight-raising periods, for the opposite reason.
5278 	 */
5279 	device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5280 	device_speed_thresh[1] = (4 * R_slow[1]) / 3;
5281 
5282 	ret = elv_register(&iosched_bfq_mq);
5283 	if (ret)
5284 		goto slab_kill;
5285 
5286 	return 0;
5287 
5288 slab_kill:
5289 	bfq_slab_kill();
5290 err_pol_unreg:
5291 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5292 	blkcg_policy_unregister(&blkcg_policy_bfq);
5293 #endif
5294 	return ret;
5295 }
5296 
5297 static void __exit bfq_exit(void)
5298 {
5299 	elv_unregister(&iosched_bfq_mq);
5300 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5301 	blkcg_policy_unregister(&blkcg_policy_bfq);
5302 #endif
5303 	bfq_slab_kill();
5304 }
5305 
5306 module_init(bfq_init);
5307 module_exit(bfq_exit);
5308 
5309 MODULE_AUTHOR("Paolo Valente");
5310 MODULE_LICENSE("GPL");
5311 MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
5312