xref: /openbmc/linux/block/bfq-wf2q.c (revision dea54fba)
1 /*
2  * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
3  * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
4  * scheduler schedules generic entities. The latter can represent
5  * either single bfq queues (associated with processes) or groups of
6  * bfq queues (associated with cgroups).
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License as
10  *  published by the Free Software Foundation; either version 2 of the
11  *  License, or (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  *  General Public License for more details.
17  */
18 #include "bfq-iosched.h"
19 
20 /**
21  * bfq_gt - compare two timestamps.
22  * @a: first ts.
23  * @b: second ts.
24  *
25  * Return @a > @b, dealing with wrapping correctly.
26  */
27 static int bfq_gt(u64 a, u64 b)
28 {
29 	return (s64)(a - b) > 0;
30 }
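
/*
 * Worked example (illustrative, not part of the original code): with
 * a = 2 and b = U64_MAX - 1, the difference a - b wraps around to 4,
 * and (s64)4 > 0, so a is correctly treated as the later timestamp even
 * though it is numerically smaller. The comparison stays correct as
 * long as the two timestamps are less than 2^63 apart.
 */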
31 
32 static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
33 {
34 	struct rb_node *node = tree->rb_node;
35 
36 	return rb_entry(node, struct bfq_entity, rb_node);
37 }
38 
39 static unsigned int bfq_class_idx(struct bfq_entity *entity)
40 {
41 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
42 
43 	return bfqq ? bfqq->ioprio_class - 1 :
44 		BFQ_DEFAULT_GRP_CLASS - 1;
45 }
46 
47 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
48 
49 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
50 
51 /**
52  * bfq_update_next_in_service - update sd->next_in_service
53  * @sd: sched_data for which to perform the update.
54  * @new_entity: if not NULL, pointer to the entity whose activation,
55  *		requeueing or repositioning triggered the invocation of
56  *		this function.
57  *
58  * This function is called to update sd->next_in_service, which, in
59  * its turn, may change as a consequence of the insertion or
60  * extraction of an entity into/from one of the active trees of
61  * sd. These insertions/extractions occur as a consequence of
62  * activations/deactivations of entities, with some activations being
63  * 'true' activations, and other activations being requeueings (i.e.,
64  * implementing the second, requeueing phase of the mechanism used to
65  * reposition an entity in its active tree; see comments on
66  * __bfq_activate_entity and __bfq_requeue_entity for details). In
67  * both the last two activation sub-cases, new_entity points to the
68  * just activated or requeued entity.
69  *
70  * Returns true if sd->next_in_service changes in such a way that
71  * entity->parent may become the next_in_service entity for its
72  * parent entity.
73  */
74 static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
75 				       struct bfq_entity *new_entity)
76 {
77 	struct bfq_entity *next_in_service = sd->next_in_service;
78 	bool parent_sched_may_change = false;
79 
80 	/*
81 	 * If this update is triggered by the activation, requeueing
82 	 * or repositioning of an entity that does not coincide with
83 	 * sd->next_in_service, then a full lookup in the active tree
84 	 * can be avoided. In fact, it is enough to check whether the
85 	 * just-modified entity has a higher priority than
86 	 * sd->next_in_service, or, even if it has the same priority
87 	 * as sd->next_in_service, is eligible and has a lower virtual
88 	 * finish time than sd->next_in_service. If this compound
89 	 * condition holds, then the new entity becomes the new
90 	 * next_in_service. Otherwise no change is needed.
91 	 */
92 	if (new_entity && new_entity != sd->next_in_service) {
93 		/*
94 		 * Flag used to decide whether to replace
95 		 * sd->next_in_service with new_entity. Tentatively
96 		 * set to true, and left as true if
97 		 * sd->next_in_service is NULL.
98 		 */
99 		bool replace_next = true;
100 
101 		/*
102 		 * If there is already a next_in_service candidate
103 		 * entity, then compare class priorities or timestamps
104 		 * to decide whether to replace sd->next_in_service with
105 		 * new_entity.
106 		 */
107 		if (next_in_service) {
108 			unsigned int new_entity_class_idx =
109 				bfq_class_idx(new_entity);
110 			struct bfq_service_tree *st =
111 				sd->service_tree + new_entity_class_idx;
112 
113 			/*
114 			 * For efficiency, evaluate the most likely
115 			 * sub-condition first.
116 			 */
117 			replace_next =
118 				(new_entity_class_idx ==
119 				 bfq_class_idx(next_in_service)
120 				 &&
121 				 !bfq_gt(new_entity->start, st->vtime)
122 				 &&
123 				 bfq_gt(next_in_service->finish,
124 					new_entity->finish))
125 				||
126 				new_entity_class_idx <
127 				bfq_class_idx(next_in_service);
128 		}
129 
130 		if (replace_next)
131 			next_in_service = new_entity;
132 	} else /* invoked because of a deactivation: lookup needed */
133 		next_in_service = bfq_lookup_next_entity(sd);
134 
135 	if (next_in_service) {
136 		parent_sched_may_change = !sd->next_in_service ||
137 			bfq_update_parent_budget(next_in_service);
138 	}
139 
140 	sd->next_in_service = next_in_service;
141 
142 	return parent_sched_may_change;
146 }
147 
148 #ifdef CONFIG_BFQ_GROUP_IOSCHED
149 
150 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
151 {
152 	struct bfq_entity *group_entity = bfqq->entity.parent;
153 
154 	if (!group_entity)
155 		group_entity = &bfqq->bfqd->root_group->entity;
156 
157 	return container_of(group_entity, struct bfq_group, entity);
158 }
159 
160 /*
161  * Returns true if this budget change may let next_in_service->parent
162  * become the next_in_service entity for its parent entity.
163  */
164 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
165 {
166 	struct bfq_entity *bfqg_entity;
167 	struct bfq_group *bfqg;
168 	struct bfq_sched_data *group_sd;
169 	bool ret = false;
170 
171 	group_sd = next_in_service->sched_data;
172 
173 	bfqg = container_of(group_sd, struct bfq_group, sched_data);
174 	/*
175 	 * bfq_group's my_entity field is not NULL only if the group
176 	 * is not the root group. We must not touch the root entity
177 	 * as it must never become an in-service entity.
178 	 */
179 	bfqg_entity = bfqg->my_entity;
180 	if (bfqg_entity) {
181 		if (bfqg_entity->budget > next_in_service->budget)
182 			ret = true;
183 		bfqg_entity->budget = next_in_service->budget;
184 	}
185 
186 	return ret;
187 }
188 
189 /*
190  * This function tells whether entity stops being a candidate for next
191  * service, according to the restrictive definition of the field
192  * next_in_service. In particular, this function is invoked for an
193  * entity that is about to be set in service.
194  *
195  * If entity is a queue, then the entity is no longer a candidate for
196  * next service according to that definition, because entity is
197  * about to become the in-service queue. This function then returns
198  * true if entity is a queue.
199  *
200  * In contrast, entity could still be a candidate for next service if
201  * it is not a queue, and has more than one active child. In fact,
202  * even if one of its children is about to be set in service, other
203  * active children may still be the next to serve, for the parent
204  * entity, even according to the above definition. As a consequence, a
205  * non-queue entity stops being a candidate for next service only if
206  * it has exactly one active child, and only in that case does this
207  * function return true for a non-queue entity.
208  */
209 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
210 {
211 	struct bfq_group *bfqg;
212 
213 	if (bfq_entity_to_bfqq(entity))
214 		return true;
215 
216 	bfqg = container_of(entity, struct bfq_group, entity);
217 
218 	/*
219 	 * The field active_entities does not always contain the
220 	 * actual number of active children entities: it happens to
221 	 * not account for the in-service entity in case the latter is
222 	 * removed from its active tree (which may get done after
223 	 * invoking the function bfq_no_longer_next_in_service in
224 	 * bfq_get_next_queue). Fortunately, here, i.e., while
225 	 * bfq_no_longer_next_in_service is not yet completed in
226 	 * bfq_get_next_queue, bfq_active_extract has not yet been
227 	 * invoked, and thus active_entities still coincides with the
228 	 * actual number of active entities.
229 	 */
230 	if (bfqg->active_entities == 1)
231 		return true;
232 
233 	return false;
234 }
235 
236 #else /* CONFIG_BFQ_GROUP_IOSCHED */
237 
238 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
239 {
240 	return bfqq->bfqd->root_group;
241 }
242 
243 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
244 {
245 	return false;
246 }
247 
248 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
249 {
250 	return true;
251 }
252 
253 #endif /* CONFIG_BFQ_GROUP_IOSCHED */
254 
255 /*
256  * Shift for timestamp calculations.  This actually limits the maximum
257  * service allowed in one timestamp delta (small shift values increase it),
258  * the maximum total weight that can be used for the queues in the system
259  * (big shift values increase it), and the period of virtual time
260  * wraparounds.
261  */
262 #define WFQ_SERVICE_SHIFT	22
263 
264 struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
265 {
266 	struct bfq_queue *bfqq = NULL;
267 
268 	if (!entity->my_sched_data)
269 		bfqq = container_of(entity, struct bfq_queue, entity);
270 
271 	return bfqq;
272 }
273 
274 
275 /**
276  * bfq_delta - map service into the virtual time domain.
277  * @service: amount of service.
278  * @weight: scale factor (weight of an entity or weight sum).
279  */
280 static u64 bfq_delta(unsigned long service, unsigned long weight)
281 {
282 	u64 d = (u64)service << WFQ_SERVICE_SHIFT;
283 
284 	do_div(d, weight);
285 	return d;
286 }
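
/*
 * For intuition (illustrative numbers only): with WFQ_SERVICE_SHIFT
 * equal to 22, charging 1000 sectors of service to an entity of weight
 * 100 advances its virtual time by (1000 << 22) / 100 = 41943040
 * units, while the same service charged to an entity of weight 200
 * advances it by only half that amount. Higher weights therefore
 * consume virtual time more slowly and obtain a proportionally larger
 * share of the device.
 */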
287 
288 /**
289  * bfq_calc_finish - assign the finish time to an entity.
290  * @entity: the entity to act upon.
291  * @service: the service to be charged to the entity.
292  */
293 static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
294 {
295 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
296 
297 	entity->finish = entity->start +
298 		bfq_delta(service, entity->weight);
299 
300 	if (bfqq) {
301 		bfq_log_bfqq(bfqq->bfqd, bfqq,
302 			"calc_finish: serv %lu, w %d",
303 			service, entity->weight);
304 		bfq_log_bfqq(bfqq->bfqd, bfqq,
305 			"calc_finish: start %llu, finish %llu, delta %llu",
306 			entity->start, entity->finish,
307 			bfq_delta(service, entity->weight));
308 	}
309 }
310 
311 /**
312  * bfq_entity_of - get an entity from a node.
313  * @node: the node field of the entity.
314  *
315  * Convert a node pointer to the corresponding entity.  This is used only
316  * to simplify the logic of some functions and not as the generic
317  * conversion mechanism because, e.g., in the tree walking functions,
318  * the check for a %NULL value would be redundant.
319  */
320 struct bfq_entity *bfq_entity_of(struct rb_node *node)
321 {
322 	struct bfq_entity *entity = NULL;
323 
324 	if (node)
325 		entity = rb_entry(node, struct bfq_entity, rb_node);
326 
327 	return entity;
328 }
329 
330 /**
331  * bfq_extract - remove an entity from a tree.
332  * @root: the tree root.
333  * @entity: the entity to remove.
334  */
335 static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
336 {
337 	entity->tree = NULL;
338 	rb_erase(&entity->rb_node, root);
339 }
340 
341 /**
342  * bfq_idle_extract - extract an entity from the idle tree.
343  * @st: the service tree of the owning @entity.
344  * @entity: the entity being removed.
345  */
346 static void bfq_idle_extract(struct bfq_service_tree *st,
347 			     struct bfq_entity *entity)
348 {
349 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
350 	struct rb_node *next;
351 
352 	if (entity == st->first_idle) {
353 		next = rb_next(&entity->rb_node);
354 		st->first_idle = bfq_entity_of(next);
355 	}
356 
357 	if (entity == st->last_idle) {
358 		next = rb_prev(&entity->rb_node);
359 		st->last_idle = bfq_entity_of(next);
360 	}
361 
362 	bfq_extract(&st->idle, entity);
363 
364 	if (bfqq)
365 		list_del(&bfqq->bfqq_list);
366 }
367 
368 /**
369  * bfq_insert - generic tree insertion.
370  * @root: tree root.
371  * @entity: entity to insert.
372  *
373  * This is used for the idle and the active tree, since they are both
374  * ordered by finish time.
375  */
376 static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
377 {
378 	struct bfq_entity *entry;
379 	struct rb_node **node = &root->rb_node;
380 	struct rb_node *parent = NULL;
381 
382 	while (*node) {
383 		parent = *node;
384 		entry = rb_entry(parent, struct bfq_entity, rb_node);
385 
386 		if (bfq_gt(entry->finish, entity->finish))
387 			node = &parent->rb_left;
388 		else
389 			node = &parent->rb_right;
390 	}
391 
392 	rb_link_node(&entity->rb_node, parent, node);
393 	rb_insert_color(&entity->rb_node, root);
394 
395 	entity->tree = root;
396 }
397 
398 /**
399  * bfq_update_min - update the min_start field of an entity.
400  * @entity: the entity to update.
401  * @node: one of its children.
402  *
403  * This function is called when @entity may store an invalid value for
404  * min_start due to updates to the active tree.  The function assumes
405  * that the subtree rooted at @node (which may be its left or its right
406  * child) has a valid min_start value.
407  */
408 static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
409 {
410 	struct bfq_entity *child;
411 
412 	if (node) {
413 		child = rb_entry(node, struct bfq_entity, rb_node);
414 		if (bfq_gt(entity->min_start, child->min_start))
415 			entity->min_start = child->min_start;
416 	}
417 }
418 
419 /**
420  * bfq_update_active_node - recalculate min_start.
421  * @node: the node to update.
422  *
423  * @node may have changed position or one of its children may have moved;
424  * this function updates its min_start value.  The left and right subtrees
425  * are assumed to hold a correct min_start value.
426  */
427 static void bfq_update_active_node(struct rb_node *node)
428 {
429 	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
430 
431 	entity->min_start = entity->start;
432 	bfq_update_min(entity, node->rb_right);
433 	bfq_update_min(entity, node->rb_left);
434 }
435 
436 /**
437  * bfq_update_active_tree - update min_start for the whole active tree.
438  * @node: the starting node.
439  *
440  * @node must be the deepest modified node after an update.  This function
441  * updates its min_start using the values held by its children, assuming
442  * that they did not change, and then updates all the nodes that may have
443  * changed in the path to the root.  The only nodes that may have changed
444  * are the ones in the path or their siblings.
445  */
446 static void bfq_update_active_tree(struct rb_node *node)
447 {
448 	struct rb_node *parent;
449 
450 up:
451 	bfq_update_active_node(node);
452 
453 	parent = rb_parent(node);
454 	if (!parent)
455 		return;
456 
457 	if (node == parent->rb_left && parent->rb_right)
458 		bfq_update_active_node(parent->rb_right);
459 	else if (parent->rb_left)
460 		bfq_update_active_node(parent->rb_left);
461 
462 	node = parent;
463 	goto up;
464 }
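
/*
 * Small example of the min_start augmentation (illustrative values):
 * if a node has start == 10 and its two children carry min_start == 7
 * and min_start == 12, then bfq_update_active_node() sets the node's
 * min_start to 7, i.e., to the minimum start time over the whole
 * subtree. bfq_update_active_tree() then propagates such updates from
 * the deepest modified node up to the root.
 */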
465 
466 /**
467  * bfq_active_insert - insert an entity in the active tree of its
468  *                     group/device.
469  * @st: the service tree of the entity.
470  * @entity: the entity being inserted.
471  *
472  * The active tree is ordered by finish time, but an extra key is kept
473  * for each node, containing the minimum value for the start times of
474  * its children (and the node itself), so it's possible to search for
475  * the eligible node with the lowest finish time in logarithmic time.
476  */
477 static void bfq_active_insert(struct bfq_service_tree *st,
478 			      struct bfq_entity *entity)
479 {
480 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
481 	struct rb_node *node = &entity->rb_node;
482 #ifdef CONFIG_BFQ_GROUP_IOSCHED
483 	struct bfq_sched_data *sd = NULL;
484 	struct bfq_group *bfqg = NULL;
485 	struct bfq_data *bfqd = NULL;
486 #endif
487 
488 	bfq_insert(&st->active, entity);
489 
490 	if (node->rb_left)
491 		node = node->rb_left;
492 	else if (node->rb_right)
493 		node = node->rb_right;
494 
495 	bfq_update_active_tree(node);
496 
497 #ifdef CONFIG_BFQ_GROUP_IOSCHED
498 	sd = entity->sched_data;
499 	bfqg = container_of(sd, struct bfq_group, sched_data);
500 	bfqd = (struct bfq_data *)bfqg->bfqd;
501 #endif
502 	if (bfqq)
503 		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
504 #ifdef CONFIG_BFQ_GROUP_IOSCHED
505 	else /* bfq_group */
506 		bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
507 
508 	if (bfqg != bfqd->root_group)
509 		bfqg->active_entities++;
510 #endif
511 }
512 
513 /**
514  * bfq_ioprio_to_weight - calc a weight from an ioprio.
515  * @ioprio: the ioprio value to convert.
516  */
517 unsigned short bfq_ioprio_to_weight(int ioprio)
518 {
519 	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
520 }
521 
522 /**
523  * bfq_weight_to_ioprio - calc an ioprio from a weight.
524  * @weight: the weight value to convert.
525  *
526  * To preserve as much as possible the old only-ioprio user interface,
527  * 0 is used as an escape ioprio value for weights (numerically) equal or
528  * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
529  */
530 static unsigned short bfq_weight_to_ioprio(int weight)
531 {
532 	return max_t(int, 0,
533 		     IOPRIO_BE_NR - weight / BFQ_WEIGHT_CONVERSION_COEFF);
534 }
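
/*
 * Numeric example for the two conversions above, assuming the usual
 * values IOPRIO_BE_NR == 8 and BFQ_WEIGHT_CONVERSION_COEFF == 10 (both
 * defined outside this file): ioprio 4, the default, maps to weight
 * (8 - 4) * 10 = 40, and ioprio 0 to weight 80. In the opposite
 * direction, weight 40 maps back to ioprio 8 - 40 / 10 = 4, while any
 * weight >= 80 yields the escape value 0.
 */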
535 
536 static void bfq_get_entity(struct bfq_entity *entity)
537 {
538 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
539 
540 	if (bfqq) {
541 		bfqq->ref++;
542 		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
543 			     bfqq, bfqq->ref);
544 	}
545 }
546 
547 /**
548  * bfq_find_deepest - find the deepest node that an extraction can modify.
549  * @node: the node being removed.
550  *
551  * Do the first step of an extraction in an rb tree, looking for the
552  * node that will replace @node, and returning the deepest node that
553  * the following modifications to the tree can touch.  If @node is the
554  * last node in the tree return %NULL.
555  */
556 static struct rb_node *bfq_find_deepest(struct rb_node *node)
557 {
558 	struct rb_node *deepest;
559 
560 	if (!node->rb_right && !node->rb_left)
561 		deepest = rb_parent(node);
562 	else if (!node->rb_right)
563 		deepest = node->rb_left;
564 	else if (!node->rb_left)
565 		deepest = node->rb_right;
566 	else {
567 		deepest = rb_next(node);
568 		if (deepest->rb_right)
569 			deepest = deepest->rb_right;
570 		else if (rb_parent(deepest) != node)
571 			deepest = rb_parent(deepest);
572 	}
573 
574 	return deepest;
575 }
576 
577 /**
578  * bfq_active_extract - remove an entity from the active tree.
579  * @st: the service_tree containing the tree.
580  * @entity: the entity being removed.
581  */
582 static void bfq_active_extract(struct bfq_service_tree *st,
583 			       struct bfq_entity *entity)
584 {
585 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
586 	struct rb_node *node;
587 #ifdef CONFIG_BFQ_GROUP_IOSCHED
588 	struct bfq_sched_data *sd = NULL;
589 	struct bfq_group *bfqg = NULL;
590 	struct bfq_data *bfqd = NULL;
591 #endif
592 
593 	node = bfq_find_deepest(&entity->rb_node);
594 	bfq_extract(&st->active, entity);
595 
596 	if (node)
597 		bfq_update_active_tree(node);
598 
599 #ifdef CONFIG_BFQ_GROUP_IOSCHED
600 	sd = entity->sched_data;
601 	bfqg = container_of(sd, struct bfq_group, sched_data);
602 	bfqd = (struct bfq_data *)bfqg->bfqd;
603 #endif
604 	if (bfqq)
605 		list_del(&bfqq->bfqq_list);
606 #ifdef CONFIG_BFQ_GROUP_IOSCHED
607 	else /* bfq_group */
608 		bfq_weights_tree_remove(bfqd, entity,
609 					&bfqd->group_weights_tree);
610 
611 	if (bfqg != bfqd->root_group)
612 		bfqg->active_entities--;
613 #endif
614 }
615 
616 /**
617  * bfq_idle_insert - insert an entity into the idle tree.
618  * @st: the service tree containing the tree.
619  * @entity: the entity to insert.
620  */
621 static void bfq_idle_insert(struct bfq_service_tree *st,
622 			    struct bfq_entity *entity)
623 {
624 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
625 	struct bfq_entity *first_idle = st->first_idle;
626 	struct bfq_entity *last_idle = st->last_idle;
627 
628 	if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
629 		st->first_idle = entity;
630 	if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
631 		st->last_idle = entity;
632 
633 	bfq_insert(&st->idle, entity);
634 
635 	if (bfqq)
636 		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
637 }
638 
639 /**
640  * bfq_forget_entity - do not consider entity any longer for scheduling
641  * @st: the service tree.
642  * @entity: the entity being removed.
643  * @is_in_service: true if entity is currently the in-service entity.
644  *
645  * Forget everything about @entity. In addition, if entity represents
646  * a queue, and the latter is not in service, then release the service
647  * reference to the queue (the one taken through bfq_get_entity). In
648  * fact, in this case, there is really no more service reference to
649  * the queue, as the latter is also outside any service tree. If,
650  * instead, the queue is in service, then __bfq_bfqd_reset_in_service
651  * will take care of putting the reference when the queue finally
652  * stops being served.
653  */
654 static void bfq_forget_entity(struct bfq_service_tree *st,
655 			      struct bfq_entity *entity,
656 			      bool is_in_service)
657 {
658 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
659 
660 	entity->on_st = false;
661 	st->wsum -= entity->weight;
662 	if (bfqq && !is_in_service)
663 		bfq_put_queue(bfqq);
664 }
665 
666 /**
667  * bfq_put_idle_entity - release the idle tree ref of an entity.
668  * @st: service tree for the entity.
669  * @entity: the entity being released.
670  */
671 void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
672 {
673 	bfq_idle_extract(st, entity);
674 	bfq_forget_entity(st, entity,
675 			  entity == entity->sched_data->in_service_entity);
676 }
677 
678 /**
679  * bfq_forget_idle - update the idle tree if necessary.
680  * @st: the service tree to act upon.
681  *
682  * To preserve the global O(log N) complexity we only remove one entry here;
683  * as the idle tree will not grow indefinitely this can be done safely.
684  */
685 static void bfq_forget_idle(struct bfq_service_tree *st)
686 {
687 	struct bfq_entity *first_idle = st->first_idle;
688 	struct bfq_entity *last_idle = st->last_idle;
689 
690 	if (RB_EMPTY_ROOT(&st->active) && last_idle &&
691 	    !bfq_gt(last_idle->finish, st->vtime)) {
692 		/*
693 		 * Forget the whole idle tree, increasing the vtime past
694 		 * the last finish time of idle entities.
695 		 */
696 		st->vtime = last_idle->finish;
697 	}
698 
699 	if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
700 		bfq_put_idle_entity(st, first_idle);
701 }
702 
703 struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
704 {
705 	struct bfq_sched_data *sched_data = entity->sched_data;
706 	unsigned int idx = bfq_class_idx(entity);
707 
708 	return sched_data->service_tree + idx;
709 }
710 
711 /*
712  * Update weight and priority of entity. If update_class_too is true,
713  * then update the ioprio_class of entity too.
714  *
715  * The reason why the update of ioprio_class is controlled through the
716  * last parameter is as follows. Changing the ioprio class of an
717  * entity implies changing the destination service trees for that
718  * entity. If such a change occurred when the entity is already on one
719  * of the service trees for its previous class, then the state of the
720  * entity would become more complex: none of the new possible service
721  * trees for the entity, according to bfq_entity_service_tree(), would
722  * match any of the possible service trees on which the entity
723  * is. Complex operations involving these trees, such as entity
724  * activations and deactivations, should take into account this
725  * additional complexity.  To avoid this issue, this function is
726  * invoked with update_class_too unset in the points in the code where
727  * entity may happen to be on some tree.
728  */
729 struct bfq_service_tree *
730 __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
731 				struct bfq_entity *entity,
732 				bool update_class_too)
733 {
734 	struct bfq_service_tree *new_st = old_st;
735 
736 	if (entity->prio_changed) {
737 		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
738 		unsigned int prev_weight, new_weight;
739 		struct bfq_data *bfqd = NULL;
740 		struct rb_root *root;
741 #ifdef CONFIG_BFQ_GROUP_IOSCHED
742 		struct bfq_sched_data *sd;
743 		struct bfq_group *bfqg;
744 #endif
745 
746 		if (bfqq)
747 			bfqd = bfqq->bfqd;
748 #ifdef CONFIG_BFQ_GROUP_IOSCHED
749 		else {
750 			sd = entity->my_sched_data;
751 			bfqg = container_of(sd, struct bfq_group, sched_data);
752 			bfqd = (struct bfq_data *)bfqg->bfqd;
753 		}
754 #endif
755 
756 		old_st->wsum -= entity->weight;
757 
758 		if (entity->new_weight != entity->orig_weight) {
759 			if (entity->new_weight < BFQ_MIN_WEIGHT ||
760 			    entity->new_weight > BFQ_MAX_WEIGHT) {
761 				pr_crit("update_weight_prio: new_weight %d\n",
762 					entity->new_weight);
763 				if (entity->new_weight < BFQ_MIN_WEIGHT)
764 					entity->new_weight = BFQ_MIN_WEIGHT;
765 				else
766 					entity->new_weight = BFQ_MAX_WEIGHT;
767 			}
768 			entity->orig_weight = entity->new_weight;
769 			if (bfqq)
770 				bfqq->ioprio =
771 				  bfq_weight_to_ioprio(entity->orig_weight);
772 		}
773 
774 		if (bfqq && update_class_too)
775 			bfqq->ioprio_class = bfqq->new_ioprio_class;
776 
777 		/*
778 		 * Reset prio_changed only if the ioprio_class change
779 		 * is not pending any longer.
780 		 */
781 		if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
782 			entity->prio_changed = 0;
783 
784 		/*
785 		 * NOTE: here we may be changing the weight too early,
786 		 * this will cause unfairness.  The correct approach
787 		 * would have required additional complexity to defer
788 		 * weight changes to the proper time instants (i.e.,
789 		 * when entity->finish <= old_st->vtime).
790 		 */
791 		new_st = bfq_entity_service_tree(entity);
792 
793 		prev_weight = entity->weight;
794 		new_weight = entity->orig_weight *
795 			     (bfqq ? bfqq->wr_coeff : 1);
796 		/*
797 		 * If the weight of the entity changes, remove the entity
798 		 * from its old weight counter (if there is a counter
799 		 * associated with the entity), and add it to the counter
800 		 * associated with its new weight.
801 		 */
802 		if (prev_weight != new_weight) {
803 			root = bfqq ? &bfqd->queue_weights_tree :
804 				      &bfqd->group_weights_tree;
805 			bfq_weights_tree_remove(bfqd, entity, root);
806 		}
807 		entity->weight = new_weight;
808 		/*
809 		 * Add the entity to its weights tree only if it is
810 		 * not associated with a weight-raised queue.
811 		 */
812 		if (prev_weight != new_weight &&
813 		    (bfqq ? bfqq->wr_coeff == 1 : 1))
814 			/* If we get here, root has been initialized. */
815 			bfq_weights_tree_add(bfqd, entity, root);
816 
817 		new_st->wsum += entity->weight;
818 
819 		if (new_st != old_st)
820 			entity->start = new_st->vtime;
821 	}
822 
823 	return new_st;
824 }
825 
826 /**
827  * bfq_bfqq_served - update the scheduler status after selection for
828  *                   service.
829  * @bfqq: the queue being served.
830  * @served: amount of service received, in sectors.
831  *
832  * NOTE: this can be optimized, as the timestamps of upper level entities
833  * are synchronized every time a new bfqq is selected for service.  By now,
834  * we keep it to better check consistency.
835  */
836 void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
837 {
838 	struct bfq_entity *entity = &bfqq->entity;
839 	struct bfq_service_tree *st;
840 
841 	for_each_entity(entity) {
842 		st = bfq_entity_service_tree(entity);
843 
844 		entity->service += served;
845 
846 		st->vtime += bfq_delta(served, st->wsum);
847 		bfq_forget_idle(st);
848 	}
849 	bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
850 	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
851 }
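
/*
 * Example of the update above (illustrative values): if the served
 * queue belongs to a service tree with wsum == 200 and receives 1000
 * sectors of service, entity->service grows by 1000 at each level of
 * the hierarchy, and the vtime of that tree advances by
 * bfq_delta(1000, 200) = (1000 << 22) / 200 units; trees at upper
 * levels advance their vtime by the same service scaled by their own
 * wsum.
 */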
852 
853 /**
854  * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
855  *			  of the time interval during which bfqq has been in
856  *			  service.
857  * @bfqd: the device
858  * @bfqq: the queue that needs a service update.
859  * @time_ms: the amount of time during which the queue has received service
860  *
861  * If a queue does not consume its budget fast enough, then providing
862  * the queue with service fairness may impair throughput, more or less
863  * severely. For this reason, queues that consume their budget slowly
864  * are provided with time fairness instead of service fairness. This
865  * goal is achieved through the BFQ scheduling engine, even if such an
866  * engine works in the service domain, and not in the time domain. The trick
867  * is charging these queues with an inflated amount of service, equal
868  * to the amount of service that they would have received during their
869  * service slot if they had been fast, i.e., if their requests had
870  * been dispatched at a rate equal to the estimated peak rate.
871  *
872  * It is worth noting that time fairness can cause important
873  * distortions in terms of bandwidth distribution, on devices with
874  * internal queueing. The reason is that I/O requests dispatched
875  * during the service slot of a queue may be served after that service
876  * slot is finished, and may have a total processing time loosely
877  * correlated with the duration of the service slot. This is
878  * especially true for short service slots.
879  */
880 void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
881 			  unsigned long time_ms)
882 {
883 	struct bfq_entity *entity = &bfqq->entity;
884 	int tot_serv_to_charge = entity->service;
885 	unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
886 
887 	if (time_ms > 0 && time_ms < timeout_ms)
888 		tot_serv_to_charge =
889 			(bfqd->bfq_max_budget * time_ms) / timeout_ms;
890 
891 	if (tot_serv_to_charge < entity->service)
892 		tot_serv_to_charge = entity->service;
893 
894 	/* Increase budget to avoid inconsistencies */
895 	if (tot_serv_to_charge > entity->budget)
896 		entity->budget = tot_serv_to_charge;
897 
898 	bfq_bfqq_served(bfqq,
899 			max_t(int, 0, tot_serv_to_charge - entity->service));
900 }
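
/*
 * Sketch of the charging rule above (illustrative values only): if
 * bfq_max_budget is 16384 sectors, the budget timeout is 125 ms and
 * the queue has been in service for 50 ms, then the total service
 * charged to the queue is brought up to 16384 * 50 / 125 = 6553
 * sectors (unless it has already received more service than that), as
 * if the queue had consumed its slot at the estimated peak rate.
 */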
901 
902 static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
903 					struct bfq_service_tree *st,
904 					bool backshifted)
905 {
906 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
907 
908 	/*
909 	 * When this function is invoked, entity is not in any service
910 	 * tree, then it is safe to invoke next function with the last
911 	 * parameter set (see the comments on the function).
912 	 */
913 	st = __bfq_entity_update_weight_prio(st, entity, true);
914 	bfq_calc_finish(entity, entity->budget);
915 
916 	/*
917 	 * If some queues enjoy backshifting for a while, then their
918 	 * (virtual) finish timestamps may happen to become lower and
919 	 * lower than the system virtual time.	In particular, if
920 	 * these queues often happen to be idle for short time
921 	 * periods, and during such time periods other queues with
922 	 * higher timestamps happen to be busy, then the backshifted
923 	 * timestamps of the former queues can become much lower than
924 	 * the system virtual time. In fact, to serve the queues with
925 	 * higher timestamps while the ones with lower timestamps are
926 	 * idle, the system virtual time may be pushed-up to much
927 	 * higher values than the finish timestamps of the idle
928 	 * queues. As a consequence, the finish timestamps of all new
929 	 * or newly activated queues may end up being much larger than
930 	 * those of lucky queues with backshifted timestamps. The
931 	 * latter queues may then monopolize the device for a lot of
932 	 * time. This would simply break service guarantees.
933 	 *
934 	 * To reduce this problem, push up a little bit the
935 	 * backshifted timestamps of the queue associated with this
936 	 * entity (only a queue can happen to have the backshifted
937 	 * flag set): just enough to let the finish timestamp of the
938 	 * queue be equal to the current value of the system virtual
939 	 * time. This may introduce a little unfairness among queues
940 	 * with backshifted timestamps, but it does not break
941 	 * worst-case fairness guarantees.
942 	 *
943 	 * As a special case, if bfqq is weight-raised, push up
944 	 * timestamps much less, to keep very low the probability that
945 	 * this push up causes the backshifted finish timestamps of
946 	 * weight-raised queues to become higher than the backshifted
947 	 * finish timestamps of non weight-raised queues.
948 	 */
949 	if (backshifted && bfq_gt(st->vtime, entity->finish)) {
950 		unsigned long delta = st->vtime - entity->finish;
951 
952 		if (bfqq)
953 			delta /= bfqq->wr_coeff;
954 
955 		entity->start += delta;
956 		entity->finish += delta;
957 	}
958 
959 	bfq_active_insert(st, entity);
960 }
961 
962 /**
963  * __bfq_activate_entity - handle activation of entity.
964  * @entity: the entity being activated.
965  * @non_blocking_wait_rq: true if entity was waiting for a request
966  *
967  * Called for a 'true' activation, i.e., if entity is not active and
968  * one of its children receives a new request.
969  *
970  * Basically, this function updates the timestamps of entity and
971  * inserts entity into its active tree, after possibly extracting it
972  * from its idle tree.
973  */
974 static void __bfq_activate_entity(struct bfq_entity *entity,
975 				  bool non_blocking_wait_rq)
976 {
977 	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
978 	bool backshifted = false;
979 	unsigned long long min_vstart;
980 
981 	/* See comments on bfq_bfqq_update_budg_for_activation */
982 	if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
983 		backshifted = true;
984 		min_vstart = entity->finish;
985 	} else
986 		min_vstart = st->vtime;
987 
988 	if (entity->tree == &st->idle) {
989 		/*
990 		 * Must be on the idle tree, bfq_idle_extract() will
991 		 * check for that.
992 		 */
993 		bfq_idle_extract(st, entity);
994 		entity->start = bfq_gt(min_vstart, entity->finish) ?
995 			min_vstart : entity->finish;
996 	} else {
997 		/*
998 		 * The finish time of the entity may be invalid, and
999 		 * it is in the past for sure, otherwise the queue
1000 		 * would have been on the idle tree.
1001 		 */
1002 		entity->start = min_vstart;
1003 		st->wsum += entity->weight;
1004 		/*
1005 		 * entity is about to be inserted into a service tree,
1006 		 * and then set in service: get a reference to make
1007 		 * sure entity does not disappear until it is no
1008 		 * longer in service or scheduled for service.
1009 		 */
1010 		bfq_get_entity(entity);
1011 
1012 		entity->on_st = true;
1013 	}
1014 
1015 	bfq_update_fin_time_enqueue(entity, st, backshifted);
1016 }
1017 
1018 /**
1019  * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
1020  * @entity: the entity being requeued or repositioned.
1021  *
1022  * Requeueing is needed if this entity stops being served, which
1023  * happens if a leaf descendant entity has expired. On the other hand,
1024  * repositioning is needed if the next_in_service entity for the child
1025  * entity has changed. See the comments inside the function for
1026  * details.
1027  *
1028  * Basically, this function: 1) removes entity from its active tree if
1029  * present there, 2) updates the timestamps of entity and 3) inserts
1030  * entity back into its active tree (in the new, right position for
1031  * the new values of the timestamps).
1032  */
1033 static void __bfq_requeue_entity(struct bfq_entity *entity)
1034 {
1035 	struct bfq_sched_data *sd = entity->sched_data;
1036 	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1037 
1038 	if (entity == sd->in_service_entity) {
1039 		/*
1040 		 * We are requeueing the current in-service entity,
1041 		 * which may have to be done for one of the following
1042 		 * reasons:
1043 		 * - entity represents the in-service queue, and the
1044 		 *   in-service queue is being requeued after an
1045 		 *   expiration;
1046 		 * - entity represents a group, and its budget has
1047 		 *   changed because one of its child entities has
1048 		 *   just been either activated or requeued for some
1049 		 *   reason; the timestamps of the entity need then to
1050 		 *   be updated, and the entity needs to be enqueued
1051 		 *   or repositioned accordingly.
1052 		 *
1053 		 * In particular, before requeueing, the start time of
1054 		 * the entity must be moved forward to account for the
1055 		 * service that the entity has received while in
1056 		 * service. This is done by the next instructions. The
1057 		 * finish time will then be updated according to this
1058 		 * new value of the start time, and to the budget of
1059 		 * the entity.
1060 		 */
1061 		bfq_calc_finish(entity, entity->service);
1062 		entity->start = entity->finish;
1063 		/*
1064 		 * In addition, if the entity had more than one child
1065 		 * when set in service, then it was not extracted from
1066 		 * the active tree. This implies that the position of
1067 		 * the entity in the active tree may need to be
1068 		 * changed now, because we have just updated the start
1069 		 * time of the entity, and we will update its finish
1070 		 * time in a moment (the requeueing is then, more
1071 		 * precisely, a repositioning in this case). To
1072 		 * implement this repositioning, we: 1) dequeue the
1073 		 * entity here, 2) update the finish time and requeue
1074 		 * the entity according to the new timestamps below.
1075 		 */
1076 		if (entity->tree)
1077 			bfq_active_extract(st, entity);
1078 	} else { /* The entity is already active, and not in service */
1079 		/*
1080 		 * In this case, this function gets called only if the
1081 		 * next_in_service entity below this entity has
1082 		 * changed, and this change has caused the budget of
1083 		 * this entity to change, which, in turn, implies that
1084 		 * the finish time of this entity must be
1085 		 * updated. Such an update may cause the scheduling,
1086 		 * i.e., the position in the active tree, of this
1087 		 * entity to change. We handle this change by: 1)
1088 		 * dequeueing the entity here, 2) updating the finish
1089 		 * time and requeueing the entity according to the new
1090 		 * timestamps below. This is the same approach as the
1091 		 * non-extracted-entity sub-case above.
1092 		 */
1093 		bfq_active_extract(st, entity);
1094 	}
1095 
1096 	bfq_update_fin_time_enqueue(entity, st, false);
1097 }
1098 
1099 static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1100 					  struct bfq_sched_data *sd,
1101 					  bool non_blocking_wait_rq)
1102 {
1103 	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1104 
1105 	if (sd->in_service_entity == entity || entity->tree == &st->active)
1106 		 /*
1107 		  * in service or already queued on the active tree,
1108 		  * requeue or reposition
1109 		  */
1110 		__bfq_requeue_entity(entity);
1111 	else
1112 		/*
1113 		 * Not in service and not queued on its active tree:
1114 		 * the entity is idle and this is a true activation.
1115 		 */
1116 		__bfq_activate_entity(entity, non_blocking_wait_rq);
1117 }
1118 
1119 
1120 /**
1121  * bfq_activate_requeue_entity - activate or requeue an entity representing a
1122  *				 bfq_queue, and activate, requeue or reposition
1123  *				 all ancestors for which such an update becomes
1124  *				 necessary.
1125  * @entity: the entity to activate.
1126  * @non_blocking_wait_rq: true if this entity was waiting for a request
1127  * @requeue: true if this is a requeue, which implies that bfqq is
1128  *	     being expired; thus ALL its ancestors stop being served and must
1129  *	     therefore be requeued
1130  */
1131 static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1132 					bool non_blocking_wait_rq,
1133 					bool requeue)
1134 {
1135 	struct bfq_sched_data *sd;
1136 
1137 	for_each_entity(entity) {
1138 		sd = entity->sched_data;
1139 		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
1140 
1141 		if (!bfq_update_next_in_service(sd, entity) && !requeue)
1142 			break;
1143 	}
1144 }
1145 
1146 /**
1147  * __bfq_deactivate_entity - deactivate an entity from its service tree.
1148  * @entity: the entity to deactivate.
1149  * @ins_into_idle_tree: if false, the entity will not be put into the
1150  *			idle tree.
1151  *
1152  * Deactivates an entity, independently of its previous state.  Must
1153  * be invoked only if entity is on a service tree. Extracts the entity
1154  * from that tree, and if necessary and allowed, puts it into the idle
1155  * tree.
1156  */
1157 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1158 {
1159 	struct bfq_sched_data *sd = entity->sched_data;
1160 	struct bfq_service_tree *st;
1161 	bool is_in_service;
1162 
1163 	if (!entity->on_st) /* entity never activated, or already inactive */
1164 		return false;
1165 
1166 	/*
1167 	 * If we get here, then entity is active, which implies that
1168 	 * bfq_group_set_parent has already been invoked for the group
1169 	 * represented by entity. Therefore, the field
1170 	 * entity->sched_data has been set, and we can safely use it.
1171 	 */
1172 	st = bfq_entity_service_tree(entity);
1173 	is_in_service = entity == sd->in_service_entity;
1174 
1175 	if (is_in_service) {
1176 		bfq_calc_finish(entity, entity->service);
1177 		sd->in_service_entity = NULL;
1178 	}
1179 
1180 	if (entity->tree == &st->active)
1181 		bfq_active_extract(st, entity);
1182 	else if (!is_in_service && entity->tree == &st->idle)
1183 		bfq_idle_extract(st, entity);
1184 
1185 	if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
1186 		bfq_forget_entity(st, entity, is_in_service);
1187 	else
1188 		bfq_idle_insert(st, entity);
1189 
1190 	return true;
1191 }
1192 
1193 /**
1194  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1195  * @entity: the entity to deactivate.
1196  * @ins_into_idle_tree: true if the entity can be put into the idle tree
1197  */
1198 static void bfq_deactivate_entity(struct bfq_entity *entity,
1199 				  bool ins_into_idle_tree,
1200 				  bool expiration)
1201 {
1202 	struct bfq_sched_data *sd;
1203 	struct bfq_entity *parent = NULL;
1204 
1205 	for_each_entity_safe(entity, parent) {
1206 		sd = entity->sched_data;
1207 
1208 		if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
1209 			/*
1210 			 * entity is not in any tree any more, so
1211 			 * this deactivation is a no-op, and there is
1212 			 * nothing to change for upper-level entities
1213 			 * (in case of expiration, this can never
1214 			 * happen).
1215 			 */
1216 			return;
1217 		}
1218 
1219 		if (sd->next_in_service == entity)
1220 			/*
1221 			 * entity was the next_in_service entity,
1222 			 * then, since entity has just been
1223 			 * deactivated, a new one must be found.
1224 			 */
1225 			bfq_update_next_in_service(sd, NULL);
1226 
1227 		if (sd->next_in_service || sd->in_service_entity) {
1228 			/*
1229 			 * The parent entity is still active, because
1230 			 * either next_in_service or in_service_entity
1231 			 * is not NULL. So, no further upwards
1232 			 * deactivation must be performed.  Yet,
1233 			 * next_in_service has changed.	Then the
1234 			 * schedule does need to be updated upwards.
1235 			 *
1236 			 * NOTE If in_service_entity is not NULL, then
1237 			 * next_in_service may happen to be NULL,
1238 			 * although the parent entity is evidently
1239 			 * active. This happens if 1) the entity
1240 			 * pointed by in_service_entity is the only
1241 			 * active entity in the parent entity, and 2)
1242 			 * according to the definition of
1243 			 * next_in_service, the in_service_entity
1244 			 * cannot be considered as
1245 			 * next_in_service. See the comments on the
1246 			 * definition of next_in_service for details.
1247 			 */
1248 			break;
1249 		}
1250 
1251 		/*
1252 		 * If we get here, then the parent is no more
1253 		 * backlogged and we need to propagate the
1254 		 * deactivation upwards. Thus let the loop go on.
1255 		 */
1256 
1257 		/*
1258 		 * Also let parent be queued into the idle tree on
1259 		 * deactivation, to preserve service guarantees, and
1260 		 * assuming that who invoked this function does not
1261 		 * need parent entities too to be removed completely.
1262 		 */
1263 		ins_into_idle_tree = true;
1264 	}
1265 
1266 	/*
1267 	 * If the deactivation loop is fully executed, then there are
1268 	 * no more entities to touch and next loop is not executed at
1269 	 * all. Otherwise, requeue remaining entities if they are
1270 	 * about to stop receiving service, or reposition them if this
1271 	 * is not the case.
1272 	 */
1273 	entity = parent;
1274 	for_each_entity(entity) {
1275 		/*
1276 		 * Invoke __bfq_requeue_entity on entity, even if
1277 		 * already active, to requeue/reposition it in the
1278 		 * active tree (because sd->next_in_service has
1279 		 * changed)
1280 		 */
1281 		__bfq_requeue_entity(entity);
1282 
1283 		sd = entity->sched_data;
1284 		if (!bfq_update_next_in_service(sd, entity) &&
1285 		    !expiration)
1286 			/*
1287 			 * next_in_service unchanged or not causing
1288 			 * any change in entity->parent->sd, and no
1289 			 * requeueing needed for expiration: stop
1290 			 * here.
1291 			 */
1292 			break;
1293 	}
1294 }
1295 
1296 /**
1297  * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
1298  *                       if needed, to have at least one entity eligible.
1299  * @st: the service tree to act upon.
1300  *
1301  * Assumes that st is not empty.
1302  */
1303 static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
1304 {
1305 	struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
1306 
1307 	if (bfq_gt(root_entity->min_start, st->vtime))
1308 		return root_entity->min_start;
1309 
1310 	return st->vtime;
1311 }
1312 
1313 static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
1314 {
1315 	if (new_value > st->vtime) {
1316 		st->vtime = new_value;
1317 		bfq_forget_idle(st);
1318 	}
1319 }
1320 
1321 /**
1322  * bfq_first_active_entity - find the eligible entity with
1323  *                           the smallest finish time
1324  * @st: the service tree to select from.
1325  * @vtime: the system virtual time to use as a reference for eligibility
1326  *
1327  * This function searches the first schedulable entity, starting from the
1328  * root of the tree and going on the left every time on this side there is
1329  * a subtree with at least one eligible (start <= vtime) entity. The path on
1330  * the right is followed only if a) the left subtree contains no eligible
1331  * entities and b) no eligible entity has been found yet.
1332  */
1333 static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
1334 						  u64 vtime)
1335 {
1336 	struct bfq_entity *entry, *first = NULL;
1337 	struct rb_node *node = st->active.rb_node;
1338 
1339 	while (node) {
1340 		entry = rb_entry(node, struct bfq_entity, rb_node);
1341 left:
1342 		if (!bfq_gt(entry->start, vtime))
1343 			first = entry;
1344 
1345 		if (node->rb_left) {
1346 			entry = rb_entry(node->rb_left,
1347 					 struct bfq_entity, rb_node);
1348 			if (!bfq_gt(entry->min_start, vtime)) {
1349 				node = node->rb_left;
1350 				goto left;
1351 			}
1352 		}
1353 		if (first)
1354 			break;
1355 		node = node->rb_right;
1356 	}
1357 
1358 	return first;
1359 }
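
/*
 * Illustrative scenario for the search above: with vtime == 100 and
 * three active entities whose (start, finish) pairs are (90, 180),
 * (95, 150) and (120, 130), only the first two are eligible
 * (start <= vtime), so the entity with finish == 150 is returned; the
 * entity with the overall smallest finish time (130) is skipped
 * because it is not yet eligible.
 */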
1360 
1361 /**
1362  * __bfq_lookup_next_entity - return the first eligible entity in @st.
1363  * @st: the service tree.
1364  *
1365  * If there is no in-service entity for the sched_data st belongs to,
1366  * then return the entity that will be set in service if:
1367  * 1) the parent entity this st belongs to is set in service;
1368  * 2) no entity belonging to such parent entity undergoes a state change
1369  * that would influence the timestamps of the entity (e.g., becomes idle,
1370  * becomes backlogged, changes its budget, ...).
1371  *
1372  * In this first case, update the virtual time in @st too (see the
1373  * comments on this update inside the function).
1374  *
1375  * In contrast, if there is an in-service entity, then return the
1376  * entity that would be set in service if not only the above
1377  * conditions, but also the next one held true: the currently
1378  * in-service entity, on expiration,
1379  * 1) gets a finish time equal to the current one, or
1380  * 2) is not eligible any more, or
1381  * 3) is idle.
1382  */
1383 static struct bfq_entity *
1384 __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
1385 {
1386 	struct bfq_entity *entity;
1387 	u64 new_vtime;
1388 
1389 	if (RB_EMPTY_ROOT(&st->active))
1390 		return NULL;
1391 
1392 	/*
1393 	 * Get the value of the system virtual time for which at
1394 	 * least one entity is eligible.
1395 	 */
1396 	new_vtime = bfq_calc_vtime_jump(st);
1397 
1398 	/*
1399 	 * If there is no in-service entity for the sched_data this
1400 	 * active tree belongs to, then push the system virtual time
1401 	 * up to the value that guarantees that at least one entity is
1402 	 * eligible. If, instead, there is an in-service entity, then
1403 	 * do not make any such update, because there is already an
1404 	 * eligible entity, namely the in-service one (even if the
1405 	 * entity is not on st, because it was extracted when set in
1406 	 * service).
1407 	 */
1408 	if (!in_service)
1409 		bfq_update_vtime(st, new_vtime);
1410 
1411 	entity = bfq_first_active_entity(st, new_vtime);
1412 
1413 	return entity;
1414 }
1415 
1416 /**
1417  * bfq_lookup_next_entity - return the first eligible entity in @sd.
1418  * @sd: the sched_data.
1419  *
1420  * This function is invoked when there has been a change in the trees
1421  * for sd, and we need to know what the new next entity is after this
1422  * change.
1423  */
1424 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
1425 {
1426 	struct bfq_service_tree *st = sd->service_tree;
1427 	struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
1428 	struct bfq_entity *entity = NULL;
1429 	int class_idx = 0;
1430 
1431 	/*
1432 	 * Choose from idle class, if needed to guarantee a minimum
1433 	 * bandwidth to this class (and if there is some active entity
1434 	 * in idle class). This should also mitigate
1435 	 * priority-inversion problems in case a low priority task is
1436 	 * holding file system resources.
1437 	 */
1438 	if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
1439 				   BFQ_CL_IDLE_TIMEOUT)) {
1440 		if (!RB_EMPTY_ROOT(&idle_class_st->active))
1441 			class_idx = BFQ_IOPRIO_CLASSES - 1;
1442 		/* About to be served if backlogged, or not yet backlogged */
1443 		sd->bfq_class_idle_last_service = jiffies;
1444 	}
1445 
1446 	/*
1447 	 * Find the next entity to serve for the highest-priority
1448 	 * class, unless the idle class needs to be served.
1449 	 */
1450 	for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
1451 		entity = __bfq_lookup_next_entity(st + class_idx,
1452 						  sd->in_service_entity);
1453 
1454 		if (entity)
1455 			break;
1456 	}
1457 
1458 	return entity;
1462 }
1463 
1464 bool next_queue_may_preempt(struct bfq_data *bfqd)
1465 {
1466 	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
1467 
1468 	return sd->next_in_service != sd->in_service_entity;
1469 }
1470 
1471 /*
1472  * Get next queue for service.
1473  */
1474 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1475 {
1476 	struct bfq_entity *entity = NULL;
1477 	struct bfq_sched_data *sd;
1478 	struct bfq_queue *bfqq;
1479 
1480 	if (bfqd->busy_queues == 0)
1481 		return NULL;
1482 
1483 	/*
1484 	 * Traverse the path from the root to the leaf entity to
1485 	 * serve. Set in service all the entities visited along the
1486 	 * way.
1487 	 */
1488 	sd = &bfqd->root_group->sched_data;
1489 	for (; sd ; sd = entity->my_sched_data) {
1490 		/*
1491 		 * WARNING. We are about to set the in-service entity
1492 		 * to sd->next_in_service, i.e., to the (cached) value
1493 		 * returned by bfq_lookup_next_entity(sd) the last
1494 		 * time it was invoked, i.e., the last time when the
1495 		 * service order in sd changed as a consequence of the
1496 		 * activation or deactivation of an entity. In this
1497 		 * respect, if we execute bfq_lookup_next_entity(sd)
1498 		 * in this very moment, it may, although with low
1499 		 * probability, yield a different entity than that
1500 		 * pointed to by sd->next_in_service. This rare event
1501 		 * happens in case there was no CLASS_IDLE entity to
1502 		 * serve for sd when bfq_lookup_next_entity(sd) was
1503 		 * invoked for the last time, while there is now one
1504 		 * such entity.
1505 		 *
1506 		 * If the above event happens, then the scheduling of
1507 		 * such entity in CLASS_IDLE is postponed until the
1508 		 * service of the sd->next_in_service entity
1509 		 * finishes. In fact, when the latter is expired,
1510 		 * bfq_lookup_next_entity(sd) gets called again,
1511 		 * exactly to update sd->next_in_service.
1512 		 */
1513 
1514 		/* Make next_in_service entity become in_service_entity */
1515 		entity = sd->next_in_service;
1516 		sd->in_service_entity = entity;
1517 
1518 		/*
1519 		 * Reset the accumulator of the amount of service that
1520 		 * the entity is about to receive.
1521 		 */
1522 		entity->service = 0;
1523 
1524 		/*
1525 		 * If entity is no longer a candidate for next
1526 		 * service, then it must be extracted from its active
1527 		 * tree, so as to make sure that it won't be
1528 		 * considered when computing next_in_service. See the
1529 		 * comments on the function
1530 		 * bfq_no_longer_next_in_service() for details.
1531 		 */
1532 		if (bfq_no_longer_next_in_service(entity))
1533 			bfq_active_extract(bfq_entity_service_tree(entity),
1534 					   entity);
1535 
1536 		/*
1537 		 * Even if entity is not to be extracted according to
1538 		 * the above check, a descendant entity may get
1539 		 * extracted in one of the next iterations of this
1540 		 * loop. Such an event could cause a change in
1541 		 * next_in_service for the level of the descendant
1542 		 * entity, and thus possibly back to this level.
1543 		 *
1544 		 * However, we cannot perform the resulting needed
1545 		 * update of next_in_service for this level before the
1546 		 * end of the whole loop, because, to know which is
1547 		 * the correct next-to-serve candidate entity for each
1548 		 * level, we need first to find the leaf entity to set
1549 		 * in service. In fact, only after we know which is
1550 		 * the next-to-serve leaf entity, we can discover
1551 		 * whether the parent entity of the leaf entity
1552 		 * becomes the next-to-serve, and so on.
1553 		 */
1554 	}
1555 
1556 	bfqq = bfq_entity_to_bfqq(entity);
1557 
1558 	/*
1559 	 * We can finally update all next-to-serve entities along the
1560 	 * path from the leaf entity just set in service to the root.
1561 	 */
1562 	for_each_entity(entity) {
1563 		struct bfq_sched_data *sd = entity->sched_data;
1564 
1565 		if (!bfq_update_next_in_service(sd, NULL))
1566 			break;
1567 	}
1568 
1569 	return bfqq;
1570 }
1571 
1572 void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
1573 {
1574 	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
1575 	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
1576 	struct bfq_entity *entity = in_serv_entity;
1577 
1578 	bfq_clear_bfqq_wait_request(in_serv_bfqq);
1579 	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
1580 	bfqd->in_service_queue = NULL;
1581 
1582 	/*
1583 	 * When this function is called, all in-service entities have
1584 	 * been properly deactivated or requeued, so we can safely
1585 	 * execute the final step: reset in_service_entity along the
1586 	 * path from entity to the root.
1587 	 */
1588 	for_each_entity(entity)
1589 		entity->sched_data->in_service_entity = NULL;
1590 
1591 	/*
1592 	 * in_serv_entity is no longer in service, so, if it is in no
1593 	 * service tree either, then release the service reference to
1594 	 * the queue it represents (taken with bfq_get_entity).
1595 	 */
1596 	if (!in_serv_entity->on_st)
1597 		bfq_put_queue(in_serv_bfqq);
1598 }
1599 
1600 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1601 			 bool ins_into_idle_tree, bool expiration)
1602 {
1603 	struct bfq_entity *entity = &bfqq->entity;
1604 
1605 	bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
1606 }
1607 
1608 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1609 {
1610 	struct bfq_entity *entity = &bfqq->entity;
1611 
1612 	bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
1613 				    false);
1614 	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1615 }
1616 
1617 void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1618 {
1619 	struct bfq_entity *entity = &bfqq->entity;
1620 
1621 	bfq_activate_requeue_entity(entity, false,
1622 				    bfqq == bfqd->in_service_queue);
1623 }
1624 
1625 /*
1626  * Called when the bfqq no longer has requests pending, remove it from
1627  * the service tree. As a special case, it can be invoked during an
1628  * expiration.
1629  */
1630 void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1631 		       bool expiration)
1632 {
1633 	bfq_log_bfqq(bfqd, bfqq, "del from busy");
1634 
1635 	bfq_clear_bfqq_busy(bfqq);
1636 
1637 	bfqd->busy_queues--;
1638 
1639 	if (!bfqq->dispatched)
1640 		bfq_weights_tree_remove(bfqd, &bfqq->entity,
1641 					&bfqd->queue_weights_tree);
1642 
1643 	if (bfqq->wr_coeff > 1)
1644 		bfqd->wr_busy_queues--;
1645 
1646 	bfqg_stats_update_dequeue(bfqq_group(bfqq));
1647 
1648 	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
1649 }
1650 
1651 /*
1652  * Called when an inactive queue receives a new request.
1653  */
1654 void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1655 {
1656 	bfq_log_bfqq(bfqd, bfqq, "add to busy");
1657 
1658 	bfq_activate_bfqq(bfqd, bfqq);
1659 
1660 	bfq_mark_bfqq_busy(bfqq);
1661 	bfqd->busy_queues++;
1662 
1663 	if (!bfqq->dispatched)
1664 		if (bfqq->wr_coeff == 1)
1665 			bfq_weights_tree_add(bfqd, &bfqq->entity,
1666 					     &bfqd->queue_weights_tree);
1667 
1668 	if (bfqq->wr_coeff > 1)
1669 		bfqd->wr_busy_queues++;
1670 }
1671