xref: /openbmc/linux/block/elevator.c (revision 83268fa6)
1 /*
2  *  Block device elevator/IO-scheduler.
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *
6  * 30042000 Jens Axboe <axboe@kernel.dk> :
7  *
8  * Split the elevator a bit so that it is possible to choose a different
9  * one or even write a new "plug in". There are three pieces:
10  * - elevator_fn, inserts a new request in the queue list
11  * - elevator_merge_fn, decides whether a new buffer can be merged with
12  *   an existing request
13  * - elevator_dequeue_fn, called when a request is taken off the active list
14  *
15  * 20082000 Dave Jones <davej@suse.de> :
16  * Removed tests for max-bomb-segments, which was breaking elvtune
17  *  when run without -bN
18  *
19  * Jens:
20  * - Rework again to work with bio instead of buffer_heads
21  * - lose bi_dev comparisons, partition handling is correct now
22  * - completely modularize elevator setup and teardown
23  *
24  */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/blktrace_api.h>
35 #include <linux/hash.h>
36 #include <linux/uaccess.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/blk-cgroup.h>
39 
40 #include <trace/events/block.h>
41 
42 #include "blk.h"
43 #include "blk-mq-sched.h"
44 #include "blk-pm.h"
45 #include "blk-wbt.h"
46 
47 static DEFINE_SPINLOCK(elv_list_lock);
48 static LIST_HEAD(elv_list);
49 
50 /*
51  * Merge hash: requests are hashed by their end sector for back merge lookups.
52  */
53 #define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
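/*
 * The hash key is a request's end sector, so a back merge candidate for a
 * bio is found by hashing the bio's start sector.  For example, a request
 * with blk_rq_pos() == 100 and blk_rq_sectors() == 8 covers sectors
 * 100..107 and hashes under key 108; a bio with bi_iter.bi_sector == 108
 * hits that key in elv_rqhash_find() and, if the merge checks pass, is
 * appended as a back merge (ELEVATOR_BACK_MERGE).
 */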
54 
55 /*
56  * Query the io scheduler to see if a bio being issued by the current
57  * process may be merged with rq.
58  */
59 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
60 {
61 	struct request_queue *q = rq->q;
62 	struct elevator_queue *e = q->elevator;
63 
64 	if (e->uses_mq && e->type->ops.mq.allow_merge)
65 		return e->type->ops.mq.allow_merge(q, rq, bio);
66 	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
67 		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
68 
69 	return 1;
70 }
71 
72 /*
73  * can we safely merge with this request?
74  */
75 bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
76 {
77 	if (!blk_rq_merge_ok(rq, bio))
78 		return false;
79 
80 	if (!elv_iosched_allow_bio_merge(rq, bio))
81 		return false;
82 
83 	return true;
84 }
85 EXPORT_SYMBOL(elv_bio_merge_ok);
86 
87 static bool elevator_match(const struct elevator_type *e, const char *name)
88 {
89 	if (!strcmp(e->elevator_name, name))
90 		return true;
91 	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
92 		return true;
93 
94 	return false;
95 }
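/*
 * The alias lets a scheduler be selected under a second name; e.g.
 * mq-deadline registers "deadline" as an alias so existing tooling that
 * writes "deadline" keeps working on mq devices.
 */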
96 
97 /*
98  * Return the scheduler with name 'name' and matching 'mq' capability.
99  */
100 static struct elevator_type *elevator_find(const char *name, bool mq)
101 {
102 	struct elevator_type *e;
103 
104 	list_for_each_entry(e, &elv_list, list) {
105 		if (elevator_match(e, name) && (mq == e->uses_mq))
106 			return e;
107 	}
108 
109 	return NULL;
110 }
111 
112 static void elevator_put(struct elevator_type *e)
113 {
114 	module_put(e->elevator_owner);
115 }
116 
117 static struct elevator_type *elevator_get(struct request_queue *q,
118 					  const char *name, bool try_loading)
119 {
120 	struct elevator_type *e;
121 
122 	spin_lock(&elv_list_lock);
123 
124 	e = elevator_find(name, q->mq_ops != NULL);
125 	if (!e && try_loading) {
126 		spin_unlock(&elv_list_lock);
127 		request_module("%s-iosched", name);
128 		spin_lock(&elv_list_lock);
129 		e = elevator_find(name, q->mq_ops != NULL);
130 	}
131 
132 	if (e && !try_module_get(e->elevator_owner))
133 		e = NULL;
134 
135 	spin_unlock(&elv_list_lock);
136 	return e;
137 }
138 
139 static char chosen_elevator[ELV_NAME_MAX];
140 
141 static int __init elevator_setup(char *str)
142 {
143 	/*
144 	 * Be backwards-compatible with previous kernels, so users
145 	 * won't get the wrong elevator.
146 	 */
147 	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
148 	return 1;
149 }
150 
151 __setup("elevator=", elevator_setup);
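/*
 * For example, booting with "elevator=deadline" stores "deadline" in
 * chosen_elevator; elevator_init() below tries that name before falling
 * back to CONFIG_DEFAULT_IOSCHED.  This only applies to legacy (non-mq)
 * queues, see load_default_elevator_module() below.
 */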
152 
153 /* called during boot to load the elevator chosen by the elevator param */
154 void __init load_default_elevator_module(void)
155 {
156 	struct elevator_type *e;
157 
158 	if (!chosen_elevator[0])
159 		return;
160 
161 	/*
162 	 * The boot parameter is deprecated and was never supported for MQ.
163 	 * Only look for non-mq schedulers from here.
164 	 */
165 	spin_lock(&elv_list_lock);
166 	e = elevator_find(chosen_elevator, false);
167 	spin_unlock(&elv_list_lock);
168 
169 	if (!e)
170 		request_module("%s-iosched", chosen_elevator);
171 }
172 
173 static struct kobj_type elv_ktype;
174 
175 struct elevator_queue *elevator_alloc(struct request_queue *q,
176 				  struct elevator_type *e)
177 {
178 	struct elevator_queue *eq;
179 
180 	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
181 	if (unlikely(!eq))
182 		return NULL;
183 
184 	eq->type = e;
185 	kobject_init(&eq->kobj, &elv_ktype);
186 	mutex_init(&eq->sysfs_lock);
187 	hash_init(eq->hash);
188 	eq->uses_mq = e->uses_mq;
189 
190 	return eq;
191 }
192 EXPORT_SYMBOL(elevator_alloc);
193 
194 static void elevator_release(struct kobject *kobj)
195 {
196 	struct elevator_queue *e;
197 
198 	e = container_of(kobj, struct elevator_queue, kobj);
199 	elevator_put(e->type);
200 	kfree(e);
201 }
202 
203 /*
204  * For non-mq devices, use the default elevator set by the boot parameter or
205  * by the config option.  Don't try to load modules, as we could be running
206  * off async and request_module() isn't allowed from async.
207  */
208 int elevator_init(struct request_queue *q)
209 {
210 	struct elevator_type *e = NULL;
211 	int err = 0;
212 
213 	/*
214 	 * q->sysfs_lock must be held to provide mutual exclusion between
215 	 * elevator_switch() and here.
216 	 */
217 	mutex_lock(&q->sysfs_lock);
218 	if (unlikely(q->elevator))
219 		goto out_unlock;
220 
221 	if (*chosen_elevator) {
222 		e = elevator_get(q, chosen_elevator, false);
223 		if (!e)
224 			printk(KERN_ERR "I/O scheduler %s not found\n",
225 							chosen_elevator);
226 	}
227 
228 	if (!e)
229 		e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
230 	if (!e) {
231 		printk(KERN_ERR
232 			"Default I/O scheduler not found. Using noop.\n");
233 		e = elevator_get(q, "noop", false);
234 	}
235 
236 	err = e->ops.sq.elevator_init_fn(q, e);
237 	if (err)
238 		elevator_put(e);
239 out_unlock:
240 	mutex_unlock(&q->sysfs_lock);
241 	return err;
242 }
243 
244 void elevator_exit(struct request_queue *q, struct elevator_queue *e)
245 {
246 	mutex_lock(&e->sysfs_lock);
247 	if (e->uses_mq && e->type->ops.mq.exit_sched)
248 		blk_mq_exit_sched(q, e);
249 	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
250 		e->type->ops.sq.elevator_exit_fn(e);
251 	mutex_unlock(&e->sysfs_lock);
252 
253 	kobject_put(&e->kobj);
254 }
255 
256 static inline void __elv_rqhash_del(struct request *rq)
257 {
258 	hash_del(&rq->hash);
259 	rq->rq_flags &= ~RQF_HASHED;
260 }
261 
262 void elv_rqhash_del(struct request_queue *q, struct request *rq)
263 {
264 	if (ELV_ON_HASH(rq))
265 		__elv_rqhash_del(rq);
266 }
267 EXPORT_SYMBOL_GPL(elv_rqhash_del);
268 
269 void elv_rqhash_add(struct request_queue *q, struct request *rq)
270 {
271 	struct elevator_queue *e = q->elevator;
272 
273 	BUG_ON(ELV_ON_HASH(rq));
274 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
275 	rq->rq_flags |= RQF_HASHED;
276 }
277 EXPORT_SYMBOL_GPL(elv_rqhash_add);
278 
279 void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
280 {
281 	__elv_rqhash_del(rq);
282 	elv_rqhash_add(q, rq);
283 }
284 
285 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
286 {
287 	struct elevator_queue *e = q->elevator;
288 	struct hlist_node *next;
289 	struct request *rq;
290 
291 	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
292 		BUG_ON(!ELV_ON_HASH(rq));
293 
294 		if (unlikely(!rq_mergeable(rq))) {
295 			__elv_rqhash_del(rq);
296 			continue;
297 		}
298 
299 		if (rq_hash_key(rq) == offset)
300 			return rq;
301 	}
302 
303 	return NULL;
304 }
305 
306 /*
307  * RB-tree support functions for inserting/lookup/removal of requests
308  * in a sorted RB tree.
309  */
310 void elv_rb_add(struct rb_root *root, struct request *rq)
311 {
312 	struct rb_node **p = &root->rb_node;
313 	struct rb_node *parent = NULL;
314 	struct request *__rq;
315 
316 	while (*p) {
317 		parent = *p;
318 		__rq = rb_entry(parent, struct request, rb_node);
319 
320 		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
321 			p = &(*p)->rb_left;
322 		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
323 			p = &(*p)->rb_right;
324 	}
325 
326 	rb_link_node(&rq->rb_node, parent, p);
327 	rb_insert_color(&rq->rb_node, root);
328 }
329 EXPORT_SYMBOL(elv_rb_add);
330 
331 void elv_rb_del(struct rb_root *root, struct request *rq)
332 {
333 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
334 	rb_erase(&rq->rb_node, root);
335 	RB_CLEAR_NODE(&rq->rb_node);
336 }
337 EXPORT_SYMBOL(elv_rb_del);
338 
339 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
340 {
341 	struct rb_node *n = root->rb_node;
342 	struct request *rq;
343 
344 	while (n) {
345 		rq = rb_entry(n, struct request, rb_node);
346 
347 		if (sector < blk_rq_pos(rq))
348 			n = n->rb_left;
349 		else if (sector > blk_rq_pos(rq))
350 			n = n->rb_right;
351 		else
352 			return rq;
353 	}
354 
355 	return NULL;
356 }
357 EXPORT_SYMBOL(elv_rb_find);
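/*
 * A scheduler that keeps a sector-sorted tree of requests can use the
 * helpers above both for dispatch ordering and for front merge lookups.
 * A rough sketch (modelled on the deadline schedulers; the sort_list
 * variable is illustrative):
 *
 *	elv_rb_add(&sort_list, rq);			// on insert
 *	...
 *	__rq = elv_rb_find(&sort_list, bio_end_sector(bio));
 *	if (__rq && elv_bio_merge_ok(__rq, bio))
 *		// __rq is a front merge candidate for bio
 *	...
 *	elv_rb_del(&sort_list, rq);			// on dispatch
 */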
358 
359 /*
360  * Insert rq into dispatch queue of q.  Queue lock must be held on
361  * entry.  rq is sorted into the dispatch queue. To be used by
362  * specific elevators.
363  */
364 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
365 {
366 	sector_t boundary;
367 	struct list_head *entry;
368 
369 	if (q->last_merge == rq)
370 		q->last_merge = NULL;
371 
372 	elv_rqhash_del(q, rq);
373 
374 	q->nr_sorted--;
375 
376 	boundary = q->end_sector;
377 	list_for_each_prev(entry, &q->queue_head) {
378 		struct request *pos = list_entry_rq(entry);
379 
380 		if (req_op(rq) != req_op(pos))
381 			break;
382 		if (rq_data_dir(rq) != rq_data_dir(pos))
383 			break;
384 		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
385 			break;
386 		if (blk_rq_pos(rq) >= boundary) {
387 			if (blk_rq_pos(pos) < boundary)
388 				continue;
389 		} else {
390 			if (blk_rq_pos(pos) >= boundary)
391 				break;
392 		}
393 		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
394 			break;
395 	}
396 
397 	list_add(&rq->queuelist, entry);
398 }
399 EXPORT_SYMBOL(elv_dispatch_sort);
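/*
 * The loop above implements a one-way elevator around q->end_sector:
 * requests at or beyond the boundary come first in ascending order,
 * followed by requests that have wrapped below it.  As an illustration,
 * with a boundary of 1000 and a dispatch list holding requests at sectors
 * 1100, 1300, 200 and 400, a new request at 1200 is inserted between 1100
 * and 1300, while one at 300 lands between 200 and 400.
 */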
400 
401 /*
402  * Insert rq into dispatch queue of q.  Queue lock must be held on
403  * entry.  rq is added to the back of the dispatch queue. To be used by
404  * specific elevators.
405  */
406 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
407 {
408 	if (q->last_merge == rq)
409 		q->last_merge = NULL;
410 
411 	elv_rqhash_del(q, rq);
412 
413 	q->nr_sorted--;
414 
415 	q->end_sector = rq_end_sector(rq);
416 	q->boundary_rq = rq;
417 	list_add_tail(&rq->queuelist, &q->queue_head);
418 }
419 EXPORT_SYMBOL(elv_dispatch_add_tail);
420 
421 enum elv_merge elv_merge(struct request_queue *q, struct request **req,
422 		struct bio *bio)
423 {
424 	struct elevator_queue *e = q->elevator;
425 	struct request *__rq;
426 
427 	/*
428 	 * Levels of merges:
429 	 * 	nomerges:  No merges at all attempted
430 	 * 	noxmerges: Only simple one-hit cache try
431 	 * 	merges:	   All merge tries attempted
432 	 */
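	/*
	 * These levels map onto the queue's "nomerges" sysfs attribute:
	 * writing 0 allows all merging, 1 keeps only the one-hit
	 * last_merge cache, and 2 disables merging entirely.
	 */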
433 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
434 		return ELEVATOR_NO_MERGE;
435 
436 	/*
437 	 * First try one-hit cache.
438 	 */
439 	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
440 		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
441 
442 		if (ret != ELEVATOR_NO_MERGE) {
443 			*req = q->last_merge;
444 			return ret;
445 		}
446 	}
447 
448 	if (blk_queue_noxmerges(q))
449 		return ELEVATOR_NO_MERGE;
450 
451 	/*
452 	 * See if our hash lookup can find a potential backmerge.
453 	 */
454 	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
455 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
456 		*req = __rq;
457 		return ELEVATOR_BACK_MERGE;
458 	}
459 
460 	if (e->uses_mq && e->type->ops.mq.request_merge)
461 		return e->type->ops.mq.request_merge(q, req, bio);
462 	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
463 		return e->type->ops.sq.elevator_merge_fn(q, req, bio);
464 
465 	return ELEVATOR_NO_MERGE;
466 }
467 
468 /*
469  * Attempt to do an insertion back merge. Only check for the case where
470  * we can append 'rq' to an existing request, so we can throw 'rq' away
471  * afterwards.
472  *
473  * Returns true if we merged, false otherwise
474  */
475 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
476 {
477 	struct request *__rq;
478 	bool ret;
479 
480 	if (blk_queue_nomerges(q))
481 		return false;
482 
483 	/*
484 	 * First try one-hit cache.
485 	 */
486 	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
487 		return true;
488 
489 	if (blk_queue_noxmerges(q))
490 		return false;
491 
492 	ret = false;
493 	/*
494 	 * See if our hash lookup can find a potential backmerge.
495 	 */
496 	while (1) {
497 		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
498 		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
499 			break;
500 
501 		/* The merged request could be merged with others, try again */
502 		ret = true;
503 		rq = __rq;
504 	}
505 
506 	return ret;
507 }
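/*
 * As an illustration of the loop above: with mergeable requests covering
 * sectors 92..99 and 100..107 already queued and hashed, inserting an rq
 * covering 108..115 first appends it to the 100..107 request (which then
 * spans 100..115), and the next pass appends that grown request to the
 * 92..99 one, leaving a single request covering 92..115.
 */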
508 
509 void elv_merged_request(struct request_queue *q, struct request *rq,
510 		enum elv_merge type)
511 {
512 	struct elevator_queue *e = q->elevator;
513 
514 	if (e->uses_mq && e->type->ops.mq.request_merged)
515 		e->type->ops.mq.request_merged(q, rq, type);
516 	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
517 		e->type->ops.sq.elevator_merged_fn(q, rq, type);
518 
519 	if (type == ELEVATOR_BACK_MERGE)
520 		elv_rqhash_reposition(q, rq);
521 
522 	q->last_merge = rq;
523 }
524 
525 void elv_merge_requests(struct request_queue *q, struct request *rq,
526 			     struct request *next)
527 {
528 	struct elevator_queue *e = q->elevator;
529 	bool next_sorted = false;
530 
531 	if (e->uses_mq && e->type->ops.mq.requests_merged)
532 		e->type->ops.mq.requests_merged(q, rq, next);
533 	else if (e->type->ops.sq.elevator_merge_req_fn) {
534 		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
535 		if (next_sorted)
536 			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
537 	}
538 
539 	elv_rqhash_reposition(q, rq);
540 
541 	if (next_sorted) {
542 		elv_rqhash_del(q, next);
543 		q->nr_sorted--;
544 	}
545 
546 	q->last_merge = rq;
547 }
548 
549 void elv_bio_merged(struct request_queue *q, struct request *rq,
550 			struct bio *bio)
551 {
552 	struct elevator_queue *e = q->elevator;
553 
554 	if (WARN_ON_ONCE(e->uses_mq))
555 		return;
556 
557 	if (e->type->ops.sq.elevator_bio_merged_fn)
558 		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
559 }
560 
561 void elv_requeue_request(struct request_queue *q, struct request *rq)
562 {
563 	/*
564 	 * It already went through dequeue, so we need to decrement the
565 	 * in_flight count again.
566 	 */
567 	if (blk_account_rq(rq)) {
568 		q->in_flight[rq_is_sync(rq)]--;
569 		if (rq->rq_flags & RQF_SORTED)
570 			elv_deactivate_rq(q, rq);
571 	}
572 
573 	rq->rq_flags &= ~RQF_STARTED;
574 
575 	blk_pm_requeue_request(rq);
576 
577 	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
578 }
579 
580 void elv_drain_elevator(struct request_queue *q)
581 {
582 	struct elevator_queue *e = q->elevator;
583 	static int printed;
584 
585 	if (WARN_ON_ONCE(e->uses_mq))
586 		return;
587 
588 	lockdep_assert_held(q->queue_lock);
589 
590 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
591 		;
592 	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
593 		printk(KERN_ERR "%s: forced dispatching is broken "
594 		       "(nr_sorted=%u), please report this\n",
595 		       q->elevator->type->elevator_name, q->nr_sorted);
596 	}
597 }
598 
599 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
600 {
601 	trace_block_rq_insert(q, rq);
602 
603 	blk_pm_add_request(q, rq);
604 
605 	rq->q = q;
606 
607 	if (rq->rq_flags & RQF_SOFTBARRIER) {
608 		/* barriers are scheduling boundary, update end_sector */
609 		if (!blk_rq_is_passthrough(rq)) {
610 			q->end_sector = rq_end_sector(rq);
611 			q->boundary_rq = rq;
612 		}
613 	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
614 		    (where == ELEVATOR_INSERT_SORT ||
615 		     where == ELEVATOR_INSERT_SORT_MERGE))
616 		where = ELEVATOR_INSERT_BACK;
617 
618 	switch (where) {
619 	case ELEVATOR_INSERT_REQUEUE:
620 	case ELEVATOR_INSERT_FRONT:
621 		rq->rq_flags |= RQF_SOFTBARRIER;
622 		list_add(&rq->queuelist, &q->queue_head);
623 		break;
624 
625 	case ELEVATOR_INSERT_BACK:
626 		rq->rq_flags |= RQF_SOFTBARRIER;
627 		elv_drain_elevator(q);
628 		list_add_tail(&rq->queuelist, &q->queue_head);
629 		/*
630 		 * We kick the queue here for the following reasons.
631 		 * - The elevator might have returned NULL previously
632 		 *   to delay requests and returned them now.  As the
633 		 *   queue wasn't empty before this request, ll_rw_blk
634 		 *   won't run the queue on return, resulting in hang.
635 		 * - Usually, back inserted requests won't be merged
636 		 *   with anything.  There's no point in delaying queue
637 		 *   processing.
638 		 */
639 		__blk_run_queue(q);
640 		break;
641 
642 	case ELEVATOR_INSERT_SORT_MERGE:
643 		/*
644 		 * If we succeed in merging this request with one in the
645 		 * queue already, we are done - rq has now been freed,
646 		 * so no need to do anything further.
647 		 */
648 		if (elv_attempt_insert_merge(q, rq))
649 			break;
650 		/* fall through */
651 	case ELEVATOR_INSERT_SORT:
652 		BUG_ON(blk_rq_is_passthrough(rq));
653 		rq->rq_flags |= RQF_SORTED;
654 		q->nr_sorted++;
655 		if (rq_mergeable(rq)) {
656 			elv_rqhash_add(q, rq);
657 			if (!q->last_merge)
658 				q->last_merge = rq;
659 		}
660 
661 		/*
662 		 * Some ioscheds (cfq) run q->request_fn directly, so
663 		 * rq cannot be accessed after calling
664 		 * elevator_add_req_fn.
665 		 */
666 		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
667 		break;
668 
669 	case ELEVATOR_INSERT_FLUSH:
670 		rq->rq_flags |= RQF_SOFTBARRIER;
671 		blk_insert_flush(rq);
672 		break;
673 	default:
674 		printk(KERN_ERR "%s: bad insertion point %d\n",
675 		       __func__, where);
676 		BUG();
677 	}
678 }
679 EXPORT_SYMBOL(__elv_add_request);
680 
681 void elv_add_request(struct request_queue *q, struct request *rq, int where)
682 {
683 	unsigned long flags;
684 
685 	spin_lock_irqsave(q->queue_lock, flags);
686 	__elv_add_request(q, rq, where);
687 	spin_unlock_irqrestore(q->queue_lock, flags);
688 }
689 EXPORT_SYMBOL(elv_add_request);
690 
691 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
692 {
693 	struct elevator_queue *e = q->elevator;
694 
695 	if (e->uses_mq && e->type->ops.mq.next_request)
696 		return e->type->ops.mq.next_request(q, rq);
697 	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
698 		return e->type->ops.sq.elevator_latter_req_fn(q, rq);
699 
700 	return NULL;
701 }
702 
703 struct request *elv_former_request(struct request_queue *q, struct request *rq)
704 {
705 	struct elevator_queue *e = q->elevator;
706 
707 	if (e->uses_mq && e->type->ops.mq.former_request)
708 		return e->type->ops.mq.former_request(q, rq);
709 	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
710 		return e->type->ops.sq.elevator_former_req_fn(q, rq);
711 	return NULL;
712 }
713 
714 int elv_set_request(struct request_queue *q, struct request *rq,
715 		    struct bio *bio, gfp_t gfp_mask)
716 {
717 	struct elevator_queue *e = q->elevator;
718 
719 	if (WARN_ON_ONCE(e->uses_mq))
720 		return 0;
721 
722 	if (e->type->ops.sq.elevator_set_req_fn)
723 		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
724 	return 0;
725 }
726 
727 void elv_put_request(struct request_queue *q, struct request *rq)
728 {
729 	struct elevator_queue *e = q->elevator;
730 
731 	if (WARN_ON_ONCE(e->uses_mq))
732 		return;
733 
734 	if (e->type->ops.sq.elevator_put_req_fn)
735 		e->type->ops.sq.elevator_put_req_fn(rq);
736 }
737 
738 int elv_may_queue(struct request_queue *q, unsigned int op)
739 {
740 	struct elevator_queue *e = q->elevator;
741 
742 	if (WARN_ON_ONCE(e->uses_mq))
743 		return 0;
744 
745 	if (e->type->ops.sq.elevator_may_queue_fn)
746 		return e->type->ops.sq.elevator_may_queue_fn(q, op);
747 
748 	return ELV_MQUEUE_MAY;
749 }
750 
751 void elv_completed_request(struct request_queue *q, struct request *rq)
752 {
753 	struct elevator_queue *e = q->elevator;
754 
755 	if (WARN_ON_ONCE(e->uses_mq))
756 		return;
757 
758 	/*
759 	 * The request is released from the driver, so io must be done
760 	 */
761 	if (blk_account_rq(rq)) {
762 		q->in_flight[rq_is_sync(rq)]--;
763 		if ((rq->rq_flags & RQF_SORTED) &&
764 		    e->type->ops.sq.elevator_completed_req_fn)
765 			e->type->ops.sq.elevator_completed_req_fn(q, rq);
766 	}
767 }
768 
769 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
770 
771 static ssize_t
772 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
773 {
774 	struct elv_fs_entry *entry = to_elv(attr);
775 	struct elevator_queue *e;
776 	ssize_t error;
777 
778 	if (!entry->show)
779 		return -EIO;
780 
781 	e = container_of(kobj, struct elevator_queue, kobj);
782 	mutex_lock(&e->sysfs_lock);
783 	error = e->type ? entry->show(e, page) : -ENOENT;
784 	mutex_unlock(&e->sysfs_lock);
785 	return error;
786 }
787 
788 static ssize_t
789 elv_attr_store(struct kobject *kobj, struct attribute *attr,
790 	       const char *page, size_t length)
791 {
792 	struct elv_fs_entry *entry = to_elv(attr);
793 	struct elevator_queue *e;
794 	ssize_t error;
795 
796 	if (!entry->store)
797 		return -EIO;
798 
799 	e = container_of(kobj, struct elevator_queue, kobj);
800 	mutex_lock(&e->sysfs_lock);
801 	error = e->type ? entry->store(e, page, length) : -ENOENT;
802 	mutex_unlock(&e->sysfs_lock);
803 	return error;
804 }
805 
806 static const struct sysfs_ops elv_sysfs_ops = {
807 	.show	= elv_attr_show,
808 	.store	= elv_attr_store,
809 };
810 
811 static struct kobj_type elv_ktype = {
812 	.sysfs_ops	= &elv_sysfs_ops,
813 	.release	= elevator_release,
814 };
815 
816 int elv_register_queue(struct request_queue *q)
817 {
818 	struct elevator_queue *e = q->elevator;
819 	int error;
820 
821 	lockdep_assert_held(&q->sysfs_lock);
822 
823 	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
824 	if (!error) {
825 		struct elv_fs_entry *attr = e->type->elevator_attrs;
826 		if (attr) {
827 			while (attr->attr.name) {
828 				if (sysfs_create_file(&e->kobj, &attr->attr))
829 					break;
830 				attr++;
831 			}
832 		}
833 		kobject_uevent(&e->kobj, KOBJ_ADD);
834 		e->registered = 1;
835 		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
836 			e->type->ops.sq.elevator_registered_fn(q);
837 	}
838 	return error;
839 }
840 
841 void elv_unregister_queue(struct request_queue *q)
842 {
843 	lockdep_assert_held(&q->sysfs_lock);
844 
845 	if (q) {
846 		struct elevator_queue *e = q->elevator;
847 
848 		kobject_uevent(&e->kobj, KOBJ_REMOVE);
849 		kobject_del(&e->kobj);
850 		e->registered = 0;
851 		/* Re-enable throttling in case elevator disabled it */
852 		wbt_enable_default(q);
853 	}
854 }
855 
856 int elv_register(struct elevator_type *e)
857 {
858 	char *def = "";
859 
860 	/* create icq_cache if requested */
861 	if (e->icq_size) {
862 		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
863 		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
864 			return -EINVAL;
865 
866 		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
867 			 "%s_io_cq", e->elevator_name);
868 		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
869 						 e->icq_align, 0, NULL);
870 		if (!e->icq_cache)
871 			return -ENOMEM;
872 	}
873 
874 	/* register, don't allow duplicate names */
875 	spin_lock(&elv_list_lock);
876 	if (elevator_find(e->elevator_name, e->uses_mq)) {
877 		spin_unlock(&elv_list_lock);
878 		kmem_cache_destroy(e->icq_cache);
879 		return -EBUSY;
880 	}
881 	list_add_tail(&e->list, &elv_list);
882 	spin_unlock(&elv_list_lock);
883 
884 	/* print pretty message */
885 	if (elevator_match(e, chosen_elevator) ||
886 			(!*chosen_elevator &&
887 			 elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
888 				def = " (default)";
889 
890 	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
891 								def);
892 	return 0;
893 }
894 EXPORT_SYMBOL_GPL(elv_register);
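/*
 * A legacy (single-queue) scheduler registers its elevator_type from
 * module init and removes it on exit.  A minimal sketch, loosely modelled
 * on the noop scheduler (the foo_* callbacks are illustrative):
 *
 *	static struct elevator_type elevator_foo = {
 *		.ops.sq = {
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_iosched_init(void)
 *	{
 *		return elv_register(&elevator_foo);
 *	}
 *
 *	static void __exit foo_iosched_exit(void)
 *	{
 *		elv_unregister(&elevator_foo);
 *	}
 */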
895 
896 void elv_unregister(struct elevator_type *e)
897 {
898 	/* unregister */
899 	spin_lock(&elv_list_lock);
900 	list_del_init(&e->list);
901 	spin_unlock(&elv_list_lock);
902 
903 	/*
904 	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
905 	 * sure all RCU operations are complete before proceeding.
906 	 */
907 	if (e->icq_cache) {
908 		rcu_barrier();
909 		kmem_cache_destroy(e->icq_cache);
910 		e->icq_cache = NULL;
911 	}
912 }
913 EXPORT_SYMBOL_GPL(elv_unregister);
914 
915 int elevator_switch_mq(struct request_queue *q,
916 			      struct elevator_type *new_e)
917 {
918 	int ret;
919 
920 	lockdep_assert_held(&q->sysfs_lock);
921 
922 	if (q->elevator) {
923 		if (q->elevator->registered)
924 			elv_unregister_queue(q);
925 		ioc_clear_queue(q);
926 		elevator_exit(q, q->elevator);
927 	}
928 
929 	ret = blk_mq_init_sched(q, new_e);
930 	if (ret)
931 		goto out;
932 
933 	if (new_e) {
934 		ret = elv_register_queue(q);
935 		if (ret) {
936 			elevator_exit(q, q->elevator);
937 			goto out;
938 		}
939 	}
940 
941 	if (new_e)
942 		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
943 	else
944 		blk_add_trace_msg(q, "elv switch: none");
945 
946 out:
947 	return ret;
948 }
949 
950 /*
951  * For single-queue blk-mq devices, default to mq-deadline if it is available.
952  * If mq-deadline isn't available, or the device has multiple queues, default
953  * to "none".
954  */
955 int elevator_init_mq(struct request_queue *q)
956 {
957 	struct elevator_type *e;
958 	int err = 0;
959 
960 	if (q->nr_hw_queues != 1)
961 		return 0;
962 
963 	/*
964 	 * q->sysfs_lock must be held to provide mutual exclusion between
965 	 * elevator_switch() and here.
966 	 */
967 	mutex_lock(&q->sysfs_lock);
968 	if (unlikely(q->elevator))
969 		goto out_unlock;
970 
971 	e = elevator_get(q, "mq-deadline", false);
972 	if (!e)
973 		goto out_unlock;
974 
975 	err = blk_mq_init_sched(q, e);
976 	if (err)
977 		elevator_put(e);
978 out_unlock:
979 	mutex_unlock(&q->sysfs_lock);
980 	return err;
981 }
982 
983 
984 /*
985  * Switch to new_e io scheduler.  Be careful not to introduce deadlocks - we
986  * don't free the old io scheduler before we have allocated what we need for
987  * the new one.  This way we have a chance of going back to the old one if
988  * the new one fails init for some reason.
989  */
990 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
991 {
992 	struct elevator_queue *old = q->elevator;
993 	bool old_registered = false;
994 	int err;
995 
996 	lockdep_assert_held(&q->sysfs_lock);
997 
998 	if (q->mq_ops) {
999 		blk_mq_freeze_queue(q);
1000 		blk_mq_quiesce_queue(q);
1001 
1002 		err = elevator_switch_mq(q, new_e);
1003 
1004 		blk_mq_unquiesce_queue(q);
1005 		blk_mq_unfreeze_queue(q);
1006 
1007 		return err;
1008 	}
1009 
1010 	/*
1011 	 * Turn on BYPASS and drain all requests with elevator private data.
1012 	 * Block layer doesn't call into a quiesced elevator - all requests
1013 	 * are directly put on the dispatch list without elevator data
1014 	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
1015 	 * merge happens either.
1016 	 */
1017 	if (old) {
1018 		old_registered = old->registered;
1019 
1020 		blk_queue_bypass_start(q);
1021 
1022 		/* unregister and clear all auxiliary data of the old elevator */
1023 		if (old_registered)
1024 			elv_unregister_queue(q);
1025 
1026 		ioc_clear_queue(q);
1027 	}
1028 
1029 	/* allocate, init and register new elevator */
1030 	err = new_e->ops.sq.elevator_init_fn(q, new_e);
1031 	if (err)
1032 		goto fail_init;
1033 
1034 	err = elv_register_queue(q);
1035 	if (err)
1036 		goto fail_register;
1037 
1038 	/* done, kill the old one and finish */
1039 	if (old) {
1040 		elevator_exit(q, old);
1041 		blk_queue_bypass_end(q);
1042 	}
1043 
1044 	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1045 
1046 	return 0;
1047 
1048 fail_register:
1049 	elevator_exit(q, q->elevator);
1050 fail_init:
1051 	/* switch failed, restore and re-register old elevator */
1052 	if (old) {
1053 		q->elevator = old;
1054 		elv_register_queue(q);
1055 		blk_queue_bypass_end(q);
1056 	}
1057 
1058 	return err;
1059 }
1060 
1061 /*
1062  * Switch this queue to the given IO scheduler.
1063  */
1064 static int __elevator_change(struct request_queue *q, const char *name)
1065 {
1066 	char elevator_name[ELV_NAME_MAX];
1067 	struct elevator_type *e;
1068 
1069 	/* Make sure queue is not in the middle of being removed */
1070 	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
1071 		return -ENOENT;
1072 
1073 	/*
1074 	 * Special case for mq, turn off scheduling
1075 	 */
1076 	if (q->mq_ops && !strncmp(name, "none", 4))
1077 		return elevator_switch(q, NULL);
1078 
1079 	strlcpy(elevator_name, name, sizeof(elevator_name));
1080 	e = elevator_get(q, strstrip(elevator_name), true);
1081 	if (!e)
1082 		return -EINVAL;
1083 
1084 	if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
1085 		elevator_put(e);
1086 		return 0;
1087 	}
1088 
1089 	return elevator_switch(q, e);
1090 }
1091 
1092 static inline bool elv_support_iosched(struct request_queue *q)
1093 {
1094 	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
1095 				BLK_MQ_F_NO_SCHED))
1096 		return false;
1097 	return true;
1098 }
1099 
1100 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1101 			  size_t count)
1102 {
1103 	int ret;
1104 
1105 	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
1106 		return count;
1107 
1108 	ret = __elevator_change(q, name);
1109 	if (!ret)
1110 		return count;
1111 
1112 	return ret;
1113 }
1114 
1115 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1116 {
1117 	struct elevator_queue *e = q->elevator;
1118 	struct elevator_type *elv = NULL;
1119 	struct elevator_type *__e;
1120 	bool uses_mq = q->mq_ops != NULL;
1121 	int len = 0;
1122 
1123 	if (!queue_is_rq_based(q))
1124 		return sprintf(name, "none\n");
1125 
1126 	if (!q->elevator)
1127 		len += sprintf(name+len, "[none] ");
1128 	else
1129 		elv = e->type;
1130 
1131 	spin_lock(&elv_list_lock);
1132 	list_for_each_entry(__e, &elv_list, list) {
1133 		if (elv && elevator_match(elv, __e->elevator_name) &&
1134 		    (__e->uses_mq == uses_mq)) {
1135 			len += sprintf(name+len, "[%s] ", elv->elevator_name);
1136 			continue;
1137 		}
1138 		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
1139 			len += sprintf(name+len, "%s ", __e->elevator_name);
1140 		else if (!__e->uses_mq && !q->mq_ops)
1141 			len += sprintf(name+len, "%s ", __e->elevator_name);
1142 	}
1143 	spin_unlock(&elv_list_lock);
1144 
1145 	if (q->mq_ops && q->elevator)
1146 		len += sprintf(name+len, "none");
1147 
1148 	len += sprintf(name+len, "\n");
1149 	return len;
1150 }
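/*
 * elv_iosched_show() and elv_iosched_store() back the per-queue
 * "scheduler" sysfs attribute.  A typical exchange from userspace (device
 * name and the set of available schedulers are illustrative):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop [deadline] cfq
 */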
1151 
1152 struct request *elv_rb_former_request(struct request_queue *q,
1153 				      struct request *rq)
1154 {
1155 	struct rb_node *rbprev = rb_prev(&rq->rb_node);
1156 
1157 	if (rbprev)
1158 		return rb_entry_rq(rbprev);
1159 
1160 	return NULL;
1161 }
1162 EXPORT_SYMBOL(elv_rb_former_request);
1163 
1164 struct request *elv_rb_latter_request(struct request_queue *q,
1165 				      struct request *rq)
1166 {
1167 	struct rb_node *rbnext = rb_next(&rq->rb_node);
1168 
1169 	if (rbnext)
1170 		return rb_entry_rq(rbnext);
1171 
1172 	return NULL;
1173 }
1174 EXPORT_SYMBOL(elv_rb_latter_request);
1175