xref: /openbmc/linux/block/elevator.c (revision 65417d9f)
1 /*
2  *  Block device elevator/IO-scheduler.
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *
6  * 30042000 Jens Axboe <axboe@kernel.dk> :
7  *
8  * Split the elevator a bit so that it is possible to choose a different
9  * one or even write a new "plug in". There are three pieces:
10  * - elevator_fn, inserts a new request in the queue list
11  * - elevator_merge_fn, decides whether a new buffer can be merged with
12  *   an existing request
13  * - elevator_dequeue_fn, called when a request is taken off the active list
14  *
15  * 20082000 Dave Jones <davej@suse.de> :
16  * Removed tests for max-bomb-segments, which was breaking elvtune
17  *  when run without -bN
18  *
19  * Jens:
20  * - Rework again to work with bio instead of buffer_heads
21  * - lose bi_dev comparisons, partition handling is correct now
22  * - completely modularize elevator setup and teardown
23  *
24  */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/blktrace_api.h>
35 #include <linux/hash.h>
36 #include <linux/uaccess.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/blk-cgroup.h>
39 
40 #include <trace/events/block.h>
41 
42 #include "blk.h"
43 #include "blk-mq-sched.h"
44 #include "blk-wbt.h"
45 
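/*
 * All registered elevator types are kept on elv_list, protected by
 * elv_list_lock.  Lookups pin the owning module via elevator_get() and
 * release it again with elevator_put().
 */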
46 static DEFINE_SPINLOCK(elv_list_lock);
47 static LIST_HEAD(elv_list);
48 
49 /*
50  * Merge hash: requests are hashed on the sector immediately following them.
51  */
52 #define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
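/*
 * For example, a request starting at sector 100 that spans 8 sectors gets
 * hash key 108 - the first sector after the request - so a bio beginning at
 * sector 108 will find it as a back-merge candidate via elv_rqhash_find().
 */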
53 
54 /*
55  * Ask the io scheduler whether the bio being issued by the current process
56  * may be merged with rq.
57  */
58 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
59 {
60 	struct request_queue *q = rq->q;
61 	struct elevator_queue *e = q->elevator;
62 
63 	if (e->uses_mq && e->type->ops.mq.allow_merge)
64 		return e->type->ops.mq.allow_merge(q, rq, bio);
65 	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
66 		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
67 
68 	return 1;
69 }
70 
71 /*
72  * can we safely merge with this request?
73  */
74 bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
75 {
76 	if (!blk_rq_merge_ok(rq, bio))
77 		return false;
78 
79 	if (!elv_iosched_allow_bio_merge(rq, bio))
80 		return false;
81 
82 	return true;
83 }
84 EXPORT_SYMBOL(elv_bio_merge_ok);
85 
86 static bool elevator_match(const struct elevator_type *e, const char *name)
87 {
88 	if (!strcmp(e->elevator_name, name))
89 		return true;
90 	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
91 		return true;
92 
93 	return false;
94 }
95 
96 /*
97  * Return the scheduler with name 'name' and with matching 'mq' capability.
98  */
99 static struct elevator_type *elevator_find(const char *name, bool mq)
100 {
101 	struct elevator_type *e;
102 
103 	list_for_each_entry(e, &elv_list, list) {
104 		if (elevator_match(e, name) && (mq == e->uses_mq))
105 			return e;
106 	}
107 
108 	return NULL;
109 }
110 
111 static void elevator_put(struct elevator_type *e)
112 {
113 	module_put(e->elevator_owner);
114 }
115 
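/*
 * Look up an elevator type by name, matching the queue's mq capability
 * (q->mq_ops).  If it isn't registered yet and @try_loading is set, try
 * request_module("<name>-iosched") and look again.  On success the owning
 * module is pinned with try_module_get(); drop it with elevator_put().
 */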
116 static struct elevator_type *elevator_get(struct request_queue *q,
117 					  const char *name, bool try_loading)
118 {
119 	struct elevator_type *e;
120 
121 	spin_lock(&elv_list_lock);
122 
123 	e = elevator_find(name, q->mq_ops != NULL);
124 	if (!e && try_loading) {
125 		spin_unlock(&elv_list_lock);
126 		request_module("%s-iosched", name);
127 		spin_lock(&elv_list_lock);
128 		e = elevator_find(name, q->mq_ops != NULL);
129 	}
130 
131 	if (e && !try_module_get(e->elevator_owner))
132 		e = NULL;
133 
134 	spin_unlock(&elv_list_lock);
135 	return e;
136 }
137 
138 static char chosen_elevator[ELV_NAME_MAX];
139 
140 static int __init elevator_setup(char *str)
141 {
142 	/*
143 	 * Be backwards-compatible with previous kernels, so users
144 	 * won't get the wrong elevator.
145 	 */
146 	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
147 	return 1;
148 }
149 
150 __setup("elevator=", elevator_setup);
151 
152 /* called during boot to load the elevator chosen by the elevator param */
153 void __init load_default_elevator_module(void)
154 {
155 	struct elevator_type *e;
156 
157 	if (!chosen_elevator[0])
158 		return;
159 
160 	/*
161 	 * The boot parameter is deprecated and has never been supported for MQ.
162 	 * Only look for non-mq schedulers from here.
163 	 */
164 	spin_lock(&elv_list_lock);
165 	e = elevator_find(chosen_elevator, false);
166 	spin_unlock(&elv_list_lock);
167 
168 	if (!e)
169 		request_module("%s-iosched", chosen_elevator);
170 }
171 
172 static struct kobj_type elv_ktype;
173 
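/*
 * Allocate and initialise an elevator_queue for type @e on the queue's home
 * NUMA node.  The embedded kobject's release handler (elevator_release)
 * drops the reference on the elevator type and frees the structure, so the
 * final kobject_put() is what tears this down.
 */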
174 struct elevator_queue *elevator_alloc(struct request_queue *q,
175 				  struct elevator_type *e)
176 {
177 	struct elevator_queue *eq;
178 
179 	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
180 	if (unlikely(!eq))
181 		return NULL;
182 
183 	eq->type = e;
184 	kobject_init(&eq->kobj, &elv_ktype);
185 	mutex_init(&eq->sysfs_lock);
186 	hash_init(eq->hash);
187 	eq->uses_mq = e->uses_mq;
188 
189 	return eq;
190 }
191 EXPORT_SYMBOL(elevator_alloc);
192 
193 static void elevator_release(struct kobject *kobj)
194 {
195 	struct elevator_queue *e;
196 
197 	e = container_of(kobj, struct elevator_queue, kobj);
198 	elevator_put(e->type);
199 	kfree(e);
200 }
201 
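/*
 * Pick and initialise an io scheduler for @q.  Selection order: an explicit
 * @name, then the "elevator=" boot parameter (legacy queues only), then
 * mq-deadline for single hw queue blk-mq devices (or "none" if that fails or
 * there are multiple hw queues), then CONFIG_DEFAULT_IOSCHED for legacy
 * queues, falling back to noop if the default cannot be found.
 */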
202 int elevator_init(struct request_queue *q, char *name)
203 {
204 	struct elevator_type *e = NULL;
205 	int err;
206 
207 	/*
208 	 * q->sysfs_lock must be held to provide mutual exclusion between
209 	 * elevator_switch() and here.
210 	 */
211 	lockdep_assert_held(&q->sysfs_lock);
212 
213 	if (unlikely(q->elevator))
214 		return 0;
215 
216 	INIT_LIST_HEAD(&q->queue_head);
217 	q->last_merge = NULL;
218 	q->end_sector = 0;
219 	q->boundary_rq = NULL;
220 
221 	if (name) {
222 		e = elevator_get(q, name, true);
223 		if (!e)
224 			return -EINVAL;
225 	}
226 
227 	/*
228 	 * Use the default elevator specified by config boot param for
229 	 * non-mq devices, or by the config option. Don't try to load modules,
230 	 * as we could be running off async probe and request_module() isn't
231 	 * allowed from async context.
232 	 */
233 	if (!e && !q->mq_ops && *chosen_elevator) {
234 		e = elevator_get(q, chosen_elevator, false);
235 		if (!e)
236 			printk(KERN_ERR "I/O scheduler %s not found\n",
237 							chosen_elevator);
238 	}
239 
240 	if (!e) {
241 		/*
242 		 * For blk-mq devices, we default to using mq-deadline,
243 		 * if available, for single queue devices. If deadline
244 		 * isn't available OR we have multiple queues, default
245 		 * to "none".
246 		 */
247 		if (q->mq_ops) {
248 			if (q->nr_hw_queues == 1)
249 				e = elevator_get(q, "mq-deadline", false);
250 			if (!e)
251 				return 0;
252 		} else
253 			e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
254 
255 		if (!e) {
256 			printk(KERN_ERR
257 				"Default I/O scheduler not found. " \
258 				"Using noop.\n");
259 			e = elevator_get(q, "noop", false);
260 		}
261 	}
262 
263 	if (e->uses_mq)
264 		err = blk_mq_init_sched(q, e);
265 	else
266 		err = e->ops.sq.elevator_init_fn(q, e);
267 	if (err)
268 		elevator_put(e);
269 	return err;
270 }
271 EXPORT_SYMBOL(elevator_init);
272 
273 void elevator_exit(struct request_queue *q, struct elevator_queue *e)
274 {
275 	mutex_lock(&e->sysfs_lock);
276 	if (e->uses_mq && e->type->ops.mq.exit_sched)
277 		blk_mq_exit_sched(q, e);
278 	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
279 		e->type->ops.sq.elevator_exit_fn(e);
280 	mutex_unlock(&e->sysfs_lock);
281 
282 	kobject_put(&e->kobj);
283 }
284 EXPORT_SYMBOL(elevator_exit);
285 
286 static inline void __elv_rqhash_del(struct request *rq)
287 {
288 	hash_del(&rq->hash);
289 	rq->rq_flags &= ~RQF_HASHED;
290 }
291 
292 void elv_rqhash_del(struct request_queue *q, struct request *rq)
293 {
294 	if (ELV_ON_HASH(rq))
295 		__elv_rqhash_del(rq);
296 }
297 EXPORT_SYMBOL_GPL(elv_rqhash_del);
298 
299 void elv_rqhash_add(struct request_queue *q, struct request *rq)
300 {
301 	struct elevator_queue *e = q->elevator;
302 
303 	BUG_ON(ELV_ON_HASH(rq));
304 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
305 	rq->rq_flags |= RQF_HASHED;
306 }
307 EXPORT_SYMBOL_GPL(elv_rqhash_add);
308 
309 void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
310 {
311 	__elv_rqhash_del(rq);
312 	elv_rqhash_add(q, rq);
313 }
314 
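/*
 * Find a request whose end sector equals @offset, i.e. a back-merge
 * candidate for a bio starting at @offset.  Entries that are no longer
 * mergeable are pruned from the hash as they are encountered.
 */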
315 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
316 {
317 	struct elevator_queue *e = q->elevator;
318 	struct hlist_node *next;
319 	struct request *rq;
320 
321 	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
322 		BUG_ON(!ELV_ON_HASH(rq));
323 
324 		if (unlikely(!rq_mergeable(rq))) {
325 			__elv_rqhash_del(rq);
326 			continue;
327 		}
328 
329 		if (rq_hash_key(rq) == offset)
330 			return rq;
331 	}
332 
333 	return NULL;
334 }
335 
336 /*
337  * RB-tree support functions for insertion/lookup/removal of requests
338  * in a sorted RB tree.
339  */
340 void elv_rb_add(struct rb_root *root, struct request *rq)
341 {
342 	struct rb_node **p = &root->rb_node;
343 	struct rb_node *parent = NULL;
344 	struct request *__rq;
345 
346 	while (*p) {
347 		parent = *p;
348 		__rq = rb_entry(parent, struct request, rb_node);
349 
350 		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
351 			p = &(*p)->rb_left;
352 		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
353 			p = &(*p)->rb_right;
354 	}
355 
356 	rb_link_node(&rq->rb_node, parent, p);
357 	rb_insert_color(&rq->rb_node, root);
358 }
359 EXPORT_SYMBOL(elv_rb_add);
360 
361 void elv_rb_del(struct rb_root *root, struct request *rq)
362 {
363 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
364 	rb_erase(&rq->rb_node, root);
365 	RB_CLEAR_NODE(&rq->rb_node);
366 }
367 EXPORT_SYMBOL(elv_rb_del);
368 
369 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
370 {
371 	struct rb_node *n = root->rb_node;
372 	struct request *rq;
373 
374 	while (n) {
375 		rq = rb_entry(n, struct request, rb_node);
376 
377 		if (sector < blk_rq_pos(rq))
378 			n = n->rb_left;
379 		else if (sector > blk_rq_pos(rq))
380 			n = n->rb_right;
381 		else
382 			return rq;
383 	}
384 
385 	return NULL;
386 }
387 EXPORT_SYMBOL(elv_rb_find);
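/*
 * Illustrative only: a scheduler typically keeps one rb_root per data
 * direction and uses the helpers above to maintain a sector-sorted view of
 * its requests, roughly like this (hypothetical) sketch:
 *
 *	struct rb_root sort_list = RB_ROOT;
 *
 *	static void my_add_request(struct request *rq)
 *	{
 *		elv_rb_add(&sort_list, rq);	(keyed by blk_rq_pos(rq))
 *	}
 *
 *	static struct request *my_lookup(sector_t sector)
 *	{
 *		return elv_rb_find(&sort_list, sector);
 *	}
 *
 * The deadline and mq-deadline schedulers follow this pattern.
 */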
388 
389 /*
390  * Insert rq into the dispatch queue of q.  The queue lock must be held on
391  * entry.  rq is sorted into position in the dispatch queue. To be used by
392  * specific elevators.
393  */
394 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
395 {
396 	sector_t boundary;
397 	struct list_head *entry;
398 
399 	if (q->last_merge == rq)
400 		q->last_merge = NULL;
401 
402 	elv_rqhash_del(q, rq);
403 
404 	q->nr_sorted--;
405 
406 	boundary = q->end_sector;
407 	list_for_each_prev(entry, &q->queue_head) {
408 		struct request *pos = list_entry_rq(entry);
409 
410 		if (req_op(rq) != req_op(pos))
411 			break;
412 		if (rq_data_dir(rq) != rq_data_dir(pos))
413 			break;
414 		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
415 			break;
416 		if (blk_rq_pos(rq) >= boundary) {
417 			if (blk_rq_pos(pos) < boundary)
418 				continue;
419 		} else {
420 			if (blk_rq_pos(pos) >= boundary)
421 				break;
422 		}
423 		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
424 			break;
425 	}
426 
427 	list_add(&rq->queuelist, entry);
428 }
429 EXPORT_SYMBOL(elv_dispatch_sort);
430 
431 /*
432  * Insert rq into dispatch queue of q.  Queue lock must be held on
433  * entry.  rq is added to the back of the dispatch queue. To be used by
434  * specific elevators.
435  */
436 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
437 {
438 	if (q->last_merge == rq)
439 		q->last_merge = NULL;
440 
441 	elv_rqhash_del(q, rq);
442 
443 	q->nr_sorted--;
444 
445 	q->end_sector = rq_end_sector(rq);
446 	q->boundary_rq = rq;
447 	list_add_tail(&rq->queuelist, &q->queue_head);
448 }
449 EXPORT_SYMBOL(elv_dispatch_add_tail);
450 
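/*
 * Try to find an existing request that @bio can merge with.  Unless merging
 * is disabled for the queue, this checks the one-hit q->last_merge cache,
 * then the back-merge hash, then asks the scheduler itself.  On success
 * *req points at the chosen request and the merge type (e.g.
 * ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE) is returned; otherwise
 * ELEVATOR_NO_MERGE.
 */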
451 enum elv_merge elv_merge(struct request_queue *q, struct request **req,
452 		struct bio *bio)
453 {
454 	struct elevator_queue *e = q->elevator;
455 	struct request *__rq;
456 
457 	/*
458 	 * Levels of merges:
459 	 * 	nomerges:  No merges at all attempted
460 	 * 	noxmerges: Only simple one-hit cache try
461 	 * 	merges:	   All merge tries attempted
462 	 */
463 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
464 		return ELEVATOR_NO_MERGE;
465 
466 	/*
467 	 * First try one-hit cache.
468 	 */
469 	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
470 		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
471 
472 		if (ret != ELEVATOR_NO_MERGE) {
473 			*req = q->last_merge;
474 			return ret;
475 		}
476 	}
477 
478 	if (blk_queue_noxmerges(q))
479 		return ELEVATOR_NO_MERGE;
480 
481 	/*
482 	 * See if our hash lookup can find a potential backmerge.
483 	 */
484 	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
485 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
486 		*req = __rq;
487 		return ELEVATOR_BACK_MERGE;
488 	}
489 
490 	if (e->uses_mq && e->type->ops.mq.request_merge)
491 		return e->type->ops.mq.request_merge(q, req, bio);
492 	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
493 		return e->type->ops.sq.elevator_merge_fn(q, req, bio);
494 
495 	return ELEVATOR_NO_MERGE;
496 }
497 
498 /*
499  * Attempt to do an insertion back merge. Only check for the case where
500  * we can append 'rq' to an existing request, so we can throw 'rq' away
501  * afterwards.
502  *
503  * Returns true if we merged, false otherwise
504  */
505 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
506 {
507 	struct request *__rq;
508 	bool ret;
509 
510 	if (blk_queue_nomerges(q))
511 		return false;
512 
513 	/*
514 	 * First try one-hit cache.
515 	 */
516 	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
517 		return true;
518 
519 	if (blk_queue_noxmerges(q))
520 		return false;
521 
522 	ret = false;
523 	/*
524 	 * See if our hash lookup can find a potential backmerge.
525 	 */
526 	while (1) {
527 		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
528 		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
529 			break;
530 
531 		/* The merged request could be merged with others, try again */
532 		ret = true;
533 		rq = __rq;
534 	}
535 
536 	return ret;
537 }
538 
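/*
 * Called after @bio has been merged into @rq.  Lets the scheduler update its
 * own state, repositions @rq in the merge hash after a back merge (its end
 * sector, and therefore its hash key, changed) and refreshes q->last_merge.
 */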
539 void elv_merged_request(struct request_queue *q, struct request *rq,
540 		enum elv_merge type)
541 {
542 	struct elevator_queue *e = q->elevator;
543 
544 	if (e->uses_mq && e->type->ops.mq.request_merged)
545 		e->type->ops.mq.request_merged(q, rq, type);
546 	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
547 		e->type->ops.sq.elevator_merged_fn(q, rq, type);
548 
549 	if (type == ELEVATOR_BACK_MERGE)
550 		elv_rqhash_reposition(q, rq);
551 
552 	q->last_merge = rq;
553 }
554 
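/*
 * Called when @next has been merged into @rq and is about to go away.  The
 * scheduler gets a chance to drop its references to @next; @rq is rehashed
 * because its end sector changed, and if @next was sorted it is removed from
 * the hash and from the sorted count.
 */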
555 void elv_merge_requests(struct request_queue *q, struct request *rq,
556 			     struct request *next)
557 {
558 	struct elevator_queue *e = q->elevator;
559 	bool next_sorted = false;
560 
561 	if (e->uses_mq && e->type->ops.mq.requests_merged)
562 		e->type->ops.mq.requests_merged(q, rq, next);
563 	else if (e->type->ops.sq.elevator_merge_req_fn) {
564 		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
565 		if (next_sorted)
566 			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
567 	}
568 
569 	elv_rqhash_reposition(q, rq);
570 
571 	if (next_sorted) {
572 		elv_rqhash_del(q, next);
573 		q->nr_sorted--;
574 	}
575 
576 	q->last_merge = rq;
577 }
578 
579 void elv_bio_merged(struct request_queue *q, struct request *rq,
580 			struct bio *bio)
581 {
582 	struct elevator_queue *e = q->elevator;
583 
584 	if (WARN_ON_ONCE(e->uses_mq))
585 		return;
586 
587 	if (e->type->ops.sq.elevator_bio_merged_fn)
588 		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
589 }
590 
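/*
 * Runtime PM bookkeeping for legacy queues: q->nr_pending counts non-PM
 * requests.  Adding the first such request to a suspended (or suspending)
 * device triggers a resume request; requeueing one drops the count again.
 */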
591 #ifdef CONFIG_PM
592 static void blk_pm_requeue_request(struct request *rq)
593 {
594 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
595 		rq->q->nr_pending--;
596 }
597 
598 static void blk_pm_add_request(struct request_queue *q, struct request *rq)
599 {
600 	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
601 	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
602 		pm_request_resume(q->dev);
603 }
604 #else
605 static inline void blk_pm_requeue_request(struct request *rq) {}
606 static inline void blk_pm_add_request(struct request_queue *q,
607 				      struct request *rq)
608 {
609 }
610 #endif
611 
612 void elv_requeue_request(struct request_queue *q, struct request *rq)
613 {
614 	/*
615 	 * The request already went through dequeue, so we need to decrement the
616 	 * in_flight count again
617 	 */
618 	if (blk_account_rq(rq)) {
619 		q->in_flight[rq_is_sync(rq)]--;
620 		if (rq->rq_flags & RQF_SORTED)
621 			elv_deactivate_rq(q, rq);
622 	}
623 
624 	rq->rq_flags &= ~RQF_STARTED;
625 
626 	blk_pm_requeue_request(rq);
627 
628 	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
629 }
630 
631 void elv_drain_elevator(struct request_queue *q)
632 {
633 	struct elevator_queue *e = q->elevator;
634 	static int printed;
635 
636 	if (WARN_ON_ONCE(e->uses_mq))
637 		return;
638 
639 	lockdep_assert_held(q->queue_lock);
640 
641 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
642 		;
643 	if (q->nr_sorted && printed++ < 10) {
644 		printk(KERN_ERR "%s: forced dispatching is broken "
645 		       "(nr_sorted=%u), please report this\n",
646 		       q->elevator->type->elevator_name, q->nr_sorted);
647 	}
648 }
649 
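/*
 * Core insertion path for legacy (single queue) elevators; the queue lock is
 * expected to be held, as elv_add_request() below does.  @where selects the
 * insertion point: FRONT/REQUEUE go straight onto the head of the dispatch
 * list, BACK drains the elevator and appends, SORT/SORT_MERGE hand the
 * request to the scheduler (SORT_MERGE first tries an insert merge), and
 * FLUSH routes the request through blk_insert_flush().
 */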
650 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
651 {
652 	trace_block_rq_insert(q, rq);
653 
654 	blk_pm_add_request(q, rq);
655 
656 	rq->q = q;
657 
658 	if (rq->rq_flags & RQF_SOFTBARRIER) {
659 		/* barriers are scheduling boundary, update end_sector */
660 		if (!blk_rq_is_passthrough(rq)) {
661 			q->end_sector = rq_end_sector(rq);
662 			q->boundary_rq = rq;
663 		}
664 	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
665 		    (where == ELEVATOR_INSERT_SORT ||
666 		     where == ELEVATOR_INSERT_SORT_MERGE))
667 		where = ELEVATOR_INSERT_BACK;
668 
669 	switch (where) {
670 	case ELEVATOR_INSERT_REQUEUE:
671 	case ELEVATOR_INSERT_FRONT:
672 		rq->rq_flags |= RQF_SOFTBARRIER;
673 		list_add(&rq->queuelist, &q->queue_head);
674 		break;
675 
676 	case ELEVATOR_INSERT_BACK:
677 		rq->rq_flags |= RQF_SOFTBARRIER;
678 		elv_drain_elevator(q);
679 		list_add_tail(&rq->queuelist, &q->queue_head);
680 		/*
681 		 * We kick the queue here for the following reasons.
682 		 * - The elevator might have returned NULL previously
683 		 *   to delay requests and is returning them now.  As the
684 		 *   queue wasn't empty before this request, ll_rw_blk
685 		 *   won't run the queue on return, resulting in a hang.
686 		 * - Usually, back inserted requests won't be merged
687 		 *   with anything.  There's no point in delaying queue
688 		 *   processing.
689 		 */
690 		__blk_run_queue(q);
691 		break;
692 
693 	case ELEVATOR_INSERT_SORT_MERGE:
694 		/*
695 		 * If we succeed in merging this request with one in the
696 		 * queue already, we are done - rq has now been freed,
697 		 * so no need to do anything further.
698 		 */
699 		if (elv_attempt_insert_merge(q, rq))
700 			break;
701 		/* fall through */
702 	case ELEVATOR_INSERT_SORT:
703 		BUG_ON(blk_rq_is_passthrough(rq));
704 		rq->rq_flags |= RQF_SORTED;
705 		q->nr_sorted++;
706 		if (rq_mergeable(rq)) {
707 			elv_rqhash_add(q, rq);
708 			if (!q->last_merge)
709 				q->last_merge = rq;
710 		}
711 
712 		/*
713 		 * Some ioscheds (cfq) run q->request_fn directly, so
714 		 * rq cannot be accessed after calling
715 		 * elevator_add_req_fn.
716 		 */
717 		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
718 		break;
719 
720 	case ELEVATOR_INSERT_FLUSH:
721 		rq->rq_flags |= RQF_SOFTBARRIER;
722 		blk_insert_flush(rq);
723 		break;
724 	default:
725 		printk(KERN_ERR "%s: bad insertion point %d\n",
726 		       __func__, where);
727 		BUG();
728 	}
729 }
730 EXPORT_SYMBOL(__elv_add_request);
731 
732 void elv_add_request(struct request_queue *q, struct request *rq, int where)
733 {
734 	unsigned long flags;
735 
736 	spin_lock_irqsave(q->queue_lock, flags);
737 	__elv_add_request(q, rq, where);
738 	spin_unlock_irqrestore(q->queue_lock, flags);
739 }
740 EXPORT_SYMBOL(elv_add_request);
741 
742 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
743 {
744 	struct elevator_queue *e = q->elevator;
745 
746 	if (e->uses_mq && e->type->ops.mq.next_request)
747 		return e->type->ops.mq.next_request(q, rq);
748 	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
749 		return e->type->ops.sq.elevator_latter_req_fn(q, rq);
750 
751 	return NULL;
752 }
753 
754 struct request *elv_former_request(struct request_queue *q, struct request *rq)
755 {
756 	struct elevator_queue *e = q->elevator;
757 
758 	if (e->uses_mq && e->type->ops.mq.former_request)
759 		return e->type->ops.mq.former_request(q, rq);
760 	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
761 		return e->type->ops.sq.elevator_former_req_fn(q, rq);
762 	return NULL;
763 }
764 
765 int elv_set_request(struct request_queue *q, struct request *rq,
766 		    struct bio *bio, gfp_t gfp_mask)
767 {
768 	struct elevator_queue *e = q->elevator;
769 
770 	if (WARN_ON_ONCE(e->uses_mq))
771 		return 0;
772 
773 	if (e->type->ops.sq.elevator_set_req_fn)
774 		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
775 	return 0;
776 }
777 
778 void elv_put_request(struct request_queue *q, struct request *rq)
779 {
780 	struct elevator_queue *e = q->elevator;
781 
782 	if (WARN_ON_ONCE(e->uses_mq))
783 		return;
784 
785 	if (e->type->ops.sq.elevator_put_req_fn)
786 		e->type->ops.sq.elevator_put_req_fn(rq);
787 }
788 
789 int elv_may_queue(struct request_queue *q, unsigned int op)
790 {
791 	struct elevator_queue *e = q->elevator;
792 
793 	if (WARN_ON_ONCE(e->uses_mq))
794 		return 0;
795 
796 	if (e->type->ops.sq.elevator_may_queue_fn)
797 		return e->type->ops.sq.elevator_may_queue_fn(q, op);
798 
799 	return ELV_MQUEUE_MAY;
800 }
801 
802 void elv_completed_request(struct request_queue *q, struct request *rq)
803 {
804 	struct elevator_queue *e = q->elevator;
805 
806 	if (WARN_ON_ONCE(e->uses_mq))
807 		return;
808 
809 	/*
810 	 * the request has been released by the driver, so its io must be done
811 	 */
812 	if (blk_account_rq(rq)) {
813 		q->in_flight[rq_is_sync(rq)]--;
814 		if ((rq->rq_flags & RQF_SORTED) &&
815 		    e->type->ops.sq.elevator_completed_req_fn)
816 			e->type->ops.sq.elevator_completed_req_fn(q, rq);
817 	}
818 }
819 
820 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
821 
822 static ssize_t
823 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
824 {
825 	struct elv_fs_entry *entry = to_elv(attr);
826 	struct elevator_queue *e;
827 	ssize_t error;
828 
829 	if (!entry->show)
830 		return -EIO;
831 
832 	e = container_of(kobj, struct elevator_queue, kobj);
833 	mutex_lock(&e->sysfs_lock);
834 	error = e->type ? entry->show(e, page) : -ENOENT;
835 	mutex_unlock(&e->sysfs_lock);
836 	return error;
837 }
838 
839 static ssize_t
840 elv_attr_store(struct kobject *kobj, struct attribute *attr,
841 	       const char *page, size_t length)
842 {
843 	struct elv_fs_entry *entry = to_elv(attr);
844 	struct elevator_queue *e;
845 	ssize_t error;
846 
847 	if (!entry->store)
848 		return -EIO;
849 
850 	e = container_of(kobj, struct elevator_queue, kobj);
851 	mutex_lock(&e->sysfs_lock);
852 	error = e->type ? entry->store(e, page, length) : -ENOENT;
853 	mutex_unlock(&e->sysfs_lock);
854 	return error;
855 }
856 
857 static const struct sysfs_ops elv_sysfs_ops = {
858 	.show	= elv_attr_show,
859 	.store	= elv_attr_store,
860 };
861 
862 static struct kobj_type elv_ktype = {
863 	.sysfs_ops	= &elv_sysfs_ops,
864 	.release	= elevator_release,
865 };
866 
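/*
 * Create the "iosched" directory under the queue's sysfs kobject, populate
 * it with the scheduler's attributes, and let a legacy scheduler know that
 * registration has completed.
 */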
867 int elv_register_queue(struct request_queue *q)
868 {
869 	struct elevator_queue *e = q->elevator;
870 	int error;
871 
872 	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
873 	if (!error) {
874 		struct elv_fs_entry *attr = e->type->elevator_attrs;
875 		if (attr) {
876 			while (attr->attr.name) {
877 				if (sysfs_create_file(&e->kobj, &attr->attr))
878 					break;
879 				attr++;
880 			}
881 		}
882 		kobject_uevent(&e->kobj, KOBJ_ADD);
883 		e->registered = 1;
884 		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
885 			e->type->ops.sq.elevator_registered_fn(q);
886 	}
887 	return error;
888 }
889 EXPORT_SYMBOL(elv_register_queue);
890 
891 void elv_unregister_queue(struct request_queue *q)
892 {
893 	if (q) {
894 		struct elevator_queue *e = q->elevator;
895 
896 		kobject_uevent(&e->kobj, KOBJ_REMOVE);
897 		kobject_del(&e->kobj);
898 		e->registered = 0;
899 		/* Re-enable throttling in case elevator disabled it */
900 		wbt_enable_default(q);
901 	}
902 }
903 EXPORT_SYMBOL(elv_unregister_queue);
904 
905 int elv_register(struct elevator_type *e)
906 {
907 	char *def = "";
908 
909 	/* create icq_cache if requested */
910 	if (e->icq_size) {
911 		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
912 		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
913 			return -EINVAL;
914 
915 		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
916 			 "%s_io_cq", e->elevator_name);
917 		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
918 						 e->icq_align, 0, NULL);
919 		if (!e->icq_cache)
920 			return -ENOMEM;
921 	}
922 
923 	/* register, don't allow duplicate names */
924 	spin_lock(&elv_list_lock);
925 	if (elevator_find(e->elevator_name, e->uses_mq)) {
926 		spin_unlock(&elv_list_lock);
927 		if (e->icq_cache)
928 			kmem_cache_destroy(e->icq_cache);
929 		return -EBUSY;
930 	}
931 	list_add_tail(&e->list, &elv_list);
932 	spin_unlock(&elv_list_lock);
933 
934 	/* print pretty message */
935 	if (elevator_match(e, chosen_elevator) ||
936 			(!*chosen_elevator &&
937 			 elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
938 				def = " (default)";
939 
940 	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
941 								def);
942 	return 0;
943 }
944 EXPORT_SYMBOL_GPL(elv_register);
945 
946 void elv_unregister(struct elevator_type *e)
947 {
948 	/* unregister */
949 	spin_lock(&elv_list_lock);
950 	list_del_init(&e->list);
951 	spin_unlock(&elv_list_lock);
952 
953 	/*
954 	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
955 	 * sure all RCU operations are complete before proceeding.
956 	 */
957 	if (e->icq_cache) {
958 		rcu_barrier();
959 		kmem_cache_destroy(e->icq_cache);
960 		e->icq_cache = NULL;
961 	}
962 }
963 EXPORT_SYMBOL_GPL(elv_unregister);
964 
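/*
 * Switch a blk-mq queue to @new_e, or to no scheduler when @new_e is NULL.
 * The queue is frozen around the switch: the old scheduler (if any) is
 * unregistered and torn down, then the new one is initialised and
 * registered.
 */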
965 static int elevator_switch_mq(struct request_queue *q,
966 			      struct elevator_type *new_e)
967 {
968 	int ret;
969 
970 	blk_mq_freeze_queue(q);
971 
972 	if (q->elevator) {
973 		if (q->elevator->registered)
974 			elv_unregister_queue(q);
975 		ioc_clear_queue(q);
976 		elevator_exit(q, q->elevator);
977 	}
978 
979 	ret = blk_mq_init_sched(q, new_e);
980 	if (ret)
981 		goto out;
982 
983 	if (new_e) {
984 		ret = elv_register_queue(q);
985 		if (ret) {
986 			elevator_exit(q, q->elevator);
987 			goto out;
988 		}
989 	}
990 
991 	if (new_e)
992 		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
993 	else
994 		blk_add_trace_msg(q, "elv switch: none");
995 
996 out:
997 	blk_mq_unfreeze_queue(q);
998 	return ret;
999 }
1000 
1001 /*
1002  * Switch to the new_e io scheduler. Be careful not to introduce deadlocks -
1003  * we don't free the old io scheduler before we have allocated what we
1004  * need for the new one. This way we have a chance of going back to the old
1005  * one, if the new one fails init for some reason.
1006  */
1007 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1008 {
1009 	struct elevator_queue *old = q->elevator;
1010 	bool old_registered = false;
1011 	int err;
1012 
1013 	if (q->mq_ops)
1014 		return elevator_switch_mq(q, new_e);
1015 
1016 	/*
1017 	 * Turn on BYPASS and drain all requests w/ elevator private data.
1018 	 * Block layer doesn't call into a quiesced elevator - all requests
1019 	 * are directly put on the dispatch list without elevator data
1020 	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
1021 	 * merge happens either.
1022 	 */
1023 	if (old) {
1024 		old_registered = old->registered;
1025 
1026 		blk_queue_bypass_start(q);
1027 
1028 		/* unregister and clear all auxiliary data of the old elevator */
1029 		if (old_registered)
1030 			elv_unregister_queue(q);
1031 
1032 		ioc_clear_queue(q);
1033 	}
1034 
1035 	/* allocate, init and register new elevator */
1036 	err = new_e->ops.sq.elevator_init_fn(q, new_e);
1037 	if (err)
1038 		goto fail_init;
1039 
1040 	err = elv_register_queue(q);
1041 	if (err)
1042 		goto fail_register;
1043 
1044 	/* done, kill the old one and finish */
1045 	if (old) {
1046 		elevator_exit(q, old);
1047 		blk_queue_bypass_end(q);
1048 	}
1049 
1050 	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1051 
1052 	return 0;
1053 
1054 fail_register:
1055 	elevator_exit(q, q->elevator);
1056 fail_init:
1057 	/* switch failed, restore and re-register old elevator */
1058 	if (old) {
1059 		q->elevator = old;
1060 		elv_register_queue(q);
1061 		blk_queue_bypass_end(q);
1062 	}
1063 
1064 	return err;
1065 }
1066 
1067 /*
1068  * Switch this queue to the given IO scheduler.
1069  */
1070 static int __elevator_change(struct request_queue *q, const char *name)
1071 {
1072 	char elevator_name[ELV_NAME_MAX];
1073 	struct elevator_type *e;
1074 
1075 	/* Make sure queue is not in the middle of being removed */
1076 	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
1077 		return -ENOENT;
1078 
1079 	/*
1080 	 * Special case for mq: "none" turns off scheduling entirely
1081 	 */
1082 	if (q->mq_ops && !strncmp(name, "none", 4))
1083 		return elevator_switch(q, NULL);
1084 
1085 	strlcpy(elevator_name, name, sizeof(elevator_name));
1086 	e = elevator_get(q, strstrip(elevator_name), true);
1087 	if (!e)
1088 		return -EINVAL;
1089 
1090 	if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
1091 		elevator_put(e);
1092 		return 0;
1093 	}
1094 
1095 	return elevator_switch(q, e);
1096 }
1097 
1098 static inline bool elv_support_iosched(struct request_queue *q)
1099 {
1100 	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
1101 				BLK_MQ_F_NO_SCHED))
1102 		return false;
1103 	return true;
1104 }
1105 
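/*
 * sysfs store handler for the queue's "scheduler" attribute, e.g.
 * (device name and the set of available schedulers are illustrative):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	[mq-deadline] kyber none
 *	# echo kyber > /sys/block/sda/queue/scheduler
 *
 * On queues that don't do io scheduling the write is accepted but ignored.
 */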
1106 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1107 			  size_t count)
1108 {
1109 	int ret;
1110 
1111 	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
1112 		return count;
1113 
1114 	ret = __elevator_change(q, name);
1115 	if (!ret)
1116 		return count;
1117 
1118 	return ret;
1119 }
1120 
1121 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1122 {
1123 	struct elevator_queue *e = q->elevator;
1124 	struct elevator_type *elv = NULL;
1125 	struct elevator_type *__e;
1126 	bool uses_mq = q->mq_ops != NULL;
1127 	int len = 0;
1128 
1129 	if (!queue_is_rq_based(q))
1130 		return sprintf(name, "none\n");
1131 
1132 	if (!q->elevator)
1133 		len += sprintf(name+len, "[none] ");
1134 	else
1135 		elv = e->type;
1136 
1137 	spin_lock(&elv_list_lock);
1138 	list_for_each_entry(__e, &elv_list, list) {
1139 		if (elv && elevator_match(elv, __e->elevator_name) &&
1140 		    (__e->uses_mq == uses_mq)) {
1141 			len += sprintf(name+len, "[%s] ", elv->elevator_name);
1142 			continue;
1143 		}
1144 		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
1145 			len += sprintf(name+len, "%s ", __e->elevator_name);
1146 		else if (!__e->uses_mq && !q->mq_ops)
1147 			len += sprintf(name+len, "%s ", __e->elevator_name);
1148 	}
1149 	spin_unlock(&elv_list_lock);
1150 
1151 	if (q->mq_ops && q->elevator)
1152 		len += sprintf(name+len, "none");
1153 
1154 	len += sprintf(len+name, "\n");
1155 	return len;
1156 }
1157 
1158 struct request *elv_rb_former_request(struct request_queue *q,
1159 				      struct request *rq)
1160 {
1161 	struct rb_node *rbprev = rb_prev(&rq->rb_node);
1162 
1163 	if (rbprev)
1164 		return rb_entry_rq(rbprev);
1165 
1166 	return NULL;
1167 }
1168 EXPORT_SYMBOL(elv_rb_former_request);
1169 
1170 struct request *elv_rb_latter_request(struct request_queue *q,
1171 				      struct request *rq)
1172 {
1173 	struct rb_node *rbnext = rb_next(&rq->rb_node);
1174 
1175 	if (rbnext)
1176 		return rb_entry_rq(rbnext);
1177 
1178 	return NULL;
1179 }
1180 EXPORT_SYMBOL(elv_rb_latter_request);
1181