/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons; partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
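
/*
 * A back-merge candidate for a bio is any request whose hash key (its end
 * sector) equals the bio's starting sector.  As a hedged illustration with
 * purely hypothetical numbers:
 *
 *	rq:  blk_rq_pos(rq) = 1000, blk_rq_sectors(rq) = 8
 *	key: rq_hash_key(rq) = 1008
 *	bio: bio->bi_iter.bi_sector = 1008  ->  elv_rqhash_find() hit,
 *	     so the bio may be appended to rq as a back merge.
 */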

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
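
/*
 * An elevator can veto an otherwise valid merge through its
 * elevator_allow_merge_fn hook.  A minimal sketch of such a hook (the foo_*
 * names and helper are hypothetical, not part of this file):
 *
 *	static int foo_allow_merge(struct request_queue *q, struct request *rq,
 *				   struct bio *bio)
 *	{
 *		/* foo_same_group() is a hypothetical per-elevator helper */
 *		return foo_same_group(q->elevator->elevator_data, rq, bio);
 *	}
 *
 * Returning 0 from the hook makes elv_rq_merge_ok() report the merge as
 * unsafe.
 */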

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
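
/*
 * The chosen elevator can therefore be selected at boot time from the kernel
 * command line, e.g.:
 *
 *	elevator=deadline
 *
 * which makes elevator_init() prefer "deadline" over CONFIG_DEFAULT_IOSCHED
 * for every queue that is initialized without an explicit name.
 */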

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}
EXPORT_SYMBOL(elevator_alloc);
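
/*
 * A scheduler's elevator_init_fn is expected to allocate its queue with
 * elevator_alloc() and publish it in q->elevator.  A minimal sketch, loosely
 * modelled on the noop scheduler (the foo_* names are hypothetical):
 *
 *	static int foo_init_queue(struct request_queue *q, struct elevator_type *e)
 *	{
 *		struct elevator_queue *eq;
 *		struct foo_data *fd;
 *
 *		eq = elevator_alloc(q, e);
 *		if (!eq)
 *			return -ENOMEM;
 *
 *		fd = kmalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
 *		if (!fd) {
 *			kobject_put(&eq->kobj);
 *			return -ENOMEM;
 *		}
 *		eq->elevator_data = fd;
 *
 *		spin_lock_irq(q->queue_lock);
 *		q->elevator = eq;
 *		spin_unlock_irq(q->queue_lock);
 *		return 0;
 *	}
 */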

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param or
	 * config option.  Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	err = e->ops.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
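
/*
 * The RB-tree helpers above let an elevator keep its requests sorted by
 * sector, as deadline and cfq do.  A hedged sketch of typical add/remove
 * paths in a hypothetical scheduler "foo" that keeps one sort_list per data
 * direction:
 *
 *	static void foo_add_rq_rb(struct foo_data *fd, struct request *rq)
 *	{
 *		elv_rb_add(&fd->sort_list[rq_data_dir(rq)], rq);
 *	}
 *
 *	static void foo_del_rq_rb(struct foo_data *fd, struct request *rq)
 *	{
 *		elv_rb_del(&fd->sort_list[rq_data_dir(rq)], rq);
 *	}
 *
 * elv_rb_find() can then locate a front-merge candidate: a request whose
 * starting sector equals the end sector of the incoming bio.
 */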

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
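
/*
 * These two helpers are what an elevator_dispatch_fn uses to hand a request
 * over to the dispatch queue.  A minimal FIFO-style sketch, close in spirit
 * to what noop does (struct foo_data and its ->queue list are hypothetical):
 *
 *	static int foo_dispatch(struct request_queue *q, int force)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *		struct request *rq;
 *
 *		if (list_empty(&fd->queue))
 *			return 0;
 *
 *		rq = list_first_entry(&fd->queue, struct request, queuelist);
 *		list_del_init(&rq->queuelist);
 *		elv_dispatch_add_tail(q, rq);
 *		return 1;
 *	}
 *
 * A sector-sorting scheduler would call elv_dispatch_sort() instead, so the
 * request is placed according to q->end_sector/q->boundary_rq.
 */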

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
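
/*
 * The caller (blk_queue_bio() and friends in blk-core) is responsible for
 * acting on the returned value, roughly along these lines (a sketch, not a
 * verbatim copy of blk-core):
 *
 *	el_ret = elv_merge(q, &req, bio);
 *	if (el_ret == ELEVATOR_BACK_MERGE) {
 *		if (bio_attempt_back_merge(q, req, bio))
 *			elv_bio_merged(q, req, bio);
 *	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
 *		if (bio_attempt_front_merge(q, req, bio))
 *			elv_bio_merged(q, req, bio);
 *	}
 */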

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM_RUNTIME
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, so we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back-inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
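
/*
 * Callers pick the insertion point with one of the ELEVATOR_INSERT_* codes.
 * As a hedged example of the locked variant (similar in spirit to what
 * blk_execute_rq_nowait() does), a request that must bypass sorting goes
 * straight onto the dispatch queue:
 *
 *	spin_lock_irq(q->queue_lock);
 *	__elv_add_request(q, rq, at_head ? ELEVATOR_INSERT_FRONT :
 *					   ELEVATOR_INSERT_BACK);
 *	__blk_run_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * Normal filesystem requests are fed to the scheduler instead with
 * ELEVATOR_INSERT_SORT or ELEVATOR_INSERT_SORT_MERGE.
 */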

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
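
/*
 * A scheduler module ties the two calls above to its module init/exit.  A
 * minimal sketch, modelled on how noop and deadline register themselves (the
 * foo_* identifiers are hypothetical):
 *
 *	static struct elevator_type elevator_foo = {
 *		.ops = {
 *			.elevator_merge_req_fn	= foo_merged_requests,
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *		},
 *		.elevator_name = "foo",
 *		.elevator_owner = THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&elevator_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		elv_unregister(&elevator_foo);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */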

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce deadlocks:
 * we don't free the old io scheduler before we have allocated what we need
 * for the new one.  This way we have a chance of going back to the old one
 * if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = new_e->ops.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}
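
/*
 * elv_iosched_store() and elv_iosched_show() back the per-queue sysfs
 * attribute, so the active scheduler can be switched at runtime from
 * userspace, e.g.:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * (the device name and the set of compiled-in schedulers above are only an
 * example).
 */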

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);