/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

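/*
 * For example: a request covering sectors 100..107 has a hash key of
 * 108 (rq_hash_key() is the end sector), so a bio starting at sector
 * 108 will find it in the hash and can be back merged onto it.
 * ELV_HASH_BLOCK() drops the low three bits, so keys within the same
 * 8-sector block land in the same bucket.
 */
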
/*
 * Query the io scheduler to see if a bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	elevator_t *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and the sequence is ok, check which type of
	 * merge is possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

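/*
 * Merge geometry, in sector numbers: given __rq covering sectors
 * [s, s + n) and a bio covering [b, b + m):
 *
 *	back merge:  s + n == b  (bio appended, hash key becomes b + m)
 *	front merge: s - m == b  (bio prepended, rq->sector becomes b)
 *
 * e.g. __rq at sectors 100..107 back merges a bio starting at 108,
 * and front merges an 8-sector bio starting at 92.
 */
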
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

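/*
 * The default scheduler can be overridden at boot, e.g. by appending
 *
 *	elevator=deadline
 *
 * to the kernel command line. "elevator=as" is still accepted as an
 * alias for the anticipatory scheduler, as handled above.
 */
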
static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	elevator_t *eq;
	int i;

	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj);
	kobject_set_name(&eq->kobj, "%s", "iosched");
	eq->kobj.ktype = &elv_ktype;
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);

	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk(KERN_ERR "Default I/O scheduler not found, using noop\n");
		e = elevator_get("noop");
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}

EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}

EXPORT_SYMBOL(elv_rb_find);

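/*
 * A minimal sketch of how an io scheduler might use the helpers above
 * to keep a sector-sorted tree for front merge lookups (the foo_*
 * names are illustrative, not from this file):
 *
 *	static void foo_add_request(struct request_queue *q,
 *				    struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *		struct request *__alias;
 *
 *		while ((__alias = elv_rb_add(&fd->sort_list, rq)) != NULL)
 *			foo_dispatch_alias(q, __alias);
 *	}
 *
 * elv_rb_add() returns an existing request at the same sector (an
 * alias) instead of inserting, so callers must handle that case;
 * elv_rb_find() on the same tree then services former/latter request
 * lookups and front merge checks.
 */
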
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort inserted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

EXPORT_SYMBOL(elv_dispatch_sort);

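/*
 * Worked example for the boundary logic above: with end_sector = 500,
 * requests below the boundary sort after those at or above it, since
 * the head is assumed to sweep upward from the boundary before
 * wrapping.  Scanning backwards from the tail, a request at sector
 * 520 skips past the whole below-500 group and is sort inserted among
 * the >= 500 requests, while one at sector 100 sorts within the
 * below-500 group at the tail.
 */
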
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}

EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

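/*
 * So elv_merge() tries three sources in order of increasing cost: the
 * one-hit last_merge cache, the O(1) hash keyed by end sector (back
 * merges only), and finally the scheduler's own merge hook (typically
 * an rb-tree lookup that also catches front merges).
 */
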
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	blk_add_trace_rq(q, rq, BLK_TA_INSERT);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}

EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

EXPORT_SYMBOL(elv_add_request);

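/*
 * __elv_add_request() expects the caller to hold q->queue_lock;
 * elv_add_request() is the wrapper that takes it.  e.g. a driver
 * resubmitting a request from a context where the queue lock is not
 * yet held might do (illustrative call, not from an in-tree driver):
 *
 *	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
 */
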
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		/*
		 * Kill the empty barrier place holder, the driver must
		 * not ever see it.
		 */
		if (blk_empty_barrier(rq)) {
			end_queued_request(rq, 1);
			continue;
		}
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			end_queued_request(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__,
								ret);
			break;
		}
	}

	return rq;
}

EXPORT_SYMBOL(elv_next_request);

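/*
 * A typical request_fn peels requests off with elv_next_request() and
 * dequeues them before handing them to hardware, roughly (sketch only,
 * foo_* names illustrative, error handling omitted):
 *
 *	static void foo_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			blkdev_dequeue_request(rq);
 *			foo_issue_to_hw(rq);
 *		}
 *	}
 *
 * blkdev_dequeue_request() ends up in elv_dequeue_request() below,
 * which starts the in_flight accounting.
 */
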
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

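/*
 * To summarize the accounting: elv_dequeue_request() increments
 * q->in_flight when the driver takes ownership of a request,
 * elv_completed_request() decrements it on completion, and
 * elv_requeue_request() decrements it when the driver gives a request
 * back.  The ordered-flush check above fires once the last in-flight
 * request drains while a barrier sequence is parked in
 * QUEUE_ORDSEQ_DRAIN.
 */
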
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	e->kobj.parent = &q->kobj;

	error = kobject_add(&e->kobj);
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

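/*
 * An io scheduler module registers itself with elv_register() from its
 * init path, e.g. (condensed from the pattern used by the in-tree
 * schedulers; the foo names are illustrative):
 *
 *	static struct elevator_type elevator_foo = {
 *		.ops		= { ... },
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&elevator_foo);
 *	}
 *	module_init(foo_init);
 *
 * and calls elv_unregister() from its exit path.
 */
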
void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and remove its
	 * io context.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
	return count;
}

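/*
 * This is the store hook behind the per-queue sysfs attribute, so the
 * scheduler can be changed at runtime with e.g.
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * and elv_iosched_show() below prints the registered schedulers with
 * the active one in square brackets.
 */
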
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}

EXPORT_SYMBOL(elv_rb_latter_request);
1159