xref: /openbmc/linux/block/blk-core.c (revision a1e58bbd)
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>

#include "blk.h"

static int __make_request(struct request_queue *q, struct bio *bio);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

static void drive_stat_acct(struct request *rq, int new_io)
{
	int rw = rq_data_dir(rq);

	if (!blk_fs_request(rq) || !rq->rq_disk)
		return;

	if (!new_io) {
		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
	} else {
		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
		disk_round_stats(rq->rq_disk);
		rq->rq_disk->in_flight++;
		if (part) {
			part_round_stats(part);
			part->in_flight++;
		}
	}
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info.
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
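/*
 * Example (editor's sketch, not part of the original file): a caller can use
 * blk_get_backing_dev_info() to tune per-queue readahead. The function name
 * and the readahead value below are hypothetical.
 */
#if 0
static void example_tune_readahead(struct block_device *bdev)
{
	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);

	if (bdi)	/* NULL if no request queue could be located */
		bdi->ra_pages = (512 * 1024) / PAGE_CACHE_SIZE;
}
#endif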

/*
 * We can't just memset() the structure, since the allocation path
 * already stored some information in the request.
 */
void rq_init(struct request_queue *q, struct request *rq)
{
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->donelist);
	rq->q = q;
	rq->sector = rq->hard_sector = (sector_t) -1;
	rq->nr_sectors = rq->hard_nr_sectors = 0;
	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
	rq->bio = rq->biotail = NULL;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->nr_phys_segments = 0;
	rq->nr_hw_segments = 0;
	rq->ioprio = 0;
	rq->special = NULL;
	rq->buffer = NULL;
	rq->tag = -1;
	rq->errors = 0;
	rq->ref_count = 1;
	rq->cmd_len = 0;
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->data_len = 0;
	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->data = NULL;
	rq->sense = NULL;
	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	struct request_queue *q = rq->q;

	if (&q->bar_rq != rq) {
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __func__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}

		bio->bi_size -= nbytes;
		bio->bi_sector += (nbytes >> 9);
		if (bio->bi_size == 0)
			bio_endio(bio, error);
	} else {

		/*
		 * Okay, this is the barrier request in progress, just
		 * record the error.
		 */
		if (error && !q->orderr)
			q->orderr = error;
	}
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
						(unsigned long long)rq->sector,
						rq->nr_sectors,
						rq->current_nr_sectors);
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
						rq->bio, rq->biotail,
						rq->buffer, rq->data,
						rq->data_len);

	if (blk_pc_request(rq)) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < sizeof(rq->cmd); bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (blk_queue_stopped(q))
		return;

	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
	}
}
EXPORT_SYMBOL(blk_plug_device);

/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}
EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!blk_remove_plug(q))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);

/**
 * generic_unplug_device - fire a request queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   Linux uses plugging to build bigger request queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, unplug_work);

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
				q->rq.count[READ] + q->rq.count[WRITE]);

	q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
				q->rq.count[READ] + q->rq.count[WRITE]);

	kblockd_schedule_work(&q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
					q->rq.count[READ] + q->rq.count[WRITE]);

		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);
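/*
 * Example (editor's sketch, not part of the original file): a stacking
 * driver that has queued work on an underlying device can kick it with
 * blk_unplug(). The function name is hypothetical; blk_unplug() itself is
 * safe to call unconditionally since it checks for ->unplug_fn first.
 */
#if 0
static void example_kick_lower_device(struct request_queue *lower_q)
{
	blk_unplug(lower_q);	/* emits the unplug trace, then unplugs */
}
#endif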

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);

	/*
	 * one level of recursion is ok and is much faster than kicking
	 * the unplug handling
	 */
	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
		q->request_fn(q);
		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
	} else {
		blk_plug_device(q);
		kblockd_schedule_work(&q->unplug_work);
	}
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_stop_queue);
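/*
 * Example (editor's sketch, not part of the original file): the canonical
 * pairing of blk_stop_queue() and blk_start_queue() described above, for a
 * driver whose hardware reports a full command ring. The example_hw_*()
 * helpers are hypothetical; both blk_*_queue() calls assume q->queue_lock
 * is held, which is the case inside a request_fn and in a completion path
 * that takes the lock.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (example_hw_ring_full()) {
			/* stop dispatch until the completion path restarts us */
			blk_stop_queue(q);
			break;
		}
		example_hw_issue(rq);
	}
}

static void example_completion(struct request_queue *q)
{
	/* a slot freed up in hardware; resume calls to the request_fn */
	blk_start_queue(q);
}
#endif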

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	kblockd_flush_work(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);
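/*
 * Example (editor's sketch, not part of the original file): a driver
 * release path cancels pending unplug callbacks before freeing resources,
 * per the description above. The surrounding types and helper are
 * hypothetical.
 */
#if 0
static void example_release(struct example_dev *dev)
{
	blk_sync_queue(dev->queue);	/* no timer/work touches the queue now */
	example_free_resources(dev);
}
#endif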

/**
 * blk_run_queue - run a single device queue
 * @q:	The queue to run
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_remove_plug(q);

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!elv_queue_empty(q)) {
		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
			q->request_fn(q);
			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
		} else {
			blk_plug_device(q);
			kblockd_schedule_work(&q->unplug_work);
		}
	}

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}

void blk_cleanup_queue(struct request_queue *q)
{
	mutex_lock(&q->sysfs_lock);
	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	rl->count[READ] = rl->count[WRITE] = 0;
	rl->starved[READ] = rl->starved[WRITE] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[READ]);
	init_waitqueue_head(&rl->wait[WRITE]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;
	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	init_timer(&q->unplug_timer);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will also be taken from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
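/*
 * Example (editor's sketch, not part of the original file): the pairing the
 * Note above requires. A minimal module init/exit using blk_init_queue()
 * and blk_cleanup_queue(); the lock, the request_fn and the error handling
 * are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);
static struct request_queue *example_queue;

static int __init example_init(void)
{
	example_queue = blk_init_queue(example_request_fn, &example_lock);
	if (!example_queue)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	blk_cleanup_queue(example_queue);
}
#endif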

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * our embedded lock
	 */
	if (!lock) {
		spin_lock_init(&q->__queue_lock);
		lock = &q->__queue_lock;
	}

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, 0xffffffff);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	q->sg_reserved_size = INT_MAX;

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);

int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	/*
	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
	 * see bio.h and blkdev.h
	 */
	rq->cmd_flags = rw | REQ_ALLOCED;

	if (priv) {
		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		rq->cmd_flags |= REQ_ELVPRIV;
	}

	return rq;
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int rw)
{
	struct request_list *rl = &q->rq;

	if (rl->count[rw] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, rw);

	if (rl->count[rw] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[rw]))
			wake_up(&rl->wait[rw]);

		blk_clear_queue_full(q, rw);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.   Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, int rw, int priv)
{
	struct request_list *rl = &q->rq;

	rl->count[rw]--;
	if (priv)
		rl->elvpriv--;

	__freed_request(q, rw);

	if (unlikely(rl->starved[rw ^ 1]))
		__freed_request(q, rw ^ 1);
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	const int rw = rw_flags & 0x01;
	int may_queue, priv;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[rw]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC, q->node);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, rw)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, rw);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, rw);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[rw] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[rw]++;
	rl->starved[rw] = 0;

	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (priv)
		rl->elvpriv++;

	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
	if (unlikely(!rq)) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw, priv);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests in this direction were pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[rw] == 0))
			rl->starved[rw] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	rq_init(q, rq);

	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
	return rq;
}

/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const int rw = rw_flags & 0x01;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);

		if (!rq) {
			struct io_context *ioc;

			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

			__generic_unplug_device(q);
			spin_unlock_irq(q->queue_lock);
			io_schedule();

			/*
			 * After sleeping, we become a "batching" process and
			 * will be able to allocate at least one request, and
			 * up to a big batch of them for a small period of time.
			 * See ioc_batching, ioc_set_batching
			 */
			ioc = current_io_context(GFP_NOIO, q->node);
			ioc_set_batching(q, ioc);

			spin_lock_irq(q->queue_lock);
		}
		finish_wait(&rl->wait[rw], &wait);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
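/*
 * Example (editor's sketch, not part of the original file): allocating a
 * request for a driver-private command. With __GFP_WAIT the call may sleep
 * but does not return NULL; without it, the result must be checked.
 * blk_execute_rq() is one common way to issue and wait for the result; the
 * function name and the command setup are hypothetical.
 */
#if 0
static int example_send_internal_command(struct request_queue *q,
					 struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);	/* sleeps, never NULL */
	rq->cmd_type = REQ_TYPE_SPECIAL;		/* driver-private command */

	err = blk_execute_rq(q, disk, rq, 0);		/* waits for completion */
	blk_put_request(rq);
	return err;
}
#endif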

/**
 * blk_start_queueing - initiate dispatch of requests to device
 * @q:		request queue to kick into gear
 *
 * This is basically a helper to remove the need to know whether a queue
 * is plugged or not if someone just wants to initiate dispatch of requests
 * for this queue.
 *
 * The queue lock must be held with interrupts disabled.
 */
void blk_start_queueing(struct request_queue *q)
{
	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more; when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
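/*
 * Example (editor's sketch, not part of the original file): the "hardware
 * cannot accept more" case from the description above. A hypothetical
 * request_fn dequeues a request, fails to start it, and puts it back with
 * blk_requeue_request(); example_hw_issue() is invented for illustration.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq = elv_next_request(q);

	if (!rq)
		return;

	blkdev_dequeue_request(rq);
	if (example_hw_issue(rq) == -EBUSY) {
		blk_requeue_request(q, rq);	/* try again later */
		blk_stop_queue(q);
	}
}
#endif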

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution.  This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_SPECIAL into the corresponding request queue, and letting them be
 *    scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting at the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth.  We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_SOFTBARRIER;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
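/*
 * Example (editor's sketch, not part of the original file): queueing a
 * driver-special command at the head of the queue, as the description above
 * suggests for a QUEUE_FULL condition. The wrapper name and private data
 * are hypothetical; blk_insert_request() sets REQ_TYPE_SPECIAL itself.
 */
#if 0
static void example_queue_unfreeze_cmd(struct request_queue *q,
				       struct request *rq, void *drv_data)
{
	blk_insert_request(q, rq, 1, drv_data);	/* 1 == insert at head */
}
#endif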

/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(struct request_queue *q, struct request *req)
{
	drive_stat_acct(req, 1);

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

/*
 * disk_round_stats()	- Round off the performance stats on a struct
 * disk_stats.
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in that state.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void disk_round_stats(struct gendisk *disk)
{
	unsigned long now = jiffies;

	if (now == disk->stamp)
		return;

	if (disk->in_flight) {
		__disk_stat_add(disk, time_in_queue,
				disk->in_flight * (now - disk->stamp));
		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
	}
	disk->stamp = now;
}
EXPORT_SYMBOL_GPL(disk_round_stats);
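/*
 * Worked example (editor's note): with disk->in_flight == 3 and 10 jiffies
 * elapsed since disk->stamp, the code above adds 3 * 10 = 30 jiffies to
 * time_in_queue (each in-flight request waited the whole interval) but only
 * 10 jiffies to io_ticks (the disk was busy for that wall-clock interval
 * exactly once, regardless of queue depth).
 */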

void part_round_stats(struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (now == part->stamp)
		return;

	if (part->in_flight) {
		__part_stat_add(part, time_in_queue,
				part->in_flight * (now - part->stamp));
		__part_stat_add(part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		int rw = rq_data_dir(req);
		int priv = req->cmd_flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, rw, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	/*
	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
	 * following if (q) test.
	 */
	if (q) {
		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	/*
	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
	 */
	if (bio_rw_ahead(bio) || bio_failfast(bio))
		req->cmd_flags |= REQ_FAILFAST;

	/*
	 * REQ_BARRIER implies no merging, but let's make it explicit
	 */
	if (unlikely(bio_barrier(bio)))
		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	if (bio_sync(bio))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_rw_meta(bio))
		req->cmd_flags |= REQ_RW_META;

	req->errors = 0;
	req->hard_sector = req->sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	req->start_time = jiffies;
	blk_rq_bio_prep(req->q, req, bio);
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	struct request *req;
	int el_ret, nr_sectors, barrier, err;
	const unsigned short prio = bio_prio(bio);
	const int sync = bio_sync(bio);
	int rw_flags;

	nr_sectors = bio_sectors(bio);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	barrier = bio_barrier(bio);
	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(barrier) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_back_merge_fn(q, req, bio))
			break;

		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);

		req->biotail->bi_next = bio;
		req->biotail = bio;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		drive_stat_acct(req, 0);
		if (!attempt_back_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	case ELEVATOR_FRONT_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_front_merge_fn(q, req, bio))
			break;

		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);

		bio->bi_next = req->bio;
		req->bio = bio;

		/*
		 * may not be valid. if the low level driver said
		 * it didn't need a bounce buffer then it better
		 * not touch req->buffer either...
		 */
		req->buffer = bio_data(bio);
		req->current_nr_sectors = bio_cur_sectors(bio);
		req->hard_cur_sectors = req->current_nr_sectors;
		req->sector = req->hard_sector = bio->bi_sector;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		drive_stat_acct(req, 0);
		if (!attempt_front_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
	default:
		;
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	/*
	 * Grab a free request. This may sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (sync)
		__generic_unplug_device(q);

	spin_unlock_irq(q->queue_lock);
	return 0;

end_io:
	bio_endio(bio, err);
	return 0;
}

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
				    bdev->bd_dev, bio->bi_sector,
				    bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
		return should_fail(&fail_make_request, bio->bi_size);

	return 0;
}

static int __init fail_make_request_debugfs(void)
{
	return init_fault_attr_dentries(&fail_make_request,
					"fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline int should_fail_request(struct bio *bio)
{
	return 0;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

/**
 * generic_make_request: hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * is set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_bdev and
 * bi_sector for remaps as it sees fit.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	sector_t old_sector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;
	int err = -EIO;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 *
	 * NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	old_sector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
				"nonexistent block-device %s (%Lu)\n",
				bdevname(bio->bi_bdev, b),
				(long long) bio->bi_sector);
end_io:
			bio_endio(bio, err);
			break;
		}

		if (unlikely(nr_sectors > q->max_hw_sectors)) {
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
				bdevname(bio->bi_bdev, b),
				bio_sectors(bio),
				q->max_hw_sectors);
			goto end_io;
		}

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;

		if (should_fail_request(bio))
			goto end_io;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		blk_partition_remap(bio);

		if (old_sector != -1)
			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
					    old_sector);

		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

		if (bio_check_eod(bio, nr_sectors))
			goto end_io;
		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		ret = q->make_request_fn(q, bio);
	} while (ret);
}

/*
 * We only want one ->make_request_fn to be active at a time,
 * else stack usage with stacked devices could be a problem.
 * So use current->bio_{list,tail} to keep a list of requests
 * submitted by a make_request_fn function.
 * current->bio_tail is also used as a flag to say if
 * generic_make_request is currently active in this task or not.
 * If it is NULL, then no make_request is active.  If it is non-NULL,
 * then a make_request is active, and new requests should be added
 * at the tail
 */
void generic_make_request(struct bio *bio)
{
	if (current->bio_tail) {
		/* make_request is active */
		*(current->bio_tail) = bio;
		bio->bi_next = NULL;
		current->bio_tail = &bio->bi_next;
		return;
	}
	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to the next (which is NULL) and bio_tail
	 * to &bio_list, thus initialising the bio_list of new bios to be
	 * added.  __generic_make_request may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so fixup bio_list and
	 * bio_tail or bi_next, and call into __generic_make_request again.
	 *
	 * The loop was structured like this to make only one call to
	 * __generic_make_request (which is important as it is large and
	 * inlined) and to keep the structure simple.
	 */
	BUG_ON(bio->bi_next);
	do {
		current->bio_list = bio->bi_next;
		if (bio->bi_next == NULL)
			current->bio_tail = &current->bio_list;
		else
			bio->bi_next = NULL;
		__generic_make_request(bio);
		bio = current->bio_list;
	} while (bio);
	current->bio_tail = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);
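/*
 * Example (editor's sketch, not part of the original file): the caller
 * obligations listed in the kernel-doc above, namely bi_bdev/bi_sector for
 * the device address, bi_io_vec for the memory, bi_end_io for completion.
 * Reads one page; example_end_io() and example_read_page() are hypothetical.
 * For a READ, no direction flags need to be set in bi_rw.
 */
#if 0
static void example_end_io(struct bio *bio, int error);	/* hypothetical */

static void example_read_page(struct block_device *bdev, sector_t sector,
			      struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	generic_make_request(bio);	/* completion arrives via bi_end_io */
}
#endif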

/**
 * submit_bio: submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces, @bio must be presetup and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (!bio_empty_barrier(bio)) {

		BIO_BUG_ON(!bio->bi_size);
		BIO_BUG_ON(!bio->bi_io_vec);

		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
				current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b));
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
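/*
 * Example (editor's sketch, not part of the original file): submit_bio() is
 * the usual entry point and layers the accounting shown above on top of
 * generic_make_request(). Same hypothetical setup as the previous sketch,
 * written as a WRITE.
 */
#if 0
static void example_write_page(struct block_device *bdev, sector_t sector,
			       struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;	/* hypothetical, as before */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(WRITE, bio);		/* accounts PGPGOUT, then queues */
}
#endif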

/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, and sets it up
 *     for the next range of segments (if any) in the cluster.
 *
 * Return:
 *     0 - we are done with this request, call end_that_request_last()
 *     1 - still buffers pending for this request
 **/
static int __end_that_request_first(struct request *req, int error,
				    int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;

	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);

	/*
	 * for a REQ_BLOCK_PC request, we want to carry any eventual
	 * sense key with us all the way through
	 */
	if (!blk_pc_request(req))
		req->errors = 0;

	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}

	if (blk_fs_request(req) && req->rq_disk) {
		const int rw = rq_data_dir(req);

		all_stat_add(req->rq_disk, sectors[rw],
			     nr_bytes >> 9, req->sector);
	}

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		/*
		 * For an empty barrier request, the low level driver must
		 * store a potential error location in ->sector. We pass
		 * that back up in ->bi_sector.
		 */
		if (blk_empty_barrier(req))
			bio->bi_sector = req->sector;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
						__func__, bio->bi_idx,
						bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio)
		return 0;

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	blk_recalc_rq_sectors(req, total_bytes >> 9);
	blk_recalc_rq_segments(req);
	return 1;
}

/*
 * splice the completion data to a local structure and hand off to
 * process_completion_queue() to complete the requests
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, donelist);
		list_del_init(&rq->donelist);
		rq->q->softirq_done_fn(rq);
	}
}

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}


static struct notifier_block blk_cpu_notifier __cpuinitdata = {
	.notifier_call	= blk_cpu_notify,
};

/**
 * blk_complete_request - end I/O on a request
 * @req:      the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/

void blk_complete_request(struct request *req)
{
	struct list_head *cpu_list;
	unsigned long flags;

	BUG_ON(!req->q->softirq_done_fn);

	local_irq_save(flags);

	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&req->donelist, cpu_list);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_complete_request);
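/*
 * Example (editor's sketch, not part of the original file): the registration
 * requirement from the description above. At init time the driver registers
 * a softirq completion handler with blk_queue_softirq_done(); its hard-IRQ
 * handler then only calls blk_complete_request(), and the real completion
 * runs later from BLOCK_SOFTIRQ context. The example_*() names and the
 * zero-byte-residual completion are hypothetical.
 */
#if 0
static void example_softirq_done(struct request *rq)
{
	/* blk_end_request() takes q->queue_lock itself */
	blk_end_request(rq, 0, blk_rq_bytes(rq));
}

static void example_init_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, example_softirq_done);
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct request *rq = example_fetch_completed(data);

	blk_complete_request(rq);	/* defers to blk_done_softirq() */
	return IRQ_HANDLED;
}
#endif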
1709 
1710 /*
1711  * queue lock must be held
1712  */
1713 static void end_that_request_last(struct request *req, int error)
1714 {
1715 	struct gendisk *disk = req->rq_disk;
1716 
1717 	if (blk_rq_tagged(req))
1718 		blk_queue_end_tag(req->q, req);
1719 
1720 	if (blk_queued_rq(req))
1721 		blkdev_dequeue_request(req);
1722 
1723 	if (unlikely(laptop_mode) && blk_fs_request(req))
1724 		laptop_io_completion();
1725 
1726 	/*
1727 	 * Account IO completion.  bar_rq isn't accounted as a normal
1728 	 * IO on queueing nor completion.  Accounting the containing
1729 	 * request is enough.
1730 	 */
1731 	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1732 		unsigned long duration = jiffies - req->start_time;
1733 		const int rw = rq_data_dir(req);
1734 		struct hd_struct *part = get_part(disk, req->sector);
1735 
1736 		__all_stat_inc(disk, ios[rw], req->sector);
1737 		__all_stat_add(disk, ticks[rw], duration, req->sector);
1738 		disk_round_stats(disk);
1739 		disk->in_flight--;
1740 		if (part) {
1741 			part_round_stats(part);
1742 			part->in_flight--;
1743 		}
1744 	}
1745 
1746 	if (req->end_io)
1747 		req->end_io(req, error);
1748 	else {
1749 		if (blk_bidi_rq(req))
1750 			__blk_put_request(req->next_rq->q, req->next_rq);
1751 
1752 		__blk_put_request(req->q, req);
1753 	}
1754 }
1755 
1756 static inline void __end_request(struct request *rq, int uptodate,
1757 				 unsigned int nr_bytes)
1758 {
1759 	int error = 0;
1760 
1761 	if (uptodate <= 0)
1762 		error = uptodate ? uptodate : -EIO;
1763 
1764 	__blk_end_request(rq, error, nr_bytes);
1765 }
1766 
1767 /**
1768  * blk_rq_bytes - Returns bytes left to complete in the entire request
1769  * @rq: the request being processed
1770  **/
1771 unsigned int blk_rq_bytes(struct request *rq)
1772 {
1773 	if (blk_fs_request(rq))
1774 		return rq->hard_nr_sectors << 9;
1775 
1776 	return rq->data_len;
1777 }
1778 EXPORT_SYMBOL_GPL(blk_rq_bytes);
1779 
1780 /**
1781  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1782  * @rq: the request being processed
1783  **/
1784 unsigned int blk_rq_cur_bytes(struct request *rq)
1785 {
1786 	if (blk_fs_request(rq))
1787 		return rq->current_nr_sectors << 9;
1788 
1789 	if (rq->bio)
1790 		return rq->bio->bi_size;
1791 
1792 	return rq->data_len;
1793 }
1794 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1795 
1796 /**
1797  * end_queued_request - end all I/O on a queued request
1798  * @rq:		the request being processed
1799  * @uptodate:	error value or 0/1 uptodate flag
1800  *
1801  * Description:
1802  *     Ends all I/O on a request, and removes it from the block layer queues.
1803  *     Not suitable for normal IO completion, unless the driver still has
1804  *     the request attached to the block layer.
1805  *
1806  **/
1807 void end_queued_request(struct request *rq, int uptodate)
1808 {
1809 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1810 }
1811 EXPORT_SYMBOL(end_queued_request);
1812 
1813 /**
1814  * end_dequeued_request - end all I/O on a dequeued request
1815  * @rq:		the request being processed
1816  * @uptodate:	error value or 0/1 uptodate flag
1817  *
1818  * Description:
1819  *     Ends all I/O on a request. The request must already have been
1820  *     dequeued using blkdev_dequeue_request(), as is normally the case
1821  *     for most drivers.
1822  *
1823  **/
1824 void end_dequeued_request(struct request *rq, int uptodate)
1825 {
1826 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1827 }
1828 EXPORT_SYMBOL(end_dequeued_request);
1829 
1830 
1831 /**
1832  * end_request - end I/O on the current segment of the request
1833  * @req:	the request being processed
1834  * @uptodate:	error value or 0/1 uptodate flag
1835  *
1836  * Description:
1837  *     Ends I/O on the current segment of a request. If that is the only
1838  *     remaining segment, the request is also completed and freed.
1839  *
1840  *     This is a remnant of how older block drivers handled IO completions.
1841  *     Modern drivers typically end IO on the full request in one go, unless
1842  *     they have a residual value to account for. For that case this function
1843  *     isn't really useful, unless the residual just happens to be the
1844  *     full current segment. In other words, don't use this function in new
1845  *     code. Either use end_request_completely(), or the
1846  *     end_that_request_chunk() (along with end_that_request_last()) for
1847  *     partial completions.
1848  *
1849  **/
1850 void end_request(struct request *req, int uptodate)
1851 {
1852 	__end_request(req, uptodate, req->hard_cur_sectors << 9);
1853 }
1854 EXPORT_SYMBOL(end_request);
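
/*
 * A minimal sketch of the legacy pattern (hypothetical handler,
 * illustrative only), called with the queue lock held: retire exactly
 * the current segment using the old-style uptodate flag.
 */
static void example_segment_done(struct request *rq, int ok)
{
	/* completes rq->hard_cur_sectors sectors; frees rq on the last one */
	end_request(rq, ok ? 1 : 0);	/* 0 is mapped to -EIO */
}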
1855 
1856 /**
1857  * blk_end_io - Generic end_io function to complete a request.
1858  * @rq:           the request being processed
1859  * @error:        0 for success, < 0 for error
1860  * @nr_bytes:     number of bytes to complete @rq
1861  * @bidi_bytes:   number of bytes to complete @rq->next_rq
1862  * @drv_callback: function called between completion of bios in the request
1863  *                and completion of the request.
1864  *                If the callback returns non-zero, this helper returns
1865  *                without completing the request.
1866  *
1867  * Description:
1868  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1869  *     If @rq has leftover bytes, it is set up for the next range of segments.
1870  *
1871  * Return:
1872  *     0 - we are done with this request
1873  *     1 - this request is not freed yet; it still has pending buffers.
1874  **/
1875 static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1876 		      unsigned int bidi_bytes,
1877 		      int (drv_callback)(struct request *))
1878 {
1879 	struct request_queue *q = rq->q;
1880 	unsigned long flags = 0UL;
1881 
1882 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1883 		if (__end_that_request_first(rq, error, nr_bytes))
1884 			return 1;
1885 
1886 		/* Bidi request must be completed as a whole */
1887 		if (blk_bidi_rq(rq) &&
1888 		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
1889 			return 1;
1890 	}
1891 
1892 	/* Special feature for tricky drivers */
1893 	if (drv_callback && drv_callback(rq))
1894 		return 1;
1895 
1896 	add_disk_randomness(rq->rq_disk);
1897 
1898 	spin_lock_irqsave(q->queue_lock, flags);
1899 	end_that_request_last(rq, error);
1900 	spin_unlock_irqrestore(q->queue_lock, flags);
1901 
1902 	return 0;
1903 }
1904 
1905 /**
1906  * blk_end_request - Helper function for drivers to complete the request.
1907  * @rq:       the request being processed
1908  * @error:    0 for success, < 0 for error
1909  * @nr_bytes: number of bytes to complete
1910  *
1911  * Description:
1912  *     Ends I/O on a number of bytes attached to @rq.
1913  *     If @rq has leftover bytes, it is set up for the next range of segments.
1914  *
1915  * Return:
1916  *     0 - we are done with this request
1917  *     1 - still buffers pending for this request
1918  **/
1919 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1920 {
1921 	return blk_end_io(rq, error, nr_bytes, 0, NULL);
1922 }
1923 EXPORT_SYMBOL_GPL(blk_end_request);
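
/*
 * A minimal sketch (hypothetical completion path, illustrative only):
 * blk_end_request() takes the queue lock itself, so no locks need to
 * be held here, even in interrupt context.
 */
static void example_xfer_done(struct request *rq, int error,
			      unsigned int bytes)
{
	if (blk_end_request(rq, error, bytes))
		return;	/* leftover: rq was set up for the next segments */

	/* rq was fully completed and freed; don't touch it again */
}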
1924 
1925 /**
1926  * __blk_end_request - Helper function for drivers to complete the request.
1927  * @rq:       the request being processed
1928  * @error:    0 for success, < 0 for error
1929  * @nr_bytes: number of bytes to complete
1930  *
1931  * Description:
1932  *     Unlike blk_end_request(), must be called with the queue lock held.
1933  *
1934  * Return:
1935  *     0 - we are done with this request
1936  *     1 - still buffers pending for this request
1937  **/
1938 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1939 {
1940 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1941 		if (__end_that_request_first(rq, error, nr_bytes))
1942 			return 1;
1943 	}
1944 
1945 	add_disk_randomness(rq->rq_disk);
1946 
1947 	end_that_request_last(rq, error);
1948 
1949 	return 0;
1950 }
1951 EXPORT_SYMBOL_GPL(__blk_end_request);
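
/*
 * The same sketch under the queue lock (hypothetical, illustrative
 * only): from request_fn or any other queue_lock-holding context, use
 * __blk_end_request() to avoid taking the lock recursively.
 */
static void example_xfer_done_locked(struct request *rq, int error,
				     unsigned int bytes)
{
	if (__blk_end_request(rq, error, bytes))
		return;	/* still has pending buffers */
}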
1952 
1953 /**
1954  * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1955  * @rq:         the bidi request being processed
1956  * @error:      0 for success, < 0 for error
1957  * @nr_bytes:   number of bytes to complete @rq
1958  * @bidi_bytes: number of bytes to complete @rq->next_rq
1959  *
1960  * Description:
1961  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1962  *
1963  * Return:
1964  *     0 - we are done with this request
1965  *     1 - still buffers pending for this request
1966  **/
1967 int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1968 			 unsigned int bidi_bytes)
1969 {
1970 	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
1971 }
1972 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
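
/*
 * A minimal sketch (hypothetical, illustrative only): complete both
 * directions of a request known to be bidi (rq->next_rq != NULL),
 * assuming both sides transferred fully.
 */
static void example_bidi_done(struct request *rq, int error)
{
	blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
			     blk_rq_bytes(rq->next_rq));
}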
1973 
1974 /**
1975  * blk_end_request_callback - Special helper function for tricky drivers
1976  * @rq:           the request being processed
1977  * @error:        0 for success, < 0 for error
1978  * @nr_bytes:     number of bytes to complete
1979  * @drv_callback: function called between completion of bios in the request
1980  *                and completion of the request.
1981  *                If the callback returns non-zero, this helper returns
1982  *                without completing the request.
1983  *
1984  * Description:
1985  *     Ends I/O on a number of bytes attached to @rq.
1986  *     If @rq has leftover bytes, it is set up for the next range of segments.
1987  *
1988  *     This special helper function is used only by existing tricky drivers
1989  *     (e.g. cdrom_newpc_intr() of ide-cd).
1990  *     It will be removed once such drivers are rewritten;
1991  *     don't use it in new code.
1992  *
1993  * Return:
1994  *     0 - we are done with this request
1995  *     1 - this request is not freed yet:
1996  *         it still has pending buffers, or
1997  *         the driver doesn't want to finish it yet.
1998  **/
1999 int blk_end_request_callback(struct request *rq, int error,
2000 			     unsigned int nr_bytes,
2001 			     int (drv_callback)(struct request *))
2002 {
2003 	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
2004 }
2005 EXPORT_SYMBOL_GPL(blk_end_request_callback);
2006 
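/*
 * Seed the data-transfer fields of a request from its first bio:
 * direction bits, segment and sector counts, buffer pointer, total
 * length and the owning disk.
 */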
2007 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2008 		     struct bio *bio)
2009 {
2010 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
2011 	rq->cmd_flags |= (bio->bi_rw & 3);
2012 
2013 	rq->nr_phys_segments = bio_phys_segments(q, bio);
2014 	rq->nr_hw_segments = bio_hw_segments(q, bio);
2015 	rq->current_nr_sectors = bio_cur_sectors(bio);
2016 	rq->hard_cur_sectors = rq->current_nr_sectors;
2017 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2018 	rq->buffer = bio_data(bio);
2019 	rq->data_len = bio->bi_size;
2020 
2021 	rq->bio = rq->biotail = bio;
2022 
2023 	if (bio->bi_bdev)
2024 		rq->rq_disk = bio->bi_bdev->bd_disk;
2025 }
2026 
2027 int kblockd_schedule_work(struct work_struct *work)
2028 {
2029 	return queue_work(kblockd_workqueue, work);
2030 }
2031 EXPORT_SYMBOL(kblockd_schedule_work);
2032 
2033 void kblockd_flush_work(struct work_struct *work)
2034 {
2035 	cancel_work_sync(work);
2036 }
2037 EXPORT_SYMBOL(kblockd_flush_work);
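
/*
 * A minimal sketch (hypothetical work item, illustrative only): defer
 * processing to kblockd from atomic context, then synchronize against
 * it at teardown time.
 */
static void example_deferred_fn(struct work_struct *work)
{
	/* non-atomic processing on behalf of the block layer */
}

static DECLARE_WORK(example_work, example_deferred_fn);

static void example_defer_and_sync(void)
{
	kblockd_schedule_work(&example_work);	/* safe in atomic context */
	kblockd_flush_work(&example_work);	/* wait for it to finish */
}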
2038 
2039 int __init blk_dev_init(void)
2040 {
2041 	int i;
2042 
2043 	kblockd_workqueue = create_workqueue("kblockd");
2044 	if (!kblockd_workqueue)
2045 		panic("Failed to create kblockd\n");
2046 
2047 	request_cachep = kmem_cache_create("blkdev_requests",
2048 			sizeof(struct request), 0, SLAB_PANIC, NULL);
2049 
2050 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2051 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2052 
2053 	for_each_possible_cpu(i)
2054 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2055 
2056 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
2057 	register_hotcpu_notifier(&blk_cpu_notifier);
2058 
2059 	return 0;
2060 }
2061 
2062