xref: /openbmc/linux/block/blk-core.c (revision 22246614)
1 /*
2  * Copyright (C) 1991, 1992 Linus Torvalds
3  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
4  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
5  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7  *	- July 2000
8  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
9  */
10 
11 /*
12  * This handles all read/write requests to block devices
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/interrupt.h>
30 #include <linux/cpu.h>
31 #include <linux/blktrace_api.h>
32 #include <linux/fault-inject.h>
33 
34 #include "blk.h"
35 
36 static int __make_request(struct request_queue *q, struct bio *bio);
37 
38 /*
39  * For the allocated request tables
40  */
41 static struct kmem_cache *request_cachep;
42 
43 /*
44  * For queue allocation
45  */
46 struct kmem_cache *blk_requestq_cachep;
47 
48 /*
49  * Controlling structure to kblockd
50  */
51 static struct workqueue_struct *kblockd_workqueue;
52 
53 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
54 
55 static void drive_stat_acct(struct request *rq, int new_io)
56 {
57 	struct hd_struct *part;
58 	int rw = rq_data_dir(rq);
59 
60 	if (!blk_fs_request(rq) || !rq->rq_disk)
61 		return;
62 
63 	part = get_part(rq->rq_disk, rq->sector);
64 	if (!new_io)
65 		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
66 	else {
67 		disk_round_stats(rq->rq_disk);
68 		rq->rq_disk->in_flight++;
69 		if (part) {
70 			part_round_stats(part);
71 			part->in_flight++;
72 		}
73 	}
74 }
75 
76 void blk_queue_congestion_threshold(struct request_queue *q)
77 {
78 	int nr;
79 
80 	nr = q->nr_requests - (q->nr_requests / 8) + 1;
81 	if (nr > q->nr_requests)
82 		nr = q->nr_requests;
83 	q->nr_congestion_on = nr;
84 
85 	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
86 	if (nr < 1)
87 		nr = 1;
88 	q->nr_congestion_off = nr;
89 }
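/*
 * Worked example of the thresholds computed above (assuming the common
 * default of q->nr_requests == 128):
 *
 *	nr_congestion_on  = 128 - 128/8 + 1           = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1  = 103
 *
 * so a direction of the queue is marked congested once about 113 requests
 * are allocated, and the congestion flag is cleared again when the
 * allocated count drops below 103.
 */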
90 
91 /**
92  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
93  * @bdev:	device
94  *
95  * Locates the passed device's request queue and returns the address of its
96  * backing_dev_info
97  *
98  * Will return NULL if the request queue cannot be located.
99  */
100 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
101 {
102 	struct backing_dev_info *ret = NULL;
103 	struct request_queue *q = bdev_get_queue(bdev);
104 
105 	if (q)
106 		ret = &q->backing_dev_info;
107 	return ret;
108 }
109 EXPORT_SYMBOL(blk_get_backing_dev_info);
110 
111 void blk_rq_init(struct request_queue *q, struct request *rq)
112 {
113 	memset(rq, 0, sizeof(*rq));
114 
115 	INIT_LIST_HEAD(&rq->queuelist);
116 	INIT_LIST_HEAD(&rq->donelist);
117 	rq->q = q;
118 	rq->sector = rq->hard_sector = (sector_t) -1;
119 	INIT_HLIST_NODE(&rq->hash);
120 	RB_CLEAR_NODE(&rq->rb_node);
121 	rq->cmd = rq->__cmd;
122 	rq->tag = -1;
123 	rq->ref_count = 1;
124 }
125 EXPORT_SYMBOL(blk_rq_init);
126 
127 static void req_bio_endio(struct request *rq, struct bio *bio,
128 			  unsigned int nbytes, int error)
129 {
130 	struct request_queue *q = rq->q;
131 
132 	if (&q->bar_rq != rq) {
133 		if (error)
134 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
135 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
136 			error = -EIO;
137 
138 		if (unlikely(nbytes > bio->bi_size)) {
139 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
140 			       __func__, nbytes, bio->bi_size);
141 			nbytes = bio->bi_size;
142 		}
143 
144 		bio->bi_size -= nbytes;
145 		bio->bi_sector += (nbytes >> 9);
146 		if (bio->bi_size == 0)
147 			bio_endio(bio, error);
148 	} else {
149 
150 		/*
151 		 * Okay, this is the barrier request in progress, just
152 		 * record the error;
153 		 */
154 		if (error && !q->orderr)
155 			q->orderr = error;
156 	}
157 }
158 
159 void blk_dump_rq_flags(struct request *rq, char *msg)
160 {
161 	int bit;
162 
163 	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
164 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
165 		rq->cmd_flags);
166 
167 	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
168 						(unsigned long long)rq->sector,
169 						rq->nr_sectors,
170 						rq->current_nr_sectors);
171 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
172 						rq->bio, rq->biotail,
173 						rq->buffer, rq->data,
174 						rq->data_len);
175 
176 	if (blk_pc_request(rq)) {
177 		printk(KERN_INFO "  cdb: ");
178 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
179 			printk("%02x ", rq->cmd[bit]);
180 		printk("\n");
181 	}
182 }
183 EXPORT_SYMBOL(blk_dump_rq_flags);
184 
185 /*
186  * "plug" the device if there are no outstanding requests: this will
187  * force the transfer to start only after we have put all the requests
188  * on the list.
189  *
190  * This is called with interrupts off and no requests on the queue and
191  * with the queue lock held.
192  */
193 void blk_plug_device(struct request_queue *q)
194 {
195 	WARN_ON(!irqs_disabled());
196 
197 	/*
198 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
199 	 * which will restart the queueing
200 	 */
201 	if (blk_queue_stopped(q))
202 		return;
203 
204 	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
205 		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
206 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
207 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
208 	}
209 }
210 EXPORT_SYMBOL(blk_plug_device);
211 
212 /*
213  * remove the queue from the plugged list, if present. called with
214  * queue lock held and interrupts disabled.
215  */
216 int blk_remove_plug(struct request_queue *q)
217 {
218 	WARN_ON(!irqs_disabled());
219 
220 	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
221 		return 0;
222 
223 	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
224 	del_timer(&q->unplug_timer);
225 	return 1;
226 }
227 EXPORT_SYMBOL(blk_remove_plug);
228 
229 /*
230  * remove the plug and let it rip..
231  */
232 void __generic_unplug_device(struct request_queue *q)
233 {
234 	if (unlikely(blk_queue_stopped(q)))
235 		return;
236 
237 	if (!blk_remove_plug(q))
238 		return;
239 
240 	q->request_fn(q);
241 }
242 EXPORT_SYMBOL(__generic_unplug_device);
243 
244 /**
245  * generic_unplug_device - fire a request queue
246  * @q:    The &struct request_queue in question
247  *
248  * Description:
249  *   Linux uses plugging to build bigger request queues before letting
250  *   the device have at them. If a queue is plugged, the I/O scheduler
251  *   is still adding and merging requests on the queue. Once the queue
252  *   gets unplugged, the request_fn defined for the queue is invoked and
253  *   transfers started.
254  **/
255 void generic_unplug_device(struct request_queue *q)
256 {
257 	if (blk_queue_plugged(q)) {
258 		spin_lock_irq(q->queue_lock);
259 		__generic_unplug_device(q);
260 		spin_unlock_irq(q->queue_lock);
261 	}
262 }
263 EXPORT_SYMBOL(generic_unplug_device);
264 
265 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
266 				   struct page *page)
267 {
268 	struct request_queue *q = bdi->unplug_io_data;
269 
270 	blk_unplug(q);
271 }
272 
273 void blk_unplug_work(struct work_struct *work)
274 {
275 	struct request_queue *q =
276 		container_of(work, struct request_queue, unplug_work);
277 
278 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
279 				q->rq.count[READ] + q->rq.count[WRITE]);
280 
281 	q->unplug_fn(q);
282 }
283 
284 void blk_unplug_timeout(unsigned long data)
285 {
286 	struct request_queue *q = (struct request_queue *)data;
287 
288 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
289 				q->rq.count[READ] + q->rq.count[WRITE]);
290 
291 	kblockd_schedule_work(&q->unplug_work);
292 }
293 
294 void blk_unplug(struct request_queue *q)
295 {
296 	/*
297 	 * devices don't necessarily have an ->unplug_fn defined
298 	 */
299 	if (q->unplug_fn) {
300 		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
301 					q->rq.count[READ] + q->rq.count[WRITE]);
302 
303 		q->unplug_fn(q);
304 	}
305 }
306 EXPORT_SYMBOL(blk_unplug);
307 
308 /**
309  * blk_start_queue - restart a previously stopped queue
310  * @q:    The &struct request_queue in question
311  *
312  * Description:
313  *   blk_start_queue() will clear the stop flag on the queue, and call
314  *   the request_fn for the queue if it was in a stopped state when
315  *   entered. Also see blk_stop_queue(). Queue lock must be held.
316  **/
317 void blk_start_queue(struct request_queue *q)
318 {
319 	WARN_ON(!irqs_disabled());
320 
321 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
322 
323 	/*
324 	 * one level of recursion is ok and is much faster than kicking
325 	 * the unplug handling
326 	 */
327 	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
328 		queue_flag_set(QUEUE_FLAG_REENTER, q);
329 		q->request_fn(q);
330 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
331 	} else {
332 		blk_plug_device(q);
333 		kblockd_schedule_work(&q->unplug_work);
334 	}
335 }
336 EXPORT_SYMBOL(blk_start_queue);
337 
338 /**
339  * blk_stop_queue - stop a queue
340  * @q:    The &struct request_queue in question
341  *
342  * Description:
343  *   The Linux block layer assumes that a block driver will consume all
344  *   entries on the request queue when the request_fn strategy is called.
345  *   Often this will not happen, because of hardware limitations (queue
346  *   depth settings). If a device driver gets a 'queue full' response,
347  *   or if it simply chooses not to queue more I/O at one point, it can
348  *   call this function to prevent the request_fn from being called until
349  *   the driver has signalled it's ready to go again. This happens by calling
350  *   blk_start_queue() to restart queue operations. Queue lock must be held.
351  **/
352 void blk_stop_queue(struct request_queue *q)
353 {
354 	blk_remove_plug(q);
355 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
356 }
357 EXPORT_SYMBOL(blk_stop_queue);
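/*
 * Illustrative sketch (not part of this file): how a driver's request_fn
 * and interrupt handler might pair blk_stop_queue()/blk_start_queue() to
 * throttle dispatch while its hardware queue is full.  The ex_dev
 * structure and the ex_hw_*()/example_*() names are hypothetical.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct ex_dev *dev = q->queuedata;
	struct request *rq;

	/* the queue lock is already held when request_fn is invoked */
	while ((rq = elv_next_request(q)) != NULL) {
		if (ex_hw_queue_full(dev)) {
			blk_stop_queue(q);	/* come back later */
			break;
		}
		blkdev_dequeue_request(rq);
		ex_hw_submit(dev, rq);
	}
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct ex_dev *dev = data;
	unsigned long flags;

	ex_hw_reap_completions(dev);

	/* hardware freed some slots; let the block layer call us again */
	spin_lock_irqsave(dev->queue->queue_lock, flags);
	blk_start_queue(dev->queue);
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);

	return IRQ_HANDLED;
}
#endif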
358 
359 /**
360  * blk_sync_queue - cancel any pending callbacks on a queue
361  * @q: the queue
362  *
363  * Description:
364  *     The block layer may perform asynchronous callback activity
365  *     on a queue, such as calling the unplug function after a timeout.
366  *     A block device may call blk_sync_queue to ensure that any
367  *     such activity is cancelled, thus allowing it to release resources
368  *     that the callbacks might use. The caller must already have made sure
369  *     that its ->make_request_fn will not re-add plugging prior to calling
370  *     this function.
371  *
372  */
373 void blk_sync_queue(struct request_queue *q)
374 {
375 	del_timer_sync(&q->unplug_timer);
376 	kblockd_flush_work(&q->unplug_work);
377 }
378 EXPORT_SYMBOL(blk_sync_queue);
379 
380 /**
381  * __blk_run_queue - run a single device queue
382  * @q:	The queue to run
383  */
384 void __blk_run_queue(struct request_queue *q)
385 {
386 	blk_remove_plug(q);
387 
388 	/*
389 	 * Only recurse once to avoid overrunning the stack, let the unplug
390 	 * handling reinvoke the handler shortly if we already got there.
391 	 */
392 	if (!elv_queue_empty(q)) {
393 		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
394 			queue_flag_set(QUEUE_FLAG_REENTER, q);
395 			q->request_fn(q);
396 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
397 		} else {
398 			blk_plug_device(q);
399 			kblockd_schedule_work(&q->unplug_work);
400 		}
401 	}
402 }
403 EXPORT_SYMBOL(__blk_run_queue);
404 
405 /**
406  * blk_run_queue - run a single device queue
407  * @q: The queue to run
408  */
409 void blk_run_queue(struct request_queue *q)
410 {
411 	unsigned long flags;
412 
413 	spin_lock_irqsave(q->queue_lock, flags);
414 	__blk_run_queue(q);
415 	spin_unlock_irqrestore(q->queue_lock, flags);
416 }
417 EXPORT_SYMBOL(blk_run_queue);
418 
419 void blk_put_queue(struct request_queue *q)
420 {
421 	kobject_put(&q->kobj);
422 }
423 
424 void blk_cleanup_queue(struct request_queue *q)
425 {
426 	mutex_lock(&q->sysfs_lock);
427 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
428 	mutex_unlock(&q->sysfs_lock);
429 
430 	if (q->elevator)
431 		elevator_exit(q->elevator);
432 
433 	blk_put_queue(q);
434 }
435 EXPORT_SYMBOL(blk_cleanup_queue);
436 
437 static int blk_init_free_list(struct request_queue *q)
438 {
439 	struct request_list *rl = &q->rq;
440 
441 	rl->count[READ] = rl->count[WRITE] = 0;
442 	rl->starved[READ] = rl->starved[WRITE] = 0;
443 	rl->elvpriv = 0;
444 	init_waitqueue_head(&rl->wait[READ]);
445 	init_waitqueue_head(&rl->wait[WRITE]);
446 
447 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
448 				mempool_free_slab, request_cachep, q->node);
449 
450 	if (!rl->rq_pool)
451 		return -ENOMEM;
452 
453 	return 0;
454 }
455 
456 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
457 {
458 	return blk_alloc_queue_node(gfp_mask, -1);
459 }
460 EXPORT_SYMBOL(blk_alloc_queue);
461 
462 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
463 {
464 	struct request_queue *q;
465 	int err;
466 
467 	q = kmem_cache_alloc_node(blk_requestq_cachep,
468 				gfp_mask | __GFP_ZERO, node_id);
469 	if (!q)
470 		return NULL;
471 
472 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
473 	q->backing_dev_info.unplug_io_data = q;
474 	err = bdi_init(&q->backing_dev_info);
475 	if (err) {
476 		kmem_cache_free(blk_requestq_cachep, q);
477 		return NULL;
478 	}
479 
480 	init_timer(&q->unplug_timer);
481 
482 	kobject_init(&q->kobj, &blk_queue_ktype);
483 
484 	mutex_init(&q->sysfs_lock);
485 
486 	return q;
487 }
488 EXPORT_SYMBOL(blk_alloc_queue_node);
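/*
 * Illustrative sketch (not part of this file): a bio-based driver that
 * does not want request queueing allocates a bare queue here and installs
 * its own make_request_fn.  example_make_request()/example_setup_queue()
 * are hypothetical names.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	/* handle the bio directly (or remap it to another device) ... */
	bio_endio(bio, 0);
	return 0;	/* 0: done, don't loop again in generic_make_request */
}

static struct request_queue *example_setup_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;

	blk_queue_make_request(q, example_make_request);
	return q;
}
#endif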
489 
490 /**
491  * blk_init_queue  - prepare a request queue for use with a block device
492  * @rfn:  The function to be called to process requests that have been
493  *        placed on the queue.
494  * @lock: Request queue spin lock
495  *
496  * Description:
497  *    If a block device wishes to use the standard request handling procedures,
498  *    which sorts requests and coalesces adjacent requests, then it must
499  *    call blk_init_queue().  The function @rfn will be called when there
500  *    are requests on the queue that need to be processed.  If the device
501  *    supports plugging, then @rfn may not be called immediately when requests
502  *    are available on the queue, but may be called at some time later instead.
503  *    Plugged queues are generally unplugged when a buffer belonging to one
504  *    of the requests on the queue is needed, or due to memory pressure.
505  *
506  *    @rfn is not required, or even expected, to remove all requests off the
507  *    queue, but only as many as it can handle at a time.  If it does leave
508  *    requests on the queue, it is responsible for arranging that the requests
509  *    get dealt with eventually.
510  *
511  *    The queue spin lock must be held while manipulating the requests on the
512  *    request queue; this lock will be taken also from interrupt context, so irq
513  *    disabling is needed for it.
514  *
515  *    Function returns a pointer to the initialized request queue, or NULL if
516  *    it didn't succeed.
517  *
518  * Note:
519  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
520  *    when the block device is deactivated (such as at module unload).
521  **/
522 
523 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
524 {
525 	return blk_init_queue_node(rfn, lock, -1);
526 }
527 EXPORT_SYMBOL(blk_init_queue);
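/*
 * Illustrative sketch (not part of this file): minimal setup and teardown
 * for a driver using the standard request handling documented above.
 * example_request_fn is assumed to be defined elsewhere; the segment
 * limits are made-up values.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static struct request_queue *example_init_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(example_request_fn, &example_lock);
	if (!q)
		return NULL;

	/* optionally tighten the defaults chosen by blk_init_queue() */
	blk_queue_max_phys_segments(q, 32);
	blk_queue_max_hw_segments(q, 32);

	return q;
}

static void example_exit_queue(struct request_queue *q)
{
	/* pairs with blk_init_queue(), e.g. at module unload */
	blk_cleanup_queue(q);
}
#endif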
528 
529 struct request_queue *
530 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
531 {
532 	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
533 
534 	if (!q)
535 		return NULL;
536 
537 	q->node = node_id;
538 	if (blk_init_free_list(q)) {
539 		kmem_cache_free(blk_requestq_cachep, q);
540 		return NULL;
541 	}
542 
543 	/*
544 	 * if caller didn't supply a lock, they get per-queue locking with
545 	 * our embedded lock
546 	 */
547 	if (!lock) {
548 		spin_lock_init(&q->__queue_lock);
549 		lock = &q->__queue_lock;
550 	}
551 
552 	q->request_fn		= rfn;
553 	q->prep_rq_fn		= NULL;
554 	q->unplug_fn		= generic_unplug_device;
555 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
556 	q->queue_lock		= lock;
557 
558 	blk_queue_segment_boundary(q, 0xffffffff);
559 
560 	blk_queue_make_request(q, __make_request);
561 	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
562 
563 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
564 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
565 
566 	q->sg_reserved_size = INT_MAX;
567 
568 	/*
569 	 * all done
570 	 */
571 	if (!elevator_init(q, NULL)) {
572 		blk_queue_congestion_threshold(q);
573 		return q;
574 	}
575 
576 	blk_put_queue(q);
577 	return NULL;
578 }
579 EXPORT_SYMBOL(blk_init_queue_node);
580 
581 int blk_get_queue(struct request_queue *q)
582 {
583 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
584 		kobject_get(&q->kobj);
585 		return 0;
586 	}
587 
588 	return 1;
589 }
590 
591 static inline void blk_free_request(struct request_queue *q, struct request *rq)
592 {
593 	if (rq->cmd_flags & REQ_ELVPRIV)
594 		elv_put_request(q, rq);
595 	mempool_free(rq, q->rq.rq_pool);
596 }
597 
598 static struct request *
599 blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
600 {
601 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
602 
603 	if (!rq)
604 		return NULL;
605 
606 	blk_rq_init(q, rq);
607 
608 	/*
609 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
610 	 * see bio.h and blkdev.h
611 	 */
612 	rq->cmd_flags = rw | REQ_ALLOCED;
613 
614 	if (priv) {
615 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
616 			mempool_free(rq, q->rq.rq_pool);
617 			return NULL;
618 		}
619 		rq->cmd_flags |= REQ_ELVPRIV;
620 	}
621 
622 	return rq;
623 }
624 
625 /*
626  * ioc_batching returns true if the ioc is a valid batching request and
627  * should be given priority access to a request.
628  */
629 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
630 {
631 	if (!ioc)
632 		return 0;
633 
634 	/*
635 	 * Make sure the process is able to allocate at least 1 request
636 	 * even if the batch times out, otherwise we could theoretically
637 	 * lose wakeups.
638 	 */
639 	return ioc->nr_batch_requests == q->nr_batching ||
640 		(ioc->nr_batch_requests > 0
641 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
642 }
643 
644 /*
645  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
646  * will cause the process to be a "batcher" on all queues in the system. This
647  * is the behaviour we want though - once it gets a wakeup it should be given
648  * a nice run.
649  */
650 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
651 {
652 	if (!ioc || ioc_batching(q, ioc))
653 		return;
654 
655 	ioc->nr_batch_requests = q->nr_batching;
656 	ioc->last_waited = jiffies;
657 }
658 
659 static void __freed_request(struct request_queue *q, int rw)
660 {
661 	struct request_list *rl = &q->rq;
662 
663 	if (rl->count[rw] < queue_congestion_off_threshold(q))
664 		blk_clear_queue_congested(q, rw);
665 
666 	if (rl->count[rw] + 1 <= q->nr_requests) {
667 		if (waitqueue_active(&rl->wait[rw]))
668 			wake_up(&rl->wait[rw]);
669 
670 		blk_clear_queue_full(q, rw);
671 	}
672 }
673 
674 /*
675  * A request has just been released.  Account for it, update the full and
676  * congestion status, wake up any waiters.   Called under q->queue_lock.
677  */
678 static void freed_request(struct request_queue *q, int rw, int priv)
679 {
680 	struct request_list *rl = &q->rq;
681 
682 	rl->count[rw]--;
683 	if (priv)
684 		rl->elvpriv--;
685 
686 	__freed_request(q, rw);
687 
688 	if (unlikely(rl->starved[rw ^ 1]))
689 		__freed_request(q, rw ^ 1);
690 }
691 
692 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
693 /*
694  * Get a free request, queue_lock must be held.
695  * Returns NULL on failure, with queue_lock held.
696  * Returns !NULL on success, with queue_lock *not held*.
697  */
698 static struct request *get_request(struct request_queue *q, int rw_flags,
699 				   struct bio *bio, gfp_t gfp_mask)
700 {
701 	struct request *rq = NULL;
702 	struct request_list *rl = &q->rq;
703 	struct io_context *ioc = NULL;
704 	const int rw = rw_flags & 0x01;
705 	int may_queue, priv;
706 
707 	may_queue = elv_may_queue(q, rw_flags);
708 	if (may_queue == ELV_MQUEUE_NO)
709 		goto rq_starved;
710 
711 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
712 		if (rl->count[rw]+1 >= q->nr_requests) {
713 			ioc = current_io_context(GFP_ATOMIC, q->node);
714 			/*
715 			 * The queue will fill after this allocation, so set
716 			 * it as full, and mark this process as "batching".
717 			 * This process will be allowed to complete a batch of
718 			 * requests, others will be blocked.
719 			 */
720 			if (!blk_queue_full(q, rw)) {
721 				ioc_set_batching(q, ioc);
722 				blk_set_queue_full(q, rw);
723 			} else {
724 				if (may_queue != ELV_MQUEUE_MUST
725 						&& !ioc_batching(q, ioc)) {
726 					/*
727 					 * The queue is full and the allocating
728 					 * process is not a "batcher", and not
729 					 * exempted by the IO scheduler
730 					 */
731 					goto out;
732 				}
733 			}
734 		}
735 		blk_set_queue_congested(q, rw);
736 	}
737 
738 	/*
739 	 * Only allow batching queuers to allocate up to 50% over the defined
740 	 * limit of requests, otherwise we could have thousands of requests
741 	 * allocated with any setting of ->nr_requests
742 	 */
743 	if (rl->count[rw] >= (3 * q->nr_requests / 2))
744 		goto out;
745 
746 	rl->count[rw]++;
747 	rl->starved[rw] = 0;
748 
749 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
750 	if (priv)
751 		rl->elvpriv++;
752 
753 	spin_unlock_irq(q->queue_lock);
754 
755 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
756 	if (unlikely(!rq)) {
757 		/*
758 		 * Allocation failed presumably due to memory. Undo anything
759 		 * we might have messed up.
760 		 *
761 		 * Allocating task should really be put onto the front of the
762 		 * wait queue, but this is pretty rare.
763 		 */
764 		spin_lock_irq(q->queue_lock);
765 		freed_request(q, rw, priv);
766 
767 		/*
768 		 * in the very unlikely event that allocation failed and no
769 		 * requests for this direction were pending, mark us starved
770 		 * so that freeing of a request in the other direction will
771 		 * notice us. another possible fix would be to split the
772 		 * rq mempool into READ and WRITE
773 		 */
774 rq_starved:
775 		if (unlikely(rl->count[rw] == 0))
776 			rl->starved[rw] = 1;
777 
778 		goto out;
779 	}
780 
781 	/*
782 	 * ioc may be NULL here, and ioc_batching will be false. That's
783 	 * OK, if the queue is under the request limit then requests need
784 	 * not count toward the nr_batch_requests limit. There will always
785 	 * be some limit enforced by BLK_BATCH_TIME.
786 	 */
787 	if (ioc_batching(q, ioc))
788 		ioc->nr_batch_requests--;
789 
790 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
791 out:
792 	return rq;
793 }
794 
795 /*
796  * No available requests for this queue, unplug the device and wait for some
797  * requests to become available.
798  *
799  * Called with q->queue_lock held, and returns with it unlocked.
800  */
801 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
802 					struct bio *bio)
803 {
804 	const int rw = rw_flags & 0x01;
805 	struct request *rq;
806 
807 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
808 	while (!rq) {
809 		DEFINE_WAIT(wait);
810 		struct request_list *rl = &q->rq;
811 
812 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
813 				TASK_UNINTERRUPTIBLE);
814 
815 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
816 
817 		if (!rq) {
818 			struct io_context *ioc;
819 
820 			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
821 
822 			__generic_unplug_device(q);
823 			spin_unlock_irq(q->queue_lock);
824 			io_schedule();
825 
826 			/*
827 			 * After sleeping, we become a "batching" process and
828 			 * will be able to allocate at least one request, and
829 			 * up to a big batch of them for a small period of time.
830 			 * See ioc_batching, ioc_set_batching
831 			 */
832 			ioc = current_io_context(GFP_NOIO, q->node);
833 			ioc_set_batching(q, ioc);
834 
835 			spin_lock_irq(q->queue_lock);
836 		}
837 		finish_wait(&rl->wait[rw], &wait);
838 	}
839 
840 	return rq;
841 }
842 
843 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
844 {
845 	struct request *rq;
846 
847 	BUG_ON(rw != READ && rw != WRITE);
848 
849 	spin_lock_irq(q->queue_lock);
850 	if (gfp_mask & __GFP_WAIT) {
851 		rq = get_request_wait(q, rw, NULL);
852 	} else {
853 		rq = get_request(q, rw, NULL, gfp_mask);
854 		if (!rq)
855 			spin_unlock_irq(q->queue_lock);
856 	}
857 	/* q->queue_lock is unlocked at this point */
858 
859 	return rq;
860 }
861 EXPORT_SYMBOL(blk_get_request);
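/*
 * Illustrative sketch (not part of this file): allocating a request for a
 * driver-private command and releasing it again.  example_fill_cmd() is a
 * hypothetical helper.
 */
#if 0
static int example_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	/* GFP_KERNEL includes __GFP_WAIT, so this may sleep for a request */
	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;
	example_fill_cmd(rq);

	/* ... hand rq to the hardware, or use blk_insert_request() ... */

	blk_put_request(rq);
	return 0;
}
#endif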
862 
863 /**
864  * blk_start_queueing - initiate dispatch of requests to device
865  * @q:		request queue to kick into gear
866  *
867  * This is basically a helper to remove the need to know whether a queue
868  * is plugged or not if someone just wants to initiate dispatch of requests
869  * for this queue.
870  *
871  * The queue lock must be held with interrupts disabled.
872  */
873 void blk_start_queueing(struct request_queue *q)
874 {
875 	if (!blk_queue_plugged(q))
876 		q->request_fn(q);
877 	else
878 		__generic_unplug_device(q);
879 }
880 EXPORT_SYMBOL(blk_start_queueing);
881 
882 /**
883  * blk_requeue_request - put a request back on queue
884  * @q:		request queue where request should be inserted
885  * @rq:		request to be inserted
886  *
887  * Description:
888  *    Drivers often keep queueing requests until the hardware cannot accept
889  *    more, when that condition happens we need to put the request back
890  *    on the queue. Must be called with queue lock held.
891  */
892 void blk_requeue_request(struct request_queue *q, struct request *rq)
893 {
894 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
895 
896 	if (blk_rq_tagged(rq))
897 		blk_queue_end_tag(q, rq);
898 
899 	elv_requeue_request(q, rq);
900 }
901 EXPORT_SYMBOL(blk_requeue_request);
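/*
 * Illustrative sketch (not part of this file): a fragment of a
 * hypothetical request_fn that requeues a request when the hardware
 * rejects it.  The queue lock is held here, as blk_requeue_request()
 * requires; ex_hw_submit() is a made-up helper.
 */
#if 0
	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);
		if (ex_hw_submit(dev, rq) == -EBUSY) {
			/* put it back at the front and retry later */
			blk_requeue_request(q, rq);
			blk_stop_queue(q);
			break;
		}
	}
#endif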
902 
903 /**
904  * blk_insert_request - insert a special request in to a request queue
905  * @q:		request queue where request should be inserted
906  * @rq:		request to be inserted
907  * @at_head:	insert request at head or tail of queue
908  * @data:	private data
909  *
910  * Description:
911  *    Many block devices need to execute commands asynchronously, so they don't
912  *    block the whole kernel from preemption during request execution.  This is
913  *    accomplished normally by inserting artificial requests tagged as
914  *    REQ_SPECIAL in to the corresponding request queue, and letting them be
915  *    scheduled for actual execution by the request queue.
916  *
917  *    We have the option of inserting the head or the tail of the queue.
918  *    Typically we use the tail for new ioctls and so forth.  We use the head
919  *    of the queue for things like a QUEUE_FULL message from a device, or a
920  *    host that is unable to accept a particular command.
921  */
922 void blk_insert_request(struct request_queue *q, struct request *rq,
923 			int at_head, void *data)
924 {
925 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
926 	unsigned long flags;
927 
928 	/*
929 	 * tell I/O scheduler that this isn't a regular read/write (ie it
930 	 * must not attempt merges on this) and that it acts as a soft
931 	 * barrier
932 	 */
933 	rq->cmd_type = REQ_TYPE_SPECIAL;
934 	rq->cmd_flags |= REQ_SOFTBARRIER;
935 
936 	rq->special = data;
937 
938 	spin_lock_irqsave(q->queue_lock, flags);
939 
940 	/*
941 	 * If command is tagged, release the tag
942 	 */
943 	if (blk_rq_tagged(rq))
944 		blk_queue_end_tag(q, rq);
945 
946 	drive_stat_acct(rq, 1);
947 	__elv_add_request(q, rq, where, 0);
948 	blk_start_queueing(q);
949 	spin_unlock_irqrestore(q->queue_lock, flags);
950 }
951 EXPORT_SYMBOL(blk_insert_request);
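/*
 * Illustrative sketch (not part of this file): queueing a driver-private
 * command through the elevator as described above.  The request would
 * typically come from blk_get_request(); example_cmd_done() and cmd are
 * hypothetical.
 */
#if 0
	rq->end_io = example_cmd_done;		/* called when rq completes */
	blk_insert_request(q, rq, 1, cmd);	/* at_head: run before queued I/O */
#endif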
952 
953 /*
954  * add-request adds a request to the linked list.
955  * queue lock is held and interrupts disabled, as we muck with the
956  * request queue list.
957  */
958 static inline void add_request(struct request_queue *q, struct request *req)
959 {
960 	drive_stat_acct(req, 1);
961 
962 	/*
963 	 * elevator indicated where it wants this request to be
964 	 * inserted at elevator_merge time
965 	 */
966 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
967 }
968 
969 /*
970  * disk_round_stats()	- Round off the performance stats on a struct
971  * disk_stats.
972  *
973  * The average IO queue length and utilisation statistics are maintained
974  * by observing the current state of the queue length and the amount of
975  * time it has been in this state.
976  *
977  * Normally, that accounting is done on IO completion, but that can result
978  * in more than a second's worth of IO being accounted for within any one
979  * second, leading to >100% utilisation.  To deal with that, we call this
980  * function to do a round-off before returning the results when reading
981  * /proc/diskstats.  This accounts immediately for all queue usage up to
982  * the current jiffies and restarts the counters again.
983  */
984 void disk_round_stats(struct gendisk *disk)
985 {
986 	unsigned long now = jiffies;
987 
988 	if (now == disk->stamp)
989 		return;
990 
991 	if (disk->in_flight) {
992 		__disk_stat_add(disk, time_in_queue,
993 				disk->in_flight * (now - disk->stamp));
994 		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
995 	}
996 	disk->stamp = now;
997 }
998 EXPORT_SYMBOL_GPL(disk_round_stats);
999 
1000 void part_round_stats(struct hd_struct *part)
1001 {
1002 	unsigned long now = jiffies;
1003 
1004 	if (now == part->stamp)
1005 		return;
1006 
1007 	if (part->in_flight) {
1008 		__part_stat_add(part, time_in_queue,
1009 				part->in_flight * (now - part->stamp));
1010 		__part_stat_add(part, io_ticks, (now - part->stamp));
1011 	}
1012 	part->stamp = now;
1013 }
1014 
1015 /*
1016  * queue lock must be held
1017  */
1018 void __blk_put_request(struct request_queue *q, struct request *req)
1019 {
1020 	if (unlikely(!q))
1021 		return;
1022 	if (unlikely(--req->ref_count))
1023 		return;
1024 
1025 	elv_completed_request(q, req);
1026 
1027 	/*
1028 	 * Request may not have originated from ll_rw_blk. If not,
1029 	 * it didn't come out of our reserved rq pools
1030 	 */
1031 	if (req->cmd_flags & REQ_ALLOCED) {
1032 		int rw = rq_data_dir(req);
1033 		int priv = req->cmd_flags & REQ_ELVPRIV;
1034 
1035 		BUG_ON(!list_empty(&req->queuelist));
1036 		BUG_ON(!hlist_unhashed(&req->hash));
1037 
1038 		blk_free_request(q, req);
1039 		freed_request(q, rw, priv);
1040 	}
1041 }
1042 EXPORT_SYMBOL_GPL(__blk_put_request);
1043 
1044 void blk_put_request(struct request *req)
1045 {
1046 	unsigned long flags;
1047 	struct request_queue *q = req->q;
1048 
1049 	/*
1050 	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
1051 	 * following if (q) test.
1052 	 */
1053 	if (q) {
1054 		spin_lock_irqsave(q->queue_lock, flags);
1055 		__blk_put_request(q, req);
1056 		spin_unlock_irqrestore(q->queue_lock, flags);
1057 	}
1058 }
1059 EXPORT_SYMBOL(blk_put_request);
1060 
1061 void init_request_from_bio(struct request *req, struct bio *bio)
1062 {
1063 	req->cmd_type = REQ_TYPE_FS;
1064 
1065 	/*
1066 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1067 	 */
1068 	if (bio_rw_ahead(bio) || bio_failfast(bio))
1069 		req->cmd_flags |= REQ_FAILFAST;
1070 
1071 	/*
1072 	 * REQ_BARRIER implies no merging, but lets make it explicit
1073 	 */
1074 	if (unlikely(bio_barrier(bio)))
1075 		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1076 
1077 	if (bio_sync(bio))
1078 		req->cmd_flags |= REQ_RW_SYNC;
1079 	if (bio_rw_meta(bio))
1080 		req->cmd_flags |= REQ_RW_META;
1081 
1082 	req->errors = 0;
1083 	req->hard_sector = req->sector = bio->bi_sector;
1084 	req->ioprio = bio_prio(bio);
1085 	req->start_time = jiffies;
1086 	blk_rq_bio_prep(req->q, req, bio);
1087 }
1088 
1089 static int __make_request(struct request_queue *q, struct bio *bio)
1090 {
1091 	struct request *req;
1092 	int el_ret, nr_sectors, barrier, err;
1093 	const unsigned short prio = bio_prio(bio);
1094 	const int sync = bio_sync(bio);
1095 	int rw_flags;
1096 
1097 	nr_sectors = bio_sectors(bio);
1098 
1099 	/*
1100 	 * low level driver can indicate that it wants pages above a
1101 	 * certain limit bounced to low memory (ie for highmem, or even
1102 	 * ISA dma in theory)
1103 	 */
1104 	blk_queue_bounce(q, &bio);
1105 
1106 	barrier = bio_barrier(bio);
1107 	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
1108 		err = -EOPNOTSUPP;
1109 		goto end_io;
1110 	}
1111 
1112 	spin_lock_irq(q->queue_lock);
1113 
1114 	if (unlikely(barrier) || elv_queue_empty(q))
1115 		goto get_rq;
1116 
1117 	el_ret = elv_merge(q, &req, bio);
1118 	switch (el_ret) {
1119 	case ELEVATOR_BACK_MERGE:
1120 		BUG_ON(!rq_mergeable(req));
1121 
1122 		if (!ll_back_merge_fn(q, req, bio))
1123 			break;
1124 
1125 		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
1126 
1127 		req->biotail->bi_next = bio;
1128 		req->biotail = bio;
1129 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1130 		req->ioprio = ioprio_best(req->ioprio, prio);
1131 		drive_stat_acct(req, 0);
1132 		if (!attempt_back_merge(q, req))
1133 			elv_merged_request(q, req, el_ret);
1134 		goto out;
1135 
1136 	case ELEVATOR_FRONT_MERGE:
1137 		BUG_ON(!rq_mergeable(req));
1138 
1139 		if (!ll_front_merge_fn(q, req, bio))
1140 			break;
1141 
1142 		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
1143 
1144 		bio->bi_next = req->bio;
1145 		req->bio = bio;
1146 
1147 		/*
1148 		 * may not be valid. If the low level driver said
1149 		 * it didn't need a bounce buffer then it better
1150 		 * not touch req->buffer either...
1151 		 */
1152 		req->buffer = bio_data(bio);
1153 		req->current_nr_sectors = bio_cur_sectors(bio);
1154 		req->hard_cur_sectors = req->current_nr_sectors;
1155 		req->sector = req->hard_sector = bio->bi_sector;
1156 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1157 		req->ioprio = ioprio_best(req->ioprio, prio);
1158 		drive_stat_acct(req, 0);
1159 		if (!attempt_front_merge(q, req))
1160 			elv_merged_request(q, req, el_ret);
1161 		goto out;
1162 
1163 	/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
1164 	default:
1165 		;
1166 	}
1167 
1168 get_rq:
1169 	/*
1170 	 * This sync check and mask will be re-done in init_request_from_bio(),
1171 	 * but we need to set it earlier to expose the sync flag to the
1172 	 * rq allocator and io schedulers.
1173 	 */
1174 	rw_flags = bio_data_dir(bio);
1175 	if (sync)
1176 		rw_flags |= REQ_RW_SYNC;
1177 
1178 	/*
1179 	 * Grab a free request. This might sleep but cannot fail.
1180 	 * Returns with the queue unlocked.
1181 	 */
1182 	req = get_request_wait(q, rw_flags, bio);
1183 
1184 	/*
1185 	 * After dropping the lock and possibly sleeping here, our request
1186 	 * may now be mergeable after it had proven unmergeable (above).
1187 	 * We don't worry about that case for efficiency. It won't happen
1188 	 * often, and the elevators are able to handle it.
1189 	 */
1190 	init_request_from_bio(req, bio);
1191 
1192 	spin_lock_irq(q->queue_lock);
1193 	if (elv_queue_empty(q))
1194 		blk_plug_device(q);
1195 	add_request(q, req);
1196 out:
1197 	if (sync)
1198 		__generic_unplug_device(q);
1199 
1200 	spin_unlock_irq(q->queue_lock);
1201 	return 0;
1202 
1203 end_io:
1204 	bio_endio(bio, err);
1205 	return 0;
1206 }
1207 
1208 /*
1209  * If bio->bi_bdev is a partition, remap the location
1210  */
1211 static inline void blk_partition_remap(struct bio *bio)
1212 {
1213 	struct block_device *bdev = bio->bi_bdev;
1214 
1215 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1216 		struct hd_struct *p = bdev->bd_part;
1217 
1218 		bio->bi_sector += p->start_sect;
1219 		bio->bi_bdev = bdev->bd_contains;
1220 
1221 		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
1222 				    bdev->bd_dev, bio->bi_sector,
1223 				    bio->bi_sector - p->start_sect);
1224 	}
1225 }
1226 
1227 static void handle_bad_sector(struct bio *bio)
1228 {
1229 	char b[BDEVNAME_SIZE];
1230 
1231 	printk(KERN_INFO "attempt to access beyond end of device\n");
1232 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1233 			bdevname(bio->bi_bdev, b),
1234 			bio->bi_rw,
1235 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
1236 			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1237 
1238 	set_bit(BIO_EOF, &bio->bi_flags);
1239 }
1240 
1241 #ifdef CONFIG_FAIL_MAKE_REQUEST
1242 
1243 static DECLARE_FAULT_ATTR(fail_make_request);
1244 
1245 static int __init setup_fail_make_request(char *str)
1246 {
1247 	return setup_fault_attr(&fail_make_request, str);
1248 }
1249 __setup("fail_make_request=", setup_fail_make_request);
1250 
1251 static int should_fail_request(struct bio *bio)
1252 {
1253 	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
1254 	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
1255 		return should_fail(&fail_make_request, bio->bi_size);
1256 
1257 	return 0;
1258 }
1259 
1260 static int __init fail_make_request_debugfs(void)
1261 {
1262 	return init_fault_attr_dentries(&fail_make_request,
1263 					"fail_make_request");
1264 }
1265 
1266 late_initcall(fail_make_request_debugfs);
1267 
1268 #else /* CONFIG_FAIL_MAKE_REQUEST */
1269 
1270 static inline int should_fail_request(struct bio *bio)
1271 {
1272 	return 0;
1273 }
1274 
1275 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1276 
1277 /*
1278  * Check whether this bio extends beyond the end of the device.
1279  */
1280 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1281 {
1282 	sector_t maxsector;
1283 
1284 	if (!nr_sectors)
1285 		return 0;
1286 
1287 	/* Test device or partition size, when known. */
1288 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1289 	if (maxsector) {
1290 		sector_t sector = bio->bi_sector;
1291 
1292 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1293 			/*
1294 			 * This may well happen - the kernel calls bread()
1295 			 * without checking the size of the device, e.g., when
1296 			 * mounting a device.
1297 			 */
1298 			handle_bad_sector(bio);
1299 			return 1;
1300 		}
1301 	}
1302 
1303 	return 0;
1304 }
1305 
1306 /**
1307  * generic_make_request - hand a buffer to its device driver for I/O
1308  * @bio:  The bio describing the location in memory and on the device.
1309  *
1310  * generic_make_request() is used to make I/O requests of block
1311  * devices. It is passed a &struct bio, which describes the I/O that needs
1312  * to be done.
1313  *
1314  * generic_make_request() does not return any status.  The
1315  * success/failure status of the request, along with notification of
1316  * completion, is delivered asynchronously through the bio->bi_end_io
1317  * function described (one day) elsewhere.
1318  *
1319  * The caller of generic_make_request must make sure that bi_io_vec
1320  * are set to describe the memory buffer, and that bi_dev and bi_sector are
1321  * set to describe the device address, and the
1322  * bi_end_io and optionally bi_private are set to describe how
1323  * completion notification should be signaled.
1324  *
1325  * generic_make_request and the drivers it calls may use bi_next if this
1326  * bio happens to be merged with someone else, and may change bi_bdev and
1327  * bi_sector for remaps as it sees fit.  So the values of these fields
1328  * should NOT be depended on after the call to generic_make_request.
1329  */
1330 static inline void __generic_make_request(struct bio *bio)
1331 {
1332 	struct request_queue *q;
1333 	sector_t old_sector;
1334 	int ret, nr_sectors = bio_sectors(bio);
1335 	dev_t old_dev;
1336 	int err = -EIO;
1337 
1338 	might_sleep();
1339 
1340 	if (bio_check_eod(bio, nr_sectors))
1341 		goto end_io;
1342 
1343 	/*
1344 	 * Resolve the mapping until finished. (drivers are
1345 	 * still free to implement/resolve their own stacking
1346 	 * by explicitly returning 0)
1347 	 *
1348 	 * NOTE: we don't repeat the blk_size check for each new device.
1349 	 * Stacking drivers are expected to know what they are doing.
1350 	 */
1351 	old_sector = -1;
1352 	old_dev = 0;
1353 	do {
1354 		char b[BDEVNAME_SIZE];
1355 
1356 		q = bdev_get_queue(bio->bi_bdev);
1357 		if (!q) {
1358 			printk(KERN_ERR
1359 			       "generic_make_request: Trying to access "
1360 				"nonexistent block-device %s (%Lu)\n",
1361 				bdevname(bio->bi_bdev, b),
1362 				(long long) bio->bi_sector);
1363 end_io:
1364 			bio_endio(bio, err);
1365 			break;
1366 		}
1367 
1368 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
1369 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1370 				bdevname(bio->bi_bdev, b),
1371 				bio_sectors(bio),
1372 				q->max_hw_sectors);
1373 			goto end_io;
1374 		}
1375 
1376 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1377 			goto end_io;
1378 
1379 		if (should_fail_request(bio))
1380 			goto end_io;
1381 
1382 		/*
1383 		 * If this device has partitions, remap block n
1384 		 * of partition p to block n+start(p) of the disk.
1385 		 */
1386 		blk_partition_remap(bio);
1387 
1388 		if (old_sector != -1)
1389 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
1390 					    old_sector);
1391 
1392 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
1393 
1394 		old_sector = bio->bi_sector;
1395 		old_dev = bio->bi_bdev->bd_dev;
1396 
1397 		if (bio_check_eod(bio, nr_sectors))
1398 			goto end_io;
1399 		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
1400 			err = -EOPNOTSUPP;
1401 			goto end_io;
1402 		}
1403 
1404 		ret = q->make_request_fn(q, bio);
1405 	} while (ret);
1406 }
1407 
1408 /*
1409  * We only want one ->make_request_fn to be active at a time,
1410  * else stack usage with stacked devices could be a problem.
1411  * So use current->bio_{list,tail} to keep a list of requests
1412  * submitted by a make_request_fn function.
1413  * current->bio_tail is also used as a flag to say if
1414  * generic_make_request is currently active in this task or not.
1415  * If it is NULL, then no make_request is active.  If it is non-NULL,
1416  * then a make_request is active, and new requests should be added
1417  * at the tail
1418  */
1419 void generic_make_request(struct bio *bio)
1420 {
1421 	if (current->bio_tail) {
1422 		/* make_request is active */
1423 		*(current->bio_tail) = bio;
1424 		bio->bi_next = NULL;
1425 		current->bio_tail = &bio->bi_next;
1426 		return;
1427 	}
1428 	/* following loop may be a bit non-obvious, and so deserves some
1429 	 * explanation.
1430 	 * Before entering the loop, bio->bi_next is NULL (as all callers
1431 	 * ensure that) so we have a list with a single bio.
1432 	 * We pretend that we have just taken it off a longer list, so
1433 	 * we assign bio_list to the next (which is NULL) and bio_tail
1434 	 * to &bio_list, thus initialising the bio_list of new bios to be
1435 	 * added.  __generic_make_request may indeed add some more bios
1436 	 * through a recursive call to generic_make_request.  If it
1437 	 * did, we find a non-NULL value in bio_list and re-enter the loop
1438 	 * from the top.  In this case we really did just take the bio
1439 	 * off the top of the list (no pretending) and so fix up bio_list and
1440 	 * bio_tail or bi_next, and call into __generic_make_request again.
1441 	 *
1442 	 * The loop was structured like this to make only one call to
1443 	 * __generic_make_request (which is important as it is large and
1444 	 * inlined) and to keep the structure simple.
1445 	 */
1446 	BUG_ON(bio->bi_next);
1447 	do {
1448 		current->bio_list = bio->bi_next;
1449 		if (bio->bi_next == NULL)
1450 			current->bio_tail = &current->bio_list;
1451 		else
1452 			bio->bi_next = NULL;
1453 		__generic_make_request(bio);
1454 		bio = current->bio_list;
1455 	} while (bio);
1456 	current->bio_tail = NULL; /* deactivate */
1457 }
1458 EXPORT_SYMBOL(generic_make_request);
1459 
1460 /**
1461  * submit_bio: submit a bio to the block device layer for I/O
1462  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1463  * @bio: The &struct bio which describes the I/O
1464  *
1465  * submit_bio() is very similar in purpose to generic_make_request(), and
1466  * uses that function to do most of the work. Both are fairly rough
1467  * interfaces, @bio must be presetup and ready for I/O.
1468  *
1469  */
1470 void submit_bio(int rw, struct bio *bio)
1471 {
1472 	int count = bio_sectors(bio);
1473 
1474 	bio->bi_rw |= rw;
1475 
1476 	/*
1477 	 * If it's a regular read/write or a barrier with data attached,
1478 	 * go through the normal accounting stuff before submission.
1479 	 */
1480 	if (!bio_empty_barrier(bio)) {
1481 
1482 		BIO_BUG_ON(!bio->bi_size);
1483 		BIO_BUG_ON(!bio->bi_io_vec);
1484 
1485 		if (rw & WRITE) {
1486 			count_vm_events(PGPGOUT, count);
1487 		} else {
1488 			task_io_account_read(bio->bi_size);
1489 			count_vm_events(PGPGIN, count);
1490 		}
1491 
1492 		if (unlikely(block_dump)) {
1493 			char b[BDEVNAME_SIZE];
1494 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1495 			current->comm, task_pid_nr(current),
1496 				(rw & WRITE) ? "WRITE" : "READ",
1497 				(unsigned long long)bio->bi_sector,
1498 				bdevname(bio->bi_bdev, b));
1499 		}
1500 	}
1501 
1502 	generic_make_request(bio);
1503 }
1504 EXPORT_SYMBOL(submit_bio);
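/*
 * Illustrative sketch (not part of this file): building a single-page bio
 * by hand and submitting it, filling in the fields that the
 * generic_make_request() description above requires.  example_read_page()
 * and example_end_io() are hypothetical.
 */
#if 0
static void example_end_io(struct bio *bio, int error)
{
	/* runs asynchronously once the I/O completes */
	complete(bio->bi_private);
	bio_put(bio);
}

static int example_read_page(struct block_device *bdev, struct page *page,
			     sector_t sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;
	bio->bi_private = &done;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	submit_bio(READ, bio);
	wait_for_completion(&done);
	return 0;
}
#endif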
1505 
1506 /**
1507  * __end_that_request_first - end I/O on a request
1508  * @req:      the request being processed
1509  * @error:    0 for success, < 0 for error
1510  * @nr_bytes: number of bytes to complete
1511  *
1512  * Description:
1513  *     Ends I/O on a number of bytes attached to @req, and sets it up
1514  *     for the next range of segments (if any) in the cluster.
1515  *
1516  * Return:
1517  *     0 - we are done with this request, call end_that_request_last()
1518  *     1 - still buffers pending for this request
1519  **/
1520 static int __end_that_request_first(struct request *req, int error,
1521 				    int nr_bytes)
1522 {
1523 	int total_bytes, bio_nbytes, next_idx = 0;
1524 	struct bio *bio;
1525 
1526 	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1527 
1528 	/*
1529 	 * for a REQ_BLOCK_PC request, we want to carry any eventual
1530 	 * sense key with us all the way through
1531 	 */
1532 	if (!blk_pc_request(req))
1533 		req->errors = 0;
1534 
1535 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1536 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1537 				req->rq_disk ? req->rq_disk->disk_name : "?",
1538 				(unsigned long long)req->sector);
1539 	}
1540 
1541 	if (blk_fs_request(req) && req->rq_disk) {
1542 		struct hd_struct *part = get_part(req->rq_disk, req->sector);
1543 		const int rw = rq_data_dir(req);
1544 
1545 		all_stat_add(req->rq_disk, part, sectors[rw],
1546 				nr_bytes >> 9, req->sector);
1547 	}
1548 
1549 	total_bytes = bio_nbytes = 0;
1550 	while ((bio = req->bio) != NULL) {
1551 		int nbytes;
1552 
1553 		/*
1554 		 * For an empty barrier request, the low level driver must
1555 		 * store a potential error location in ->sector. We pass
1556 		 * that back up in ->bi_sector.
1557 		 */
1558 		if (blk_empty_barrier(req))
1559 			bio->bi_sector = req->sector;
1560 
1561 		if (nr_bytes >= bio->bi_size) {
1562 			req->bio = bio->bi_next;
1563 			nbytes = bio->bi_size;
1564 			req_bio_endio(req, bio, nbytes, error);
1565 			next_idx = 0;
1566 			bio_nbytes = 0;
1567 		} else {
1568 			int idx = bio->bi_idx + next_idx;
1569 
1570 			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1571 				blk_dump_rq_flags(req, "__end_that");
1572 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
1573 				       __func__, bio->bi_idx, bio->bi_vcnt);
1574 				break;
1575 			}
1576 
1577 			nbytes = bio_iovec_idx(bio, idx)->bv_len;
1578 			BIO_BUG_ON(nbytes > bio->bi_size);
1579 
1580 			/*
1581 			 * not a complete bvec done
1582 			 */
1583 			if (unlikely(nbytes > nr_bytes)) {
1584 				bio_nbytes += nr_bytes;
1585 				total_bytes += nr_bytes;
1586 				break;
1587 			}
1588 
1589 			/*
1590 			 * advance to the next vector
1591 			 */
1592 			next_idx++;
1593 			bio_nbytes += nbytes;
1594 		}
1595 
1596 		total_bytes += nbytes;
1597 		nr_bytes -= nbytes;
1598 
1599 		bio = req->bio;
1600 		if (bio) {
1601 			/*
1602 			 * end more in this run, or just return 'not-done'
1603 			 */
1604 			if (unlikely(nr_bytes <= 0))
1605 				break;
1606 		}
1607 	}
1608 
1609 	/*
1610 	 * completely done
1611 	 */
1612 	if (!req->bio)
1613 		return 0;
1614 
1615 	/*
1616 	 * if the request wasn't completed, update state
1617 	 */
1618 	if (bio_nbytes) {
1619 		req_bio_endio(req, bio, bio_nbytes, error);
1620 		bio->bi_idx += next_idx;
1621 		bio_iovec(bio)->bv_offset += nr_bytes;
1622 		bio_iovec(bio)->bv_len -= nr_bytes;
1623 	}
1624 
1625 	blk_recalc_rq_sectors(req, total_bytes >> 9);
1626 	blk_recalc_rq_segments(req);
1627 	return 1;
1628 }
1629 
1630 /*
1631  * splice the completion data to a local structure and hand off to
1632  * process_completion_queue() to complete the requests
1633  */
1634 static void blk_done_softirq(struct softirq_action *h)
1635 {
1636 	struct list_head *cpu_list, local_list;
1637 
1638 	local_irq_disable();
1639 	cpu_list = &__get_cpu_var(blk_cpu_done);
1640 	list_replace_init(cpu_list, &local_list);
1641 	local_irq_enable();
1642 
1643 	while (!list_empty(&local_list)) {
1644 		struct request *rq;
1645 
1646 		rq = list_entry(local_list.next, struct request, donelist);
1647 		list_del_init(&rq->donelist);
1648 		rq->q->softirq_done_fn(rq);
1649 	}
1650 }
1651 
1652 static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1653 				    unsigned long action, void *hcpu)
1654 {
1655 	/*
1656 	 * If a CPU goes away, splice its entries to the current CPU
1657 	 * and trigger a run of the softirq
1658 	 */
1659 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1660 		int cpu = (unsigned long) hcpu;
1661 
1662 		local_irq_disable();
1663 		list_splice_init(&per_cpu(blk_cpu_done, cpu),
1664 				 &__get_cpu_var(blk_cpu_done));
1665 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
1666 		local_irq_enable();
1667 	}
1668 
1669 	return NOTIFY_OK;
1670 }
1671 
1672 
1673 static struct notifier_block blk_cpu_notifier __cpuinitdata = {
1674 	.notifier_call	= blk_cpu_notify,
1675 };
1676 
1677 /**
1678  * blk_complete_request - end I/O on a request
1679  * @req:      the request being processed
1680  *
1681  * Description:
1682  *     Ends all I/O on a request. It does not handle partial completions,
1683  *     unless the driver actually implements this in its completion callback
1684  *     through requeueing. The actual completion happens out-of-order,
1685  *     through a softirq handler. The user must have registered a completion
1686  *     callback through blk_queue_softirq_done().
1687  **/
1688 
1689 void blk_complete_request(struct request *req)
1690 {
1691 	struct list_head *cpu_list;
1692 	unsigned long flags;
1693 
1694 	BUG_ON(!req->q->softirq_done_fn);
1695 
1696 	local_irq_save(flags);
1697 
1698 	cpu_list = &__get_cpu_var(blk_cpu_done);
1699 	list_add_tail(&req->donelist, cpu_list);
1700 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
1701 
1702 	local_irq_restore(flags);
1703 }
1704 EXPORT_SYMBOL(blk_complete_request);
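/*
 * Illustrative sketch (not part of this file): the softirq completion
 * scheme described above.  The driver registers a softirq_done_fn at
 * queue setup time, does only the minimum in its interrupt handler, and
 * finishes the request later in softirq context.  example_softirq_done()
 * is hypothetical.
 */
#if 0
static void example_softirq_done(struct request *rq)
{
	/* runs in softirq context; blk_end_request() takes the queue lock */
	blk_end_request(rq, rq->errors ? -EIO : 0, blk_rq_bytes(rq));
}

	/* at queue setup time */
	blk_queue_softirq_done(q, example_softirq_done);

	/* from the interrupt handler, once the hardware reports rq done */
	blk_complete_request(rq);
#endif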
1705 
1706 /*
1707  * queue lock must be held
1708  */
1709 static void end_that_request_last(struct request *req, int error)
1710 {
1711 	struct gendisk *disk = req->rq_disk;
1712 
1713 	if (blk_rq_tagged(req))
1714 		blk_queue_end_tag(req->q, req);
1715 
1716 	if (blk_queued_rq(req))
1717 		blkdev_dequeue_request(req);
1718 
1719 	if (unlikely(laptop_mode) && blk_fs_request(req))
1720 		laptop_io_completion();
1721 
1722 	/*
1723 	 * Account IO completion.  bar_rq isn't accounted as a normal
1724 	 * IO on queueing nor completion.  Accounting the containing
1725 	 * request is enough.
1726 	 */
1727 	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1728 		unsigned long duration = jiffies - req->start_time;
1729 		const int rw = rq_data_dir(req);
1730 		struct hd_struct *part = get_part(disk, req->sector);
1731 
1732 		__all_stat_inc(disk, part, ios[rw], req->sector);
1733 		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
1734 		disk_round_stats(disk);
1735 		disk->in_flight--;
1736 		if (part) {
1737 			part_round_stats(part);
1738 			part->in_flight--;
1739 		}
1740 	}
1741 
1742 	if (req->end_io)
1743 		req->end_io(req, error);
1744 	else {
1745 		if (blk_bidi_rq(req))
1746 			__blk_put_request(req->next_rq->q, req->next_rq);
1747 
1748 		__blk_put_request(req->q, req);
1749 	}
1750 }
1751 
1752 static inline void __end_request(struct request *rq, int uptodate,
1753 				 unsigned int nr_bytes)
1754 {
1755 	int error = 0;
1756 
1757 	if (uptodate <= 0)
1758 		error = uptodate ? uptodate : -EIO;
1759 
1760 	__blk_end_request(rq, error, nr_bytes);
1761 }
1762 
1763 /**
1764  * blk_rq_bytes - Returns bytes left to complete in the entire request
1765  * @rq: the request being processed
1766  **/
1767 unsigned int blk_rq_bytes(struct request *rq)
1768 {
1769 	if (blk_fs_request(rq))
1770 		return rq->hard_nr_sectors << 9;
1771 
1772 	return rq->data_len;
1773 }
1774 EXPORT_SYMBOL_GPL(blk_rq_bytes);
1775 
1776 /**
1777  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1778  * @rq: the request being processed
1779  **/
1780 unsigned int blk_rq_cur_bytes(struct request *rq)
1781 {
1782 	if (blk_fs_request(rq))
1783 		return rq->current_nr_sectors << 9;
1784 
1785 	if (rq->bio)
1786 		return rq->bio->bi_size;
1787 
1788 	return rq->data_len;
1789 }
1790 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1791 
1792 /**
1793  * end_queued_request - end all I/O on a queued request
1794  * @rq:		the request being processed
1795  * @uptodate:	error value or 0/1 uptodate flag
1796  *
1797  * Description:
1798  *     Ends all I/O on a request, and removes it from the block layer queues.
1799  *     Not suitable for normal IO completion, unless the driver still has
1800  *     the request attached to the block layer.
1801  *
1802  **/
1803 void end_queued_request(struct request *rq, int uptodate)
1804 {
1805 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1806 }
1807 EXPORT_SYMBOL(end_queued_request);
1808 
1809 /**
1810  * end_dequeued_request - end all I/O on a dequeued request
1811  * @rq:		the request being processed
1812  * @uptodate:	error value or 0/1 uptodate flag
1813  *
1814  * Description:
1815  *     Ends all I/O on a request. The request must already have been
1816  *     dequeued using blkdev_dequeue_request(), as is normally the case
1817  *     for most drivers.
1818  *
1819  **/
1820 void end_dequeued_request(struct request *rq, int uptodate)
1821 {
1822 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1823 }
1824 EXPORT_SYMBOL(end_dequeued_request);
1825 
1826 
1827 /**
1828  * end_request - end I/O on the current segment of the request
1829  * @req:	the request being processed
1830  * @uptodate:	error value or 0/1 uptodate flag
1831  *
1832  * Description:
1833  *     Ends I/O on the current segment of a request. If that is the only
1834  *     remaining segment, the request is also completed and freed.
1835  *
1836  *     This is a remnant of how older block drivers handled IO completions.
1837  *     Modern drivers typically end IO on the full request in one go, unless
1838  *     they have a residual value to account for. For that case this function
1839  *     isn't really useful, unless the residual just happens to be the
1840  *     full current segment. In other words, don't use this function in new
1841  *     code. Either use end_request_completely(), or the
1842  *     end_that_request_chunk() (along with end_that_request_last()) for
1843  *     partial completions.
1844  *
1845  **/
1846 void end_request(struct request *req, int uptodate)
1847 {
1848 	__end_request(req, uptodate, req->hard_cur_sectors << 9);
1849 }
1850 EXPORT_SYMBOL(end_request);
1851 
1852 /**
1853  * blk_end_io - Generic end_io function to complete a request.
1854  * @rq:           the request being processed
1855  * @error:        0 for success, < 0 for error
1856  * @nr_bytes:     number of bytes to complete @rq
1857  * @bidi_bytes:   number of bytes to complete @rq->next_rq
1858  * @drv_callback: function called between completion of bios in the request
1859  *                and completion of the request.
1860  *                If the callback returns non-zero, this helper returns
1861  *                without completing the request.
1862  *
1863  * Description:
1864  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1865  *     If @rq has bytes left over, it is set up for the next range of segments.
1866  *
1867  * Return:
1868  *     0 - we are done with this request
1869  *     1 - this request is not freed yet, it still has pending buffers.
1870  **/
1871 static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1872 		      unsigned int bidi_bytes,
1873 		      int (drv_callback)(struct request *))
1874 {
1875 	struct request_queue *q = rq->q;
1876 	unsigned long flags = 0UL;
1877 
1878 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1879 		if (__end_that_request_first(rq, error, nr_bytes))
1880 			return 1;
1881 
1882 		/* Bidi request must be completed as a whole */
1883 		if (blk_bidi_rq(rq) &&
1884 		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
1885 			return 1;
1886 	}
1887 
1888 	/* Special feature for tricky drivers */
1889 	if (drv_callback && drv_callback(rq))
1890 		return 1;
1891 
1892 	add_disk_randomness(rq->rq_disk);
1893 
1894 	spin_lock_irqsave(q->queue_lock, flags);
1895 	end_that_request_last(rq, error);
1896 	spin_unlock_irqrestore(q->queue_lock, flags);
1897 
1898 	return 0;
1899 }
1900 
1901 /**
1902  * blk_end_request - Helper function for drivers to complete the request.
1903  * @rq:       the request being processed
1904  * @error:    0 for success, < 0 for error
1905  * @nr_bytes: number of bytes to complete
1906  *
1907  * Description:
1908  *     Ends I/O on a number of bytes attached to @rq.
1909  *     If @rq has bytes left over, it is set up for the next range of segments.
1910  *
1911  * Return:
1912  *     0 - we are done with this request
1913  *     1 - buffers still pending for this request
1914  **/
1915 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1916 {
1917 	return blk_end_io(rq, error, nr_bytes, 0, NULL);
1918 }
1919 EXPORT_SYMBOL_GPL(blk_end_request);
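/*
 * Example: completing a request from interrupt context.  As shown by
 * blk_end_io() above, blk_end_request() takes the queue lock itself,
 * so the caller must not already hold it.  "struct my_dev", "dev->rq"
 * and my_hw_ok() are hypothetical:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct request *rq = dev->rq;
 *		int error = my_hw_ok(dev) ? 0 : -EIO;
 *
 *		if (!blk_end_request(rq, error, blk_rq_bytes(rq)))
 *			dev->rq = NULL;
 *		return IRQ_HANDLED;
 *	}
 *
 * A return of 0 means the request has been fully completed and freed;
 * 1 means buffers are still pending and the request is kept.
 */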
1920 
1921 /**
1922  * __blk_end_request - Helper function for drivers to complete the request.
1923  * @rq:       the request being processed
1924  * @error:    0 for success, < 0 for error
1925  * @nr_bytes: number of bytes to complete
1926  *
1927  * Description:
1928  *     Must be called with the queue lock held, unlike blk_end_request().
1929  *
1930  * Return:
1931  *     0 - we are done with this request
1932  *     1 - buffers still pending for this request
1933  **/
1934 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1935 {
1936 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1937 		if (__end_that_request_first(rq, error, nr_bytes))
1938 			return 1;
1939 	}
1940 
1941 	add_disk_randomness(rq->rq_disk);
1942 
1943 	end_that_request_last(rq, error);
1944 
1945 	return 0;
1946 }
1947 EXPORT_SYMBOL_GPL(__blk_end_request);
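/*
 * Example: inside a request_fn the queue lock is already held, so the
 * variant above, which does not take the lock itself, is the one to
 * use there.  handle_request() is a hypothetical synchronous transfer
 * routine returning 0 on success:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			blkdev_dequeue_request(rq);
 *			if (handle_request(rq) == 0)
 *				__blk_end_request(rq, 0, blk_rq_bytes(rq));
 *			else
 *				__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
 *		}
 *	}
 */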
1948 
1949 /**
1950  * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1951  * @rq:         the bidi request being processed
1952  * @error:      0 for success, < 0 for error
1953  * @nr_bytes:   number of bytes to complete @rq
1954  * @bidi_bytes: number of bytes to complete @rq->next_rq
1955  *
1956  * Description:
1957  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1958  *
1959  * Return:
1960  *     0 - we are done with this request
1961  *     1 - buffers still pending for this request
1962  **/
1963 int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1964 			 unsigned int bidi_bytes)
1965 {
1966 	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
1967 }
1968 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
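/*
 * Example: a driver finishing a bidirectional request completes both
 * directions in one call; @rq carries the outgoing data and
 * @rq->next_rq the incoming data.  "error" is the usual 0 or negative
 * errno from the driver:
 *
 *	blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
 *			     blk_rq_bytes(rq->next_rq));
 *
 * Smaller byte counts may be passed instead if the hardware reports a
 * residual for either direction.
 */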
1969 
1970 /**
1971  * blk_end_request_callback - Special helper function for tricky drivers
1972  * @rq:           the request being processed
1973  * @error:        0 for success, < 0 for error
1974  * @nr_bytes:     number of bytes to complete
1975  * @drv_callback: function called between completion of bios in the request
1976  *                and completion of the request.
1977  *                If the callback returns non-zero, this helper returns
1978  *                without completing the request.
1979  *
1980  * Description:
1981  *     Ends I/O on a number of bytes attached to @rq.
1982  *     If @rq has bytes left over, it is set up for the next range of segments.
1983  *
1984  *     This special helper function is used only for existing tricky drivers
1985  *     (e.g. cdrom_newpc_intr() of ide-cd).
1986  *     This interface will be removed when such drivers are rewritten.
1987  *     Don't use it anywhere else.
1988  *
1989  * Return:
1990  *     0 - we are done with this request
1991  *     1 - this request is not freed yet:
1992  *         it still has pending buffers, or
1993  *         the driver doesn't want to finish it yet.
1994  **/
1995 int blk_end_request_callback(struct request *rq, int error,
1996 			     unsigned int nr_bytes,
1997 			     int (drv_callback)(struct request *))
1998 {
1999 	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
2000 }
2001 EXPORT_SYMBOL_GPL(blk_end_request_callback);
2002 
2003 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2004 		     struct bio *bio)
2005 {
2006 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
2007 	rq->cmd_flags |= (bio->bi_rw & 3);
2008 
2009 	rq->nr_phys_segments = bio_phys_segments(q, bio);
2010 	rq->nr_hw_segments = bio_hw_segments(q, bio);
2011 	rq->current_nr_sectors = bio_cur_sectors(bio);
2012 	rq->hard_cur_sectors = rq->current_nr_sectors;
2013 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2014 	rq->buffer = bio_data(bio);
2015 	rq->data_len = bio->bi_size;
2016 
2017 	rq->bio = rq->biotail = bio;
2018 
2019 	if (bio->bi_bdev)
2020 		rq->rq_disk = bio->bi_bdev->bd_disk;
2021 }
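/*
 * Example: building a request around a single bio, roughly what
 * helpers such as blk_rq_map_kern() do internally.  Error handling is
 * omitted; "q" and "bio" are assumed to come from the caller:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, bio_data_dir(bio), GFP_KERNEL);
 *	if (rq)
 *		blk_rq_bio_prep(q, rq, bio);
 */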
2022 
2023 int kblockd_schedule_work(struct work_struct *work)
2024 {
2025 	return queue_work(kblockd_workqueue, work);
2026 }
2027 EXPORT_SYMBOL(kblockd_schedule_work);
2028 
2029 void kblockd_flush_work(struct work_struct *work)
2030 {
2031 	cancel_work_sync(work);
2032 }
2033 EXPORT_SYMBOL(kblockd_flush_work);
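/*
 * Example: deferring work to the kblockd workqueue and later waiting
 * for it, much as the block layer does for delayed queue unplugging.
 * my_work_fn() is a hypothetical handler running in kblockd's process
 * context:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... do the deferred work ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	kblockd_schedule_work(&my_work);
 *	...
 *	kblockd_flush_work(&my_work);
 */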
2034 
2035 int __init blk_dev_init(void)
2036 {
2037 	int i;
2038 
2039 	kblockd_workqueue = create_workqueue("kblockd");
2040 	if (!kblockd_workqueue)
2041 		panic("Failed to create kblockd\n");
2042 
2043 	request_cachep = kmem_cache_create("blkdev_requests",
2044 			sizeof(struct request), 0, SLAB_PANIC, NULL);
2045 
2046 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2047 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2048 
2049 	for_each_possible_cpu(i)
2050 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2051 
2052 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
2053 	register_hotcpu_notifier(&blk_cpu_notifier);
2054 
2055 	return 0;
2056 }
2057 
2058