xref: /openbmc/linux/block/blk-core.c (revision aac5987a)
1 /*
2  * Copyright (C) 1991, 1992 Linus Torvalds
3  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
4  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
5  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7  *	-  July2000
8  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9  */
10 
11 /*
12  * This handles all read/write requests to block devices
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-mq.h>
20 #include <linux/highmem.h>
21 #include <linux/mm.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/completion.h>
26 #include <linux/slab.h>
27 #include <linux/swap.h>
28 #include <linux/writeback.h>
29 #include <linux/task_io_accounting_ops.h>
30 #include <linux/fault-inject.h>
31 #include <linux/list_sort.h>
32 #include <linux/delay.h>
33 #include <linux/ratelimit.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/blk-cgroup.h>
36 #include <linux/debugfs.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/block.h>
40 
41 #include "blk.h"
42 #include "blk-mq.h"
43 #include "blk-mq-sched.h"
44 #include "blk-wbt.h"
45 
46 #ifdef CONFIG_DEBUG_FS
47 struct dentry *blk_debugfs_root;
48 #endif
49 
50 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
51 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
52 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
53 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
54 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
55 
56 DEFINE_IDA(blk_queue_ida);
57 
58 /*
59  * For the allocated request tables
60  */
61 struct kmem_cache *request_cachep;
62 
63 /*
64  * For queue allocation
65  */
66 struct kmem_cache *blk_requestq_cachep;
67 
68 /*
69  * Controlling structure to kblockd
70  */
71 static struct workqueue_struct *kblockd_workqueue;
72 
73 static void blk_clear_congested(struct request_list *rl, int sync)
74 {
75 #ifdef CONFIG_CGROUP_WRITEBACK
76 	clear_wb_congested(rl->blkg->wb_congested, sync);
77 #else
78 	/*
79 	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
80 	 * flip its congestion state for events on other blkcgs.
81 	 */
82 	if (rl == &rl->q->root_rl)
83 		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
84 #endif
85 }
86 
87 static void blk_set_congested(struct request_list *rl, int sync)
88 {
89 #ifdef CONFIG_CGROUP_WRITEBACK
90 	set_wb_congested(rl->blkg->wb_congested, sync);
91 #else
92 	/* see blk_clear_congested() */
93 	if (rl == &rl->q->root_rl)
94 		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
95 #endif
96 }
97 
98 void blk_queue_congestion_threshold(struct request_queue *q)
99 {
100 	int nr;
101 
102 	nr = q->nr_requests - (q->nr_requests / 8) + 1;
103 	if (nr > q->nr_requests)
104 		nr = q->nr_requests;
105 	q->nr_congestion_on = nr;
106 
107 	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
108 	if (nr < 1)
109 		nr = 1;
110 	q->nr_congestion_off = nr;
111 }
112 
113 void blk_rq_init(struct request_queue *q, struct request *rq)
114 {
115 	memset(rq, 0, sizeof(*rq));
116 
117 	INIT_LIST_HEAD(&rq->queuelist);
118 	INIT_LIST_HEAD(&rq->timeout_list);
119 	rq->cpu = -1;
120 	rq->q = q;
121 	rq->__sector = (sector_t) -1;
122 	INIT_HLIST_NODE(&rq->hash);
123 	RB_CLEAR_NODE(&rq->rb_node);
124 	rq->tag = -1;
125 	rq->internal_tag = -1;
126 	rq->start_time = jiffies;
127 	set_start_time_ns(rq);
128 	rq->part = NULL;
129 }
130 EXPORT_SYMBOL(blk_rq_init);
131 
132 static void req_bio_endio(struct request *rq, struct bio *bio,
133 			  unsigned int nbytes, int error)
134 {
135 	if (error)
136 		bio->bi_error = error;
137 
138 	if (unlikely(rq->rq_flags & RQF_QUIET))
139 		bio_set_flag(bio, BIO_QUIET);
140 
141 	bio_advance(bio, nbytes);
142 
143 	/* don't actually finish bio if it's part of flush sequence */
144 	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
145 		bio_endio(bio);
146 }
147 
148 void blk_dump_rq_flags(struct request *rq, char *msg)
149 {
150 	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
151 		rq->rq_disk ? rq->rq_disk->disk_name : "?",
152 		(unsigned long long) rq->cmd_flags);
153 
154 	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
155 	       (unsigned long long)blk_rq_pos(rq),
156 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
157 	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
158 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
159 }
160 EXPORT_SYMBOL(blk_dump_rq_flags);
161 
162 static void blk_delay_work(struct work_struct *work)
163 {
164 	struct request_queue *q;
165 
166 	q = container_of(work, struct request_queue, delay_work.work);
167 	spin_lock_irq(q->queue_lock);
168 	__blk_run_queue(q);
169 	spin_unlock_irq(q->queue_lock);
170 }
171 
172 /**
173  * blk_delay_queue - restart queueing after defined interval
174  * @q:		The &struct request_queue in question
175  * @msecs:	Delay in msecs
176  *
177  * Description:
178  *   Sometimes queueing needs to be postponed for a little while, to allow
179  *   resources to come back. This function will make sure that queueing is
180  *   restarted around the specified time. Queue lock must be held.
181  */
182 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
183 {
184 	if (likely(!blk_queue_dead(q)))
185 		queue_delayed_work(kblockd_workqueue, &q->delay_work,
186 				   msecs_to_jiffies(msecs));
187 }
188 EXPORT_SYMBOL(blk_delay_queue);
189 
190 /**
191  * blk_start_queue_async - asynchronously restart a previously stopped queue
192  * @q:    The &struct request_queue in question
193  *
194  * Description:
195  *   blk_start_queue_async() will clear the stop flag on the queue, and
196  *   ensure that the request_fn for the queue is run from an async
197  *   context.
198  **/
199 void blk_start_queue_async(struct request_queue *q)
200 {
201 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
202 	blk_run_queue_async(q);
203 }
204 EXPORT_SYMBOL(blk_start_queue_async);
205 
206 /**
207  * blk_start_queue - restart a previously stopped queue
208  * @q:    The &struct request_queue in question
209  *
210  * Description:
211  *   blk_start_queue() will clear the stop flag on the queue, and call
212  *   the request_fn for the queue if it was in a stopped state when
213  *   entered. Also see blk_stop_queue(). Queue lock must be held.
214  **/
215 void blk_start_queue(struct request_queue *q)
216 {
217 	WARN_ON(!irqs_disabled());
218 
219 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
220 	__blk_run_queue(q);
221 }
222 EXPORT_SYMBOL(blk_start_queue);
223 
224 /**
225  * blk_stop_queue - stop a queue
226  * @q:    The &struct request_queue in question
227  *
228  * Description:
229  *   The Linux block layer assumes that a block driver will consume all
230  *   entries on the request queue when the request_fn strategy is called.
231  *   Often this will not happen, because of hardware limitations (queue
232  *   depth settings). If a device driver gets a 'queue full' response,
233  *   or if it simply chooses not to queue more I/O at one point, it can
234  *   call this function to prevent the request_fn from being called until
235  *   the driver has signalled it's ready to go again. This happens by calling
236  *   blk_start_queue() to restart queue operations. Queue lock must be held.
237  **/
238 void blk_stop_queue(struct request_queue *q)
239 {
240 	cancel_delayed_work(&q->delay_work);
241 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
242 }
243 EXPORT_SYMBOL(blk_stop_queue);
244 
245 /**
246  * blk_sync_queue - cancel any pending callbacks on a queue
247  * @q: the queue
248  *
249  * Description:
250  *     The block layer may perform asynchronous callback activity
251  *     on a queue, such as calling the unplug function after a timeout.
252  *     A block device may call blk_sync_queue to ensure that any
253  *     such activity is cancelled, thus allowing it to release resources
254  *     that the callbacks might use. The caller must already have made sure
255  *     that its ->make_request_fn will not re-add plugging prior to calling
256  *     this function.
257  *
258  *     This function does not cancel any asynchronous activity arising
259  *     out of elevator or throttling code. That would require elevator_exit()
260  *     and blkcg_exit_queue() to be called with queue lock initialized.
261  *
262  */
263 void blk_sync_queue(struct request_queue *q)
264 {
265 	del_timer_sync(&q->timeout);
266 
267 	if (q->mq_ops) {
268 		struct blk_mq_hw_ctx *hctx;
269 		int i;
270 
271 		queue_for_each_hw_ctx(q, hctx, i) {
272 			cancel_work_sync(&hctx->run_work);
273 			cancel_delayed_work_sync(&hctx->delay_work);
274 		}
275 	} else {
276 		cancel_delayed_work_sync(&q->delay_work);
277 	}
278 }
279 EXPORT_SYMBOL(blk_sync_queue);
280 
281 /**
282  * __blk_run_queue_uncond - run a queue whether or not it has been stopped
283  * @q:	The queue to run
284  *
285  * Description:
286  *    Invoke request handling on a queue if there are any pending requests.
287  *    May be used to restart request handling after a request has completed.
288  *    This variant runs the queue whether or not the queue has been
289  *    stopped. Must be called with the queue lock held and interrupts
290  *    disabled. See also @blk_run_queue.
291  */
292 inline void __blk_run_queue_uncond(struct request_queue *q)
293 {
294 	if (unlikely(blk_queue_dead(q)))
295 		return;
296 
297 	/*
298 	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
299 	 * the queue lock internally. As a result multiple threads may be
300 	 * running such a request function concurrently. Keep track of the
301 	 * number of active request_fn invocations such that blk_drain_queue()
302 	 * can wait until all these request_fn calls have finished.
303 	 */
304 	q->request_fn_active++;
305 	q->request_fn(q);
306 	q->request_fn_active--;
307 }
308 EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
309 
310 /**
311  * __blk_run_queue - run a single device queue
312  * @q:	The queue to run
313  *
314  * Description:
315  *    See @blk_run_queue. This variant must be called with the queue lock
316  *    held and interrupts disabled.
317  */
318 void __blk_run_queue(struct request_queue *q)
319 {
320 	if (unlikely(blk_queue_stopped(q)))
321 		return;
322 
323 	__blk_run_queue_uncond(q);
324 }
325 EXPORT_SYMBOL(__blk_run_queue);
326 
327 /**
328  * blk_run_queue_async - run a single device queue in workqueue context
329  * @q:	The queue to run
330  *
331  * Description:
332  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
333  *    of us. The caller must hold the queue lock.
334  */
335 void blk_run_queue_async(struct request_queue *q)
336 {
337 	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
338 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
339 }
340 EXPORT_SYMBOL(blk_run_queue_async);
341 
342 /**
343  * blk_run_queue - run a single device queue
344  * @q: The queue to run
345  *
346  * Description:
347  *    Invoke request handling on this queue, if it has pending work to do.
348  *    May be used to restart queueing when a request has completed.
349  */
350 void blk_run_queue(struct request_queue *q)
351 {
352 	unsigned long flags;
353 
354 	spin_lock_irqsave(q->queue_lock, flags);
355 	__blk_run_queue(q);
356 	spin_unlock_irqrestore(q->queue_lock, flags);
357 }
358 EXPORT_SYMBOL(blk_run_queue);
359 
360 void blk_put_queue(struct request_queue *q)
361 {
362 	kobject_put(&q->kobj);
363 }
364 EXPORT_SYMBOL(blk_put_queue);
365 
366 /**
367  * __blk_drain_queue - drain requests from request_queue
368  * @q: queue to drain
369  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
370  *
371  * Drain requests from @q.  If @drain_all is set, all requests are drained.
372  * If not, only ELVPRIV requests are drained.  The caller is responsible
373  * for ensuring that no new requests which need to be drained are queued.
374  */
375 static void __blk_drain_queue(struct request_queue *q, bool drain_all)
376 	__releases(q->queue_lock)
377 	__acquires(q->queue_lock)
378 {
379 	int i;
380 
381 	lockdep_assert_held(q->queue_lock);
382 
383 	while (true) {
384 		bool drain = false;
385 
386 		/*
387 		 * The caller might be trying to drain @q before its
388 		 * elevator is initialized.
389 		 */
390 		if (q->elevator)
391 			elv_drain_elevator(q);
392 
393 		blkcg_drain_queue(q);
394 
395 		/*
396 		 * This function might be called on a queue which failed
397 		 * driver init after queue creation or is not yet fully
398 		 * active yet.  Some drivers (e.g. fd and loop) get unhappy
399 		 * in such cases.  Kick queue iff dispatch queue has
400 		 * something on it and @q has request_fn set.
401 		 */
402 		if (!list_empty(&q->queue_head) && q->request_fn)
403 			__blk_run_queue(q);
404 
405 		drain |= q->nr_rqs_elvpriv;
406 		drain |= q->request_fn_active;
407 
408 		/*
409 		 * Unfortunately, requests are queued at and tracked from
410 		 * multiple places and there's no single counter which can
411 		 * be drained.  Check all the queues and counters.
412 		 */
413 		if (drain_all) {
414 			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
415 			drain |= !list_empty(&q->queue_head);
416 			for (i = 0; i < 2; i++) {
417 				drain |= q->nr_rqs[i];
418 				drain |= q->in_flight[i];
419 				if (fq)
420 				    drain |= !list_empty(&fq->flush_queue[i]);
421 			}
422 		}
423 
424 		if (!drain)
425 			break;
426 
427 		spin_unlock_irq(q->queue_lock);
428 
429 		msleep(10);
430 
431 		spin_lock_irq(q->queue_lock);
432 	}
433 
434 	/*
435 	 * With queue marked dead, any woken up waiter will fail the
436 	 * allocation path, so the wakeup chaining is lost and we're
437 	 * left with hung waiters. We need to wake up those waiters.
438 	 */
439 	if (q->request_fn) {
440 		struct request_list *rl;
441 
442 		blk_queue_for_each_rl(rl, q)
443 			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
444 				wake_up_all(&rl->wait[i]);
445 	}
446 }
447 
448 /**
449  * blk_queue_bypass_start - enter queue bypass mode
450  * @q: queue of interest
451  *
452  * In bypass mode, only the dispatch FIFO queue of @q is used.  This
453  * function makes @q enter bypass mode and drains all requests which were
454  * throttled or issued before.  On return, it's guaranteed that no request
455  * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
456  * inside queue or RCU read lock.
457  */
458 void blk_queue_bypass_start(struct request_queue *q)
459 {
460 	spin_lock_irq(q->queue_lock);
461 	q->bypass_depth++;
462 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
463 	spin_unlock_irq(q->queue_lock);
464 
465 	/*
466 	 * Queues start drained.  Skip actual draining till init is
467 	 * complete.  This avoids lenghty delays during queue init which
468 	 * can happen many times during boot.
469 	 */
470 	if (blk_queue_init_done(q)) {
471 		spin_lock_irq(q->queue_lock);
472 		__blk_drain_queue(q, false);
473 		spin_unlock_irq(q->queue_lock);
474 
475 		/* ensure blk_queue_bypass() is %true inside RCU read lock */
476 		synchronize_rcu();
477 	}
478 }
479 EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
480 
481 /**
482  * blk_queue_bypass_end - leave queue bypass mode
483  * @q: queue of interest
484  *
485  * Leave bypass mode and restore the normal queueing behavior.
486  */
487 void blk_queue_bypass_end(struct request_queue *q)
488 {
489 	spin_lock_irq(q->queue_lock);
490 	if (!--q->bypass_depth)
491 		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
492 	WARN_ON_ONCE(q->bypass_depth < 0);
493 	spin_unlock_irq(q->queue_lock);
494 }
495 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
496 
497 void blk_set_queue_dying(struct request_queue *q)
498 {
499 	spin_lock_irq(q->queue_lock);
500 	queue_flag_set(QUEUE_FLAG_DYING, q);
501 	spin_unlock_irq(q->queue_lock);
502 
503 	if (q->mq_ops)
504 		blk_mq_wake_waiters(q);
505 	else {
506 		struct request_list *rl;
507 
508 		spin_lock_irq(q->queue_lock);
509 		blk_queue_for_each_rl(rl, q) {
510 			if (rl->rq_pool) {
511 				wake_up(&rl->wait[BLK_RW_SYNC]);
512 				wake_up(&rl->wait[BLK_RW_ASYNC]);
513 			}
514 		}
515 		spin_unlock_irq(q->queue_lock);
516 	}
517 }
518 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
519 
520 /**
521  * blk_cleanup_queue - shutdown a request queue
522  * @q: request queue to shutdown
523  *
524  * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
525  * put it.  All future requests will be failed immediately with -ENODEV.
526  */
527 void blk_cleanup_queue(struct request_queue *q)
528 {
529 	spinlock_t *lock = q->queue_lock;
530 
531 	/* mark @q DYING, no new request or merges will be allowed afterwards */
532 	mutex_lock(&q->sysfs_lock);
533 	blk_set_queue_dying(q);
534 	spin_lock_irq(lock);
535 
536 	/*
537 	 * A dying queue is permanently in bypass mode till released.  Note
538 	 * that, unlike blk_queue_bypass_start(), we aren't performing
539 	 * synchronize_rcu() after entering bypass mode to avoid the delay
540 	 * as some drivers create and destroy a lot of queues while
541 	 * probing.  This is still safe because blk_release_queue() will be
542 	 * called only after the queue refcnt drops to zero and nothing,
543 	 * RCU or not, would be traversing the queue by then.
544 	 */
545 	q->bypass_depth++;
546 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
547 
548 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
549 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
550 	queue_flag_set(QUEUE_FLAG_DYING, q);
551 	spin_unlock_irq(lock);
552 	mutex_unlock(&q->sysfs_lock);
553 
554 	/*
555 	 * Drain all requests queued before DYING marking. Set DEAD flag to
556 	 * prevent that q->request_fn() gets invoked after draining finished.
557 	 */
558 	blk_freeze_queue(q);
559 	spin_lock_irq(lock);
560 	if (!q->mq_ops)
561 		__blk_drain_queue(q, true);
562 	queue_flag_set(QUEUE_FLAG_DEAD, q);
563 	spin_unlock_irq(lock);
564 
565 	/* for synchronous bio-based driver finish in-flight integrity i/o */
566 	blk_flush_integrity();
567 
568 	/* @q won't process any more request, flush async actions */
569 	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
570 	blk_sync_queue(q);
571 
572 	if (q->mq_ops)
573 		blk_mq_free_queue(q);
574 	percpu_ref_exit(&q->q_usage_counter);
575 
576 	spin_lock_irq(lock);
577 	if (q->queue_lock != &q->__queue_lock)
578 		q->queue_lock = &q->__queue_lock;
579 	spin_unlock_irq(lock);
580 
581 	put_disk_devt(q->disk_devt);
582 
583 	/* @q is and will stay empty, shutdown and put */
584 	blk_put_queue(q);
585 }
586 EXPORT_SYMBOL(blk_cleanup_queue);
587 
588 /* Allocate memory local to the request queue */
589 static void *alloc_request_simple(gfp_t gfp_mask, void *data)
590 {
591 	struct request_queue *q = data;
592 
593 	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
594 }
595 
596 static void free_request_simple(void *element, void *data)
597 {
598 	kmem_cache_free(request_cachep, element);
599 }
600 
601 static void *alloc_request_size(gfp_t gfp_mask, void *data)
602 {
603 	struct request_queue *q = data;
604 	struct request *rq;
605 
606 	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
607 			q->node);
608 	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
609 		kfree(rq);
610 		rq = NULL;
611 	}
612 	return rq;
613 }
614 
615 static void free_request_size(void *element, void *data)
616 {
617 	struct request_queue *q = data;
618 
619 	if (q->exit_rq_fn)
620 		q->exit_rq_fn(q, element);
621 	kfree(element);
622 }
623 
624 int blk_init_rl(struct request_list *rl, struct request_queue *q,
625 		gfp_t gfp_mask)
626 {
627 	if (unlikely(rl->rq_pool))
628 		return 0;
629 
630 	rl->q = q;
631 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
632 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
633 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
634 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
635 
636 	if (q->cmd_size) {
637 		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
638 				alloc_request_size, free_request_size,
639 				q, gfp_mask, q->node);
640 	} else {
641 		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
642 				alloc_request_simple, free_request_simple,
643 				q, gfp_mask, q->node);
644 	}
645 	if (!rl->rq_pool)
646 		return -ENOMEM;
647 
648 	return 0;
649 }
650 
651 void blk_exit_rl(struct request_list *rl)
652 {
653 	if (rl->rq_pool)
654 		mempool_destroy(rl->rq_pool);
655 }
656 
657 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
658 {
659 	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
660 }
661 EXPORT_SYMBOL(blk_alloc_queue);
662 
663 int blk_queue_enter(struct request_queue *q, bool nowait)
664 {
665 	while (true) {
666 		int ret;
667 
668 		if (percpu_ref_tryget_live(&q->q_usage_counter))
669 			return 0;
670 
671 		if (nowait)
672 			return -EBUSY;
673 
674 		ret = wait_event_interruptible(q->mq_freeze_wq,
675 				!atomic_read(&q->mq_freeze_depth) ||
676 				blk_queue_dying(q));
677 		if (blk_queue_dying(q))
678 			return -ENODEV;
679 		if (ret)
680 			return ret;
681 	}
682 }
683 
684 void blk_queue_exit(struct request_queue *q)
685 {
686 	percpu_ref_put(&q->q_usage_counter);
687 }
688 
689 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
690 {
691 	struct request_queue *q =
692 		container_of(ref, struct request_queue, q_usage_counter);
693 
694 	wake_up_all(&q->mq_freeze_wq);
695 }
696 
697 static void blk_rq_timed_out_timer(unsigned long data)
698 {
699 	struct request_queue *q = (struct request_queue *)data;
700 
701 	kblockd_schedule_work(&q->timeout_work);
702 }
703 
704 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
705 {
706 	struct request_queue *q;
707 
708 	q = kmem_cache_alloc_node(blk_requestq_cachep,
709 				gfp_mask | __GFP_ZERO, node_id);
710 	if (!q)
711 		return NULL;
712 
713 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
714 	if (q->id < 0)
715 		goto fail_q;
716 
717 	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
718 	if (!q->bio_split)
719 		goto fail_id;
720 
721 	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
722 	if (!q->backing_dev_info)
723 		goto fail_split;
724 
725 	q->backing_dev_info->ra_pages =
726 			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
727 	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
728 	q->backing_dev_info->name = "block";
729 	q->node = node_id;
730 
731 	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
732 		    laptop_mode_timer_fn, (unsigned long) q);
733 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
734 	INIT_LIST_HEAD(&q->queue_head);
735 	INIT_LIST_HEAD(&q->timeout_list);
736 	INIT_LIST_HEAD(&q->icq_list);
737 #ifdef CONFIG_BLK_CGROUP
738 	INIT_LIST_HEAD(&q->blkg_list);
739 #endif
740 	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
741 
742 	kobject_init(&q->kobj, &blk_queue_ktype);
743 
744 	mutex_init(&q->sysfs_lock);
745 	spin_lock_init(&q->__queue_lock);
746 
747 	/*
748 	 * By default initialize queue_lock to internal lock and driver can
749 	 * override it later if need be.
750 	 */
751 	q->queue_lock = &q->__queue_lock;
752 
753 	/*
754 	 * A queue starts its life with bypass turned on to avoid
755 	 * unnecessary bypass on/off overhead and nasty surprises during
756 	 * init.  The initial bypass will be finished when the queue is
757 	 * registered by blk_register_queue().
758 	 */
759 	q->bypass_depth = 1;
760 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
761 
762 	init_waitqueue_head(&q->mq_freeze_wq);
763 
764 	/*
765 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
766 	 * See blk_register_queue() for details.
767 	 */
768 	if (percpu_ref_init(&q->q_usage_counter,
769 				blk_queue_usage_counter_release,
770 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
771 		goto fail_bdi;
772 
773 	if (blkcg_init_queue(q))
774 		goto fail_ref;
775 
776 	return q;
777 
778 fail_ref:
779 	percpu_ref_exit(&q->q_usage_counter);
780 fail_bdi:
781 	bdi_put(q->backing_dev_info);
782 fail_split:
783 	bioset_free(q->bio_split);
784 fail_id:
785 	ida_simple_remove(&blk_queue_ida, q->id);
786 fail_q:
787 	kmem_cache_free(blk_requestq_cachep, q);
788 	return NULL;
789 }
790 EXPORT_SYMBOL(blk_alloc_queue_node);
791 
792 /**
793  * blk_init_queue  - prepare a request queue for use with a block device
794  * @rfn:  The function to be called to process requests that have been
795  *        placed on the queue.
796  * @lock: Request queue spin lock
797  *
798  * Description:
799  *    If a block device wishes to use the standard request handling procedures,
800  *    which sorts requests and coalesces adjacent requests, then it must
801  *    call blk_init_queue().  The function @rfn will be called when there
802  *    are requests on the queue that need to be processed.  If the device
803  *    supports plugging, then @rfn may not be called immediately when requests
804  *    are available on the queue, but may be called at some time later instead.
805  *    Plugged queues are generally unplugged when a buffer belonging to one
806  *    of the requests on the queue is needed, or due to memory pressure.
807  *
808  *    @rfn is not required, or even expected, to remove all requests off the
809  *    queue, but only as many as it can handle at a time.  If it does leave
810  *    requests on the queue, it is responsible for arranging that the requests
811  *    get dealt with eventually.
812  *
813  *    The queue spin lock must be held while manipulating the requests on the
814  *    request queue; this lock will be taken also from interrupt context, so irq
815  *    disabling is needed for it.
816  *
817  *    Function returns a pointer to the initialized request queue, or %NULL if
818  *    it didn't succeed.
819  *
820  * Note:
821  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
822  *    when the block device is deactivated (such as at module unload).
823  **/
824 
825 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
826 {
827 	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
828 }
829 EXPORT_SYMBOL(blk_init_queue);
830 
831 struct request_queue *
832 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
833 {
834 	struct request_queue *q;
835 
836 	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
837 	if (!q)
838 		return NULL;
839 
840 	q->request_fn = rfn;
841 	if (lock)
842 		q->queue_lock = lock;
843 	if (blk_init_allocated_queue(q) < 0) {
844 		blk_cleanup_queue(q);
845 		return NULL;
846 	}
847 
848 	return q;
849 }
850 EXPORT_SYMBOL(blk_init_queue_node);
851 
852 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
853 
854 
855 int blk_init_allocated_queue(struct request_queue *q)
856 {
857 	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
858 	if (!q->fq)
859 		return -ENOMEM;
860 
861 	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
862 		goto out_free_flush_queue;
863 
864 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
865 		goto out_exit_flush_rq;
866 
867 	INIT_WORK(&q->timeout_work, blk_timeout_work);
868 	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
869 
870 	/*
871 	 * This also sets hw/phys segments, boundary and size
872 	 */
873 	blk_queue_make_request(q, blk_queue_bio);
874 
875 	q->sg_reserved_size = INT_MAX;
876 
877 	/* Protect q->elevator from elevator_change */
878 	mutex_lock(&q->sysfs_lock);
879 
880 	/* init elevator */
881 	if (elevator_init(q, NULL)) {
882 		mutex_unlock(&q->sysfs_lock);
883 		goto out_exit_flush_rq;
884 	}
885 
886 	mutex_unlock(&q->sysfs_lock);
887 	return 0;
888 
889 out_exit_flush_rq:
890 	if (q->exit_rq_fn)
891 		q->exit_rq_fn(q, q->fq->flush_rq);
892 out_free_flush_queue:
893 	blk_free_flush_queue(q->fq);
894 	wbt_exit(q);
895 	return -ENOMEM;
896 }
897 EXPORT_SYMBOL(blk_init_allocated_queue);
898 
899 bool blk_get_queue(struct request_queue *q)
900 {
901 	if (likely(!blk_queue_dying(q))) {
902 		__blk_get_queue(q);
903 		return true;
904 	}
905 
906 	return false;
907 }
908 EXPORT_SYMBOL(blk_get_queue);
909 
910 static inline void blk_free_request(struct request_list *rl, struct request *rq)
911 {
912 	if (rq->rq_flags & RQF_ELVPRIV) {
913 		elv_put_request(rl->q, rq);
914 		if (rq->elv.icq)
915 			put_io_context(rq->elv.icq->ioc);
916 	}
917 
918 	mempool_free(rq, rl->rq_pool);
919 }
920 
921 /*
922  * ioc_batching returns true if the ioc is a valid batching request and
923  * should be given priority access to a request.
924  */
925 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
926 {
927 	if (!ioc)
928 		return 0;
929 
930 	/*
931 	 * Make sure the process is able to allocate at least 1 request
932 	 * even if the batch times out, otherwise we could theoretically
933 	 * lose wakeups.
934 	 */
935 	return ioc->nr_batch_requests == q->nr_batching ||
936 		(ioc->nr_batch_requests > 0
937 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
938 }
939 
940 /*
941  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
942  * will cause the process to be a "batcher" on all queues in the system. This
943  * is the behaviour we want though - once it gets a wakeup it should be given
944  * a nice run.
945  */
946 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
947 {
948 	if (!ioc || ioc_batching(q, ioc))
949 		return;
950 
951 	ioc->nr_batch_requests = q->nr_batching;
952 	ioc->last_waited = jiffies;
953 }
954 
955 static void __freed_request(struct request_list *rl, int sync)
956 {
957 	struct request_queue *q = rl->q;
958 
959 	if (rl->count[sync] < queue_congestion_off_threshold(q))
960 		blk_clear_congested(rl, sync);
961 
962 	if (rl->count[sync] + 1 <= q->nr_requests) {
963 		if (waitqueue_active(&rl->wait[sync]))
964 			wake_up(&rl->wait[sync]);
965 
966 		blk_clear_rl_full(rl, sync);
967 	}
968 }
969 
970 /*
971  * A request has just been released.  Account for it, update the full and
972  * congestion status, wake up any waiters.   Called under q->queue_lock.
973  */
974 static void freed_request(struct request_list *rl, bool sync,
975 		req_flags_t rq_flags)
976 {
977 	struct request_queue *q = rl->q;
978 
979 	q->nr_rqs[sync]--;
980 	rl->count[sync]--;
981 	if (rq_flags & RQF_ELVPRIV)
982 		q->nr_rqs_elvpriv--;
983 
984 	__freed_request(rl, sync);
985 
986 	if (unlikely(rl->starved[sync ^ 1]))
987 		__freed_request(rl, sync ^ 1);
988 }
989 
990 int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
991 {
992 	struct request_list *rl;
993 	int on_thresh, off_thresh;
994 
995 	spin_lock_irq(q->queue_lock);
996 	q->nr_requests = nr;
997 	blk_queue_congestion_threshold(q);
998 	on_thresh = queue_congestion_on_threshold(q);
999 	off_thresh = queue_congestion_off_threshold(q);
1000 
1001 	blk_queue_for_each_rl(rl, q) {
1002 		if (rl->count[BLK_RW_SYNC] >= on_thresh)
1003 			blk_set_congested(rl, BLK_RW_SYNC);
1004 		else if (rl->count[BLK_RW_SYNC] < off_thresh)
1005 			blk_clear_congested(rl, BLK_RW_SYNC);
1006 
1007 		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
1008 			blk_set_congested(rl, BLK_RW_ASYNC);
1009 		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
1010 			blk_clear_congested(rl, BLK_RW_ASYNC);
1011 
1012 		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
1013 			blk_set_rl_full(rl, BLK_RW_SYNC);
1014 		} else {
1015 			blk_clear_rl_full(rl, BLK_RW_SYNC);
1016 			wake_up(&rl->wait[BLK_RW_SYNC]);
1017 		}
1018 
1019 		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
1020 			blk_set_rl_full(rl, BLK_RW_ASYNC);
1021 		} else {
1022 			blk_clear_rl_full(rl, BLK_RW_ASYNC);
1023 			wake_up(&rl->wait[BLK_RW_ASYNC]);
1024 		}
1025 	}
1026 
1027 	spin_unlock_irq(q->queue_lock);
1028 	return 0;
1029 }
1030 
1031 /**
1032  * __get_request - get a free request
1033  * @rl: request list to allocate from
1034  * @op: operation and flags
1035  * @bio: bio to allocate request for (can be %NULL)
1036  * @gfp_mask: allocation mask
1037  *
1038  * Get a free request from @q.  This function may fail under memory
1039  * pressure or if @q is dead.
1040  *
1041  * Must be called with @q->queue_lock held and,
1042  * Returns ERR_PTR on failure, with @q->queue_lock held.
1043  * Returns request pointer on success, with @q->queue_lock *not held*.
1044  */
1045 static struct request *__get_request(struct request_list *rl, unsigned int op,
1046 		struct bio *bio, gfp_t gfp_mask)
1047 {
1048 	struct request_queue *q = rl->q;
1049 	struct request *rq;
1050 	struct elevator_type *et = q->elevator->type;
1051 	struct io_context *ioc = rq_ioc(bio);
1052 	struct io_cq *icq = NULL;
1053 	const bool is_sync = op_is_sync(op);
1054 	int may_queue;
1055 	req_flags_t rq_flags = RQF_ALLOCED;
1056 
1057 	if (unlikely(blk_queue_dying(q)))
1058 		return ERR_PTR(-ENODEV);
1059 
1060 	may_queue = elv_may_queue(q, op);
1061 	if (may_queue == ELV_MQUEUE_NO)
1062 		goto rq_starved;
1063 
1064 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
1065 		if (rl->count[is_sync]+1 >= q->nr_requests) {
1066 			/*
1067 			 * The queue will fill after this allocation, so set
1068 			 * it as full, and mark this process as "batching".
1069 			 * This process will be allowed to complete a batch of
1070 			 * requests, others will be blocked.
1071 			 */
1072 			if (!blk_rl_full(rl, is_sync)) {
1073 				ioc_set_batching(q, ioc);
1074 				blk_set_rl_full(rl, is_sync);
1075 			} else {
1076 				if (may_queue != ELV_MQUEUE_MUST
1077 						&& !ioc_batching(q, ioc)) {
1078 					/*
1079 					 * The queue is full and the allocating
1080 					 * process is not a "batcher", and not
1081 					 * exempted by the IO scheduler
1082 					 */
1083 					return ERR_PTR(-ENOMEM);
1084 				}
1085 			}
1086 		}
1087 		blk_set_congested(rl, is_sync);
1088 	}
1089 
1090 	/*
1091 	 * Only allow batching queuers to allocate up to 50% over the defined
1092 	 * limit of requests, otherwise we could have thousands of requests
1093 	 * allocated with any setting of ->nr_requests
1094 	 */
1095 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
1096 		return ERR_PTR(-ENOMEM);
1097 
1098 	q->nr_rqs[is_sync]++;
1099 	rl->count[is_sync]++;
1100 	rl->starved[is_sync] = 0;
1101 
1102 	/*
1103 	 * Decide whether the new request will be managed by elevator.  If
1104 	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
1105 	 * prevent the current elevator from being destroyed until the new
1106 	 * request is freed.  This guarantees icq's won't be destroyed and
1107 	 * makes creating new ones safe.
1108 	 *
1109 	 * Flush requests do not use the elevator so skip initialization.
1110 	 * This allows a request to share the flush and elevator data.
1111 	 *
1112 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
1113 	 * it will be created after releasing queue_lock.
1114 	 */
1115 	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
1116 		rq_flags |= RQF_ELVPRIV;
1117 		q->nr_rqs_elvpriv++;
1118 		if (et->icq_cache && ioc)
1119 			icq = ioc_lookup_icq(ioc, q);
1120 	}
1121 
1122 	if (blk_queue_io_stat(q))
1123 		rq_flags |= RQF_IO_STAT;
1124 	spin_unlock_irq(q->queue_lock);
1125 
1126 	/* allocate and init request */
1127 	rq = mempool_alloc(rl->rq_pool, gfp_mask);
1128 	if (!rq)
1129 		goto fail_alloc;
1130 
1131 	blk_rq_init(q, rq);
1132 	blk_rq_set_rl(rq, rl);
1133 	blk_rq_set_prio(rq, ioc);
1134 	rq->cmd_flags = op;
1135 	rq->rq_flags = rq_flags;
1136 
1137 	/* init elvpriv */
1138 	if (rq_flags & RQF_ELVPRIV) {
1139 		if (unlikely(et->icq_cache && !icq)) {
1140 			if (ioc)
1141 				icq = ioc_create_icq(ioc, q, gfp_mask);
1142 			if (!icq)
1143 				goto fail_elvpriv;
1144 		}
1145 
1146 		rq->elv.icq = icq;
1147 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
1148 			goto fail_elvpriv;
1149 
1150 		/* @rq->elv.icq holds io_context until @rq is freed */
1151 		if (icq)
1152 			get_io_context(icq->ioc);
1153 	}
1154 out:
1155 	/*
1156 	 * ioc may be NULL here, and ioc_batching will be false. That's
1157 	 * OK, if the queue is under the request limit then requests need
1158 	 * not count toward the nr_batch_requests limit. There will always
1159 	 * be some limit enforced by BLK_BATCH_TIME.
1160 	 */
1161 	if (ioc_batching(q, ioc))
1162 		ioc->nr_batch_requests--;
1163 
1164 	trace_block_getrq(q, bio, op);
1165 	return rq;
1166 
1167 fail_elvpriv:
1168 	/*
1169 	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
1170 	 * and may fail indefinitely under memory pressure and thus
1171 	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
1172 	 * disturb iosched and blkcg but weird is bettern than dead.
1173 	 */
1174 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
1175 			   __func__, dev_name(q->backing_dev_info->dev));
1176 
1177 	rq->rq_flags &= ~RQF_ELVPRIV;
1178 	rq->elv.icq = NULL;
1179 
1180 	spin_lock_irq(q->queue_lock);
1181 	q->nr_rqs_elvpriv--;
1182 	spin_unlock_irq(q->queue_lock);
1183 	goto out;
1184 
1185 fail_alloc:
1186 	/*
1187 	 * Allocation failed presumably due to memory. Undo anything we
1188 	 * might have messed up.
1189 	 *
1190 	 * Allocating task should really be put onto the front of the wait
1191 	 * queue, but this is pretty rare.
1192 	 */
1193 	spin_lock_irq(q->queue_lock);
1194 	freed_request(rl, is_sync, rq_flags);
1195 
1196 	/*
1197 	 * in the very unlikely event that allocation failed and no
1198 	 * requests for this direction was pending, mark us starved so that
1199 	 * freeing of a request in the other direction will notice
1200 	 * us. another possible fix would be to split the rq mempool into
1201 	 * READ and WRITE
1202 	 */
1203 rq_starved:
1204 	if (unlikely(rl->count[is_sync] == 0))
1205 		rl->starved[is_sync] = 1;
1206 	return ERR_PTR(-ENOMEM);
1207 }
1208 
1209 /**
1210  * get_request - get a free request
1211  * @q: request_queue to allocate request from
1212  * @op: operation and flags
1213  * @bio: bio to allocate request for (can be %NULL)
1214  * @gfp_mask: allocation mask
1215  *
1216  * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
1217  * this function keeps retrying under memory pressure and fails iff @q is dead.
1218  *
1219  * Must be called with @q->queue_lock held and,
1220  * Returns ERR_PTR on failure, with @q->queue_lock held.
1221  * Returns request pointer on success, with @q->queue_lock *not held*.
1222  */
1223 static struct request *get_request(struct request_queue *q, unsigned int op,
1224 		struct bio *bio, gfp_t gfp_mask)
1225 {
1226 	const bool is_sync = op_is_sync(op);
1227 	DEFINE_WAIT(wait);
1228 	struct request_list *rl;
1229 	struct request *rq;
1230 
1231 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
1232 retry:
1233 	rq = __get_request(rl, op, bio, gfp_mask);
1234 	if (!IS_ERR(rq))
1235 		return rq;
1236 
1237 	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
1238 		blk_put_rl(rl);
1239 		return rq;
1240 	}
1241 
1242 	/* wait on @rl and retry */
1243 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1244 				  TASK_UNINTERRUPTIBLE);
1245 
1246 	trace_block_sleeprq(q, bio, op);
1247 
1248 	spin_unlock_irq(q->queue_lock);
1249 	io_schedule();
1250 
1251 	/*
1252 	 * After sleeping, we become a "batching" process and will be able
1253 	 * to allocate at least one request, and up to a big batch of them
1254 	 * for a small period time.  See ioc_batching, ioc_set_batching
1255 	 */
1256 	ioc_set_batching(q, current->io_context);
1257 
1258 	spin_lock_irq(q->queue_lock);
1259 	finish_wait(&rl->wait[is_sync], &wait);
1260 
1261 	goto retry;
1262 }
1263 
1264 static struct request *blk_old_get_request(struct request_queue *q, int rw,
1265 		gfp_t gfp_mask)
1266 {
1267 	struct request *rq;
1268 
1269 	/* create ioc upfront */
1270 	create_io_context(gfp_mask, q->node);
1271 
1272 	spin_lock_irq(q->queue_lock);
1273 	rq = get_request(q, rw, NULL, gfp_mask);
1274 	if (IS_ERR(rq)) {
1275 		spin_unlock_irq(q->queue_lock);
1276 		return rq;
1277 	}
1278 
1279 	/* q->queue_lock is unlocked at this point */
1280 	rq->__data_len = 0;
1281 	rq->__sector = (sector_t) -1;
1282 	rq->bio = rq->biotail = NULL;
1283 	return rq;
1284 }
1285 
1286 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1287 {
1288 	if (q->mq_ops)
1289 		return blk_mq_alloc_request(q, rw,
1290 			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
1291 				0 : BLK_MQ_REQ_NOWAIT);
1292 	else
1293 		return blk_old_get_request(q, rw, gfp_mask);
1294 }
1295 EXPORT_SYMBOL(blk_get_request);
1296 
1297 /**
1298  * blk_requeue_request - put a request back on queue
1299  * @q:		request queue where request should be inserted
1300  * @rq:		request to be inserted
1301  *
1302  * Description:
1303  *    Drivers often keep queueing requests until the hardware cannot accept
1304  *    more, when that condition happens we need to put the request back
1305  *    on the queue. Must be called with queue lock held.
1306  */
1307 void blk_requeue_request(struct request_queue *q, struct request *rq)
1308 {
1309 	blk_delete_timer(rq);
1310 	blk_clear_rq_complete(rq);
1311 	trace_block_rq_requeue(q, rq);
1312 	wbt_requeue(q->rq_wb, &rq->issue_stat);
1313 
1314 	if (rq->rq_flags & RQF_QUEUED)
1315 		blk_queue_end_tag(q, rq);
1316 
1317 	BUG_ON(blk_queued_rq(rq));
1318 
1319 	elv_requeue_request(q, rq);
1320 }
1321 EXPORT_SYMBOL(blk_requeue_request);
1322 
1323 static void add_acct_request(struct request_queue *q, struct request *rq,
1324 			     int where)
1325 {
1326 	blk_account_io_start(rq, true);
1327 	__elv_add_request(q, rq, where);
1328 }
1329 
1330 static void part_round_stats_single(int cpu, struct hd_struct *part,
1331 				    unsigned long now)
1332 {
1333 	int inflight;
1334 
1335 	if (now == part->stamp)
1336 		return;
1337 
1338 	inflight = part_in_flight(part);
1339 	if (inflight) {
1340 		__part_stat_add(cpu, part, time_in_queue,
1341 				inflight * (now - part->stamp));
1342 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1343 	}
1344 	part->stamp = now;
1345 }
1346 
1347 /**
1348  * part_round_stats() - Round off the performance stats on a struct disk_stats.
1349  * @cpu: cpu number for stats access
1350  * @part: target partition
1351  *
1352  * The average IO queue length and utilisation statistics are maintained
1353  * by observing the current state of the queue length and the amount of
1354  * time it has been in this state for.
1355  *
1356  * Normally, that accounting is done on IO completion, but that can result
1357  * in more than a second's worth of IO being accounted for within any one
1358  * second, leading to >100% utilisation.  To deal with that, we call this
1359  * function to do a round-off before returning the results when reading
1360  * /proc/diskstats.  This accounts immediately for all queue usage up to
1361  * the current jiffies and restarts the counters again.
1362  */
1363 void part_round_stats(int cpu, struct hd_struct *part)
1364 {
1365 	unsigned long now = jiffies;
1366 
1367 	if (part->partno)
1368 		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1369 	part_round_stats_single(cpu, part, now);
1370 }
1371 EXPORT_SYMBOL_GPL(part_round_stats);
1372 
1373 #ifdef CONFIG_PM
1374 static void blk_pm_put_request(struct request *rq)
1375 {
1376 	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
1377 		pm_runtime_mark_last_busy(rq->q->dev);
1378 }
1379 #else
1380 static inline void blk_pm_put_request(struct request *rq) {}
1381 #endif
1382 
1383 /*
1384  * queue lock must be held
1385  */
1386 void __blk_put_request(struct request_queue *q, struct request *req)
1387 {
1388 	req_flags_t rq_flags = req->rq_flags;
1389 
1390 	if (unlikely(!q))
1391 		return;
1392 
1393 	if (q->mq_ops) {
1394 		blk_mq_free_request(req);
1395 		return;
1396 	}
1397 
1398 	blk_pm_put_request(req);
1399 
1400 	elv_completed_request(q, req);
1401 
1402 	/* this is a bio leak */
1403 	WARN_ON(req->bio != NULL);
1404 
1405 	wbt_done(q->rq_wb, &req->issue_stat);
1406 
1407 	/*
1408 	 * Request may not have originated from ll_rw_blk. if not,
1409 	 * it didn't come out of our reserved rq pools
1410 	 */
1411 	if (rq_flags & RQF_ALLOCED) {
1412 		struct request_list *rl = blk_rq_rl(req);
1413 		bool sync = op_is_sync(req->cmd_flags);
1414 
1415 		BUG_ON(!list_empty(&req->queuelist));
1416 		BUG_ON(ELV_ON_HASH(req));
1417 
1418 		blk_free_request(rl, req);
1419 		freed_request(rl, sync, rq_flags);
1420 		blk_put_rl(rl);
1421 	}
1422 }
1423 EXPORT_SYMBOL_GPL(__blk_put_request);
1424 
1425 void blk_put_request(struct request *req)
1426 {
1427 	struct request_queue *q = req->q;
1428 
1429 	if (q->mq_ops)
1430 		blk_mq_free_request(req);
1431 	else {
1432 		unsigned long flags;
1433 
1434 		spin_lock_irqsave(q->queue_lock, flags);
1435 		__blk_put_request(q, req);
1436 		spin_unlock_irqrestore(q->queue_lock, flags);
1437 	}
1438 }
1439 EXPORT_SYMBOL(blk_put_request);
1440 
1441 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1442 			    struct bio *bio)
1443 {
1444 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1445 
1446 	if (!ll_back_merge_fn(q, req, bio))
1447 		return false;
1448 
1449 	trace_block_bio_backmerge(q, req, bio);
1450 
1451 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1452 		blk_rq_set_mixed_merge(req);
1453 
1454 	req->biotail->bi_next = bio;
1455 	req->biotail = bio;
1456 	req->__data_len += bio->bi_iter.bi_size;
1457 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1458 
1459 	blk_account_io_start(req, false);
1460 	return true;
1461 }
1462 
1463 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1464 			     struct bio *bio)
1465 {
1466 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1467 
1468 	if (!ll_front_merge_fn(q, req, bio))
1469 		return false;
1470 
1471 	trace_block_bio_frontmerge(q, req, bio);
1472 
1473 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1474 		blk_rq_set_mixed_merge(req);
1475 
1476 	bio->bi_next = req->bio;
1477 	req->bio = bio;
1478 
1479 	req->__sector = bio->bi_iter.bi_sector;
1480 	req->__data_len += bio->bi_iter.bi_size;
1481 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1482 
1483 	blk_account_io_start(req, false);
1484 	return true;
1485 }
1486 
1487 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
1488 		struct bio *bio)
1489 {
1490 	unsigned short segments = blk_rq_nr_discard_segments(req);
1491 
1492 	if (segments >= queue_max_discard_segments(q))
1493 		goto no_merge;
1494 	if (blk_rq_sectors(req) + bio_sectors(bio) >
1495 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1496 		goto no_merge;
1497 
1498 	req->biotail->bi_next = bio;
1499 	req->biotail = bio;
1500 	req->__data_len += bio->bi_iter.bi_size;
1501 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1502 	req->nr_phys_segments = segments + 1;
1503 
1504 	blk_account_io_start(req, false);
1505 	return true;
1506 no_merge:
1507 	req_set_nomerge(q, req);
1508 	return false;
1509 }
1510 
1511 /**
1512  * blk_attempt_plug_merge - try to merge with %current's plugged list
1513  * @q: request_queue new bio is being queued at
1514  * @bio: new bio being queued
1515  * @request_count: out parameter for number of traversed plugged requests
1516  * @same_queue_rq: pointer to &struct request that gets filled in when
1517  * another request associated with @q is found on the plug list
1518  * (optional, may be %NULL)
1519  *
1520  * Determine whether @bio being queued on @q can be merged with a request
1521  * on %current's plugged list.  Returns %true if merge was successful,
1522  * otherwise %false.
1523  *
1524  * Plugging coalesces IOs from the same issuer for the same purpose without
1525  * going through @q->queue_lock.  As such it's more of an issuing mechanism
1526  * than scheduling, and the request, while may have elvpriv data, is not
1527  * added on the elevator at this point.  In addition, we don't have
1528  * reliable access to the elevator outside queue lock.  Only check basic
1529  * merging parameters without querying the elevator.
1530  *
1531  * Caller must ensure !blk_queue_nomerges(q) beforehand.
1532  */
1533 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1534 			    unsigned int *request_count,
1535 			    struct request **same_queue_rq)
1536 {
1537 	struct blk_plug *plug;
1538 	struct request *rq;
1539 	struct list_head *plug_list;
1540 
1541 	plug = current->plug;
1542 	if (!plug)
1543 		return false;
1544 	*request_count = 0;
1545 
1546 	if (q->mq_ops)
1547 		plug_list = &plug->mq_list;
1548 	else
1549 		plug_list = &plug->list;
1550 
1551 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
1552 		bool merged = false;
1553 
1554 		if (rq->q == q) {
1555 			(*request_count)++;
1556 			/*
1557 			 * Only blk-mq multiple hardware queues case checks the
1558 			 * rq in the same queue, there should be only one such
1559 			 * rq in a queue
1560 			 **/
1561 			if (same_queue_rq)
1562 				*same_queue_rq = rq;
1563 		}
1564 
1565 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1566 			continue;
1567 
1568 		switch (blk_try_merge(rq, bio)) {
1569 		case ELEVATOR_BACK_MERGE:
1570 			merged = bio_attempt_back_merge(q, rq, bio);
1571 			break;
1572 		case ELEVATOR_FRONT_MERGE:
1573 			merged = bio_attempt_front_merge(q, rq, bio);
1574 			break;
1575 		case ELEVATOR_DISCARD_MERGE:
1576 			merged = bio_attempt_discard_merge(q, rq, bio);
1577 			break;
1578 		default:
1579 			break;
1580 		}
1581 
1582 		if (merged)
1583 			return true;
1584 	}
1585 
1586 	return false;
1587 }
1588 
1589 unsigned int blk_plug_queued_count(struct request_queue *q)
1590 {
1591 	struct blk_plug *plug;
1592 	struct request *rq;
1593 	struct list_head *plug_list;
1594 	unsigned int ret = 0;
1595 
1596 	plug = current->plug;
1597 	if (!plug)
1598 		goto out;
1599 
1600 	if (q->mq_ops)
1601 		plug_list = &plug->mq_list;
1602 	else
1603 		plug_list = &plug->list;
1604 
1605 	list_for_each_entry(rq, plug_list, queuelist) {
1606 		if (rq->q == q)
1607 			ret++;
1608 	}
1609 out:
1610 	return ret;
1611 }
1612 
1613 void init_request_from_bio(struct request *req, struct bio *bio)
1614 {
1615 	if (bio->bi_opf & REQ_RAHEAD)
1616 		req->cmd_flags |= REQ_FAILFAST_MASK;
1617 
1618 	req->errors = 0;
1619 	req->__sector = bio->bi_iter.bi_sector;
1620 	if (ioprio_valid(bio_prio(bio)))
1621 		req->ioprio = bio_prio(bio);
1622 	blk_rq_bio_prep(req->q, req, bio);
1623 }
1624 
1625 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1626 {
1627 	struct blk_plug *plug;
1628 	int where = ELEVATOR_INSERT_SORT;
1629 	struct request *req, *free;
1630 	unsigned int request_count = 0;
1631 	unsigned int wb_acct;
1632 
1633 	/*
1634 	 * low level driver can indicate that it wants pages above a
1635 	 * certain limit bounced to low memory (ie for highmem, or even
1636 	 * ISA dma in theory)
1637 	 */
1638 	blk_queue_bounce(q, &bio);
1639 
1640 	blk_queue_split(q, &bio, q->bio_split);
1641 
1642 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1643 		bio->bi_error = -EIO;
1644 		bio_endio(bio);
1645 		return BLK_QC_T_NONE;
1646 	}
1647 
1648 	if (op_is_flush(bio->bi_opf)) {
1649 		spin_lock_irq(q->queue_lock);
1650 		where = ELEVATOR_INSERT_FLUSH;
1651 		goto get_rq;
1652 	}
1653 
1654 	/*
1655 	 * Check if we can merge with the plugged list before grabbing
1656 	 * any locks.
1657 	 */
1658 	if (!blk_queue_nomerges(q)) {
1659 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1660 			return BLK_QC_T_NONE;
1661 	} else
1662 		request_count = blk_plug_queued_count(q);
1663 
1664 	spin_lock_irq(q->queue_lock);
1665 
1666 	switch (elv_merge(q, &req, bio)) {
1667 	case ELEVATOR_BACK_MERGE:
1668 		if (!bio_attempt_back_merge(q, req, bio))
1669 			break;
1670 		elv_bio_merged(q, req, bio);
1671 		free = attempt_back_merge(q, req);
1672 		if (free)
1673 			__blk_put_request(q, free);
1674 		else
1675 			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
1676 		goto out_unlock;
1677 	case ELEVATOR_FRONT_MERGE:
1678 		if (!bio_attempt_front_merge(q, req, bio))
1679 			break;
1680 		elv_bio_merged(q, req, bio);
1681 		free = attempt_front_merge(q, req);
1682 		if (free)
1683 			__blk_put_request(q, free);
1684 		else
1685 			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
1686 		goto out_unlock;
1687 	default:
1688 		break;
1689 	}
1690 
1691 get_rq:
1692 	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1693 
1694 	/*
1695 	 * Grab a free request. This is might sleep but can not fail.
1696 	 * Returns with the queue unlocked.
1697 	 */
1698 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
1699 	if (IS_ERR(req)) {
1700 		__wbt_done(q->rq_wb, wb_acct);
1701 		bio->bi_error = PTR_ERR(req);
1702 		bio_endio(bio);
1703 		goto out_unlock;
1704 	}
1705 
1706 	wbt_track(&req->issue_stat, wb_acct);
1707 
1708 	/*
1709 	 * After dropping the lock and possibly sleeping here, our request
1710 	 * may now be mergeable after it had proven unmergeable (above).
1711 	 * We don't worry about that case for efficiency. It won't happen
1712 	 * often, and the elevators are able to handle it.
1713 	 */
1714 	init_request_from_bio(req, bio);
1715 
1716 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1717 		req->cpu = raw_smp_processor_id();
1718 
1719 	plug = current->plug;
1720 	if (plug) {
1721 		/*
1722 		 * If this is the first request added after a plug, fire
1723 		 * of a plug trace.
1724 		 *
1725 		 * @request_count may become stale because of schedule
1726 		 * out, so check plug list again.
1727 		 */
1728 		if (!request_count || list_empty(&plug->list))
1729 			trace_block_plug(q);
1730 		else {
1731 			struct request *last = list_entry_rq(plug->list.prev);
1732 			if (request_count >= BLK_MAX_REQUEST_COUNT ||
1733 			    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
1734 				blk_flush_plug_list(plug, false);
1735 				trace_block_plug(q);
1736 			}
1737 		}
1738 		list_add_tail(&req->queuelist, &plug->list);
1739 		blk_account_io_start(req, true);
1740 	} else {
1741 		spin_lock_irq(q->queue_lock);
1742 		add_acct_request(q, req, where);
1743 		__blk_run_queue(q);
1744 out_unlock:
1745 		spin_unlock_irq(q->queue_lock);
1746 	}
1747 
1748 	return BLK_QC_T_NONE;
1749 }
1750 
1751 /*
1752  * If bio->bi_dev is a partition, remap the location
1753  */
1754 static inline void blk_partition_remap(struct bio *bio)
1755 {
1756 	struct block_device *bdev = bio->bi_bdev;
1757 
1758 	/*
1759 	 * Zone reset does not include bi_size so bio_sectors() is always 0.
1760 	 * Include a test for the reset op code and perform the remap if needed.
1761 	 */
1762 	if (bdev != bdev->bd_contains &&
1763 	    (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
1764 		struct hd_struct *p = bdev->bd_part;
1765 
1766 		bio->bi_iter.bi_sector += p->start_sect;
1767 		bio->bi_bdev = bdev->bd_contains;
1768 
1769 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1770 				      bdev->bd_dev,
1771 				      bio->bi_iter.bi_sector - p->start_sect);
1772 	}
1773 }
1774 
1775 static void handle_bad_sector(struct bio *bio)
1776 {
1777 	char b[BDEVNAME_SIZE];
1778 
1779 	printk(KERN_INFO "attempt to access beyond end of device\n");
1780 	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
1781 			bdevname(bio->bi_bdev, b),
1782 			bio->bi_opf,
1783 			(unsigned long long)bio_end_sector(bio),
1784 			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1785 }
1786 
1787 #ifdef CONFIG_FAIL_MAKE_REQUEST
1788 
1789 static DECLARE_FAULT_ATTR(fail_make_request);
1790 
1791 static int __init setup_fail_make_request(char *str)
1792 {
1793 	return setup_fault_attr(&fail_make_request, str);
1794 }
1795 __setup("fail_make_request=", setup_fail_make_request);
1796 
1797 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1798 {
1799 	return part->make_it_fail && should_fail(&fail_make_request, bytes);
1800 }
1801 
1802 static int __init fail_make_request_debugfs(void)
1803 {
1804 	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1805 						NULL, &fail_make_request);
1806 
1807 	return PTR_ERR_OR_ZERO(dir);
1808 }
1809 
1810 late_initcall(fail_make_request_debugfs);
1811 
1812 #else /* CONFIG_FAIL_MAKE_REQUEST */
1813 
1814 static inline bool should_fail_request(struct hd_struct *part,
1815 					unsigned int bytes)
1816 {
1817 	return false;
1818 }
1819 
1820 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1821 
1822 /*
1823  * Check whether this bio extends beyond the end of the device.
1824  */
1825 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1826 {
1827 	sector_t maxsector;
1828 
1829 	if (!nr_sectors)
1830 		return 0;
1831 
1832 	/* Test device or partition size, when known. */
1833 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1834 	if (maxsector) {
1835 		sector_t sector = bio->bi_iter.bi_sector;
1836 
1837 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1838 			/*
1839 			 * This may well happen - the kernel calls bread()
1840 			 * without checking the size of the device, e.g., when
1841 			 * mounting a device.
1842 			 */
1843 			handle_bad_sector(bio);
1844 			return 1;
1845 		}
1846 	}
1847 
1848 	return 0;
1849 }
1850 
1851 static noinline_for_stack bool
1852 generic_make_request_checks(struct bio *bio)
1853 {
1854 	struct request_queue *q;
1855 	int nr_sectors = bio_sectors(bio);
1856 	int err = -EIO;
1857 	char b[BDEVNAME_SIZE];
1858 	struct hd_struct *part;
1859 
1860 	might_sleep();
1861 
1862 	if (bio_check_eod(bio, nr_sectors))
1863 		goto end_io;
1864 
1865 	q = bdev_get_queue(bio->bi_bdev);
1866 	if (unlikely(!q)) {
1867 		printk(KERN_ERR
1868 		       "generic_make_request: Trying to access "
1869 			"nonexistent block-device %s (%Lu)\n",
1870 			bdevname(bio->bi_bdev, b),
1871 			(long long) bio->bi_iter.bi_sector);
1872 		goto end_io;
1873 	}
1874 
1875 	part = bio->bi_bdev->bd_part;
1876 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
1877 	    should_fail_request(&part_to_disk(part)->part0,
1878 				bio->bi_iter.bi_size))
1879 		goto end_io;
1880 
1881 	/*
1882 	 * If this device has partitions, remap block n
1883 	 * of partition p to block n+start(p) of the disk.
1884 	 */
1885 	blk_partition_remap(bio);
1886 
1887 	if (bio_check_eod(bio, nr_sectors))
1888 		goto end_io;
1889 
1890 	/*
1891 	 * Filter flush bio's early so that make_request based
1892 	 * drivers without flush support don't have to worry
1893 	 * about them.
1894 	 */
1895 	if (op_is_flush(bio->bi_opf) &&
1896 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1897 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
1898 		if (!nr_sectors) {
1899 			err = 0;
1900 			goto end_io;
1901 		}
1902 	}
1903 
1904 	switch (bio_op(bio)) {
1905 	case REQ_OP_DISCARD:
1906 		if (!blk_queue_discard(q))
1907 			goto not_supported;
1908 		break;
1909 	case REQ_OP_SECURE_ERASE:
1910 		if (!blk_queue_secure_erase(q))
1911 			goto not_supported;
1912 		break;
1913 	case REQ_OP_WRITE_SAME:
1914 		if (!bdev_write_same(bio->bi_bdev))
1915 			goto not_supported;
1916 		break;
1917 	case REQ_OP_ZONE_REPORT:
1918 	case REQ_OP_ZONE_RESET:
1919 		if (!bdev_is_zoned(bio->bi_bdev))
1920 			goto not_supported;
1921 		break;
1922 	case REQ_OP_WRITE_ZEROES:
1923 		if (!bdev_write_zeroes_sectors(bio->bi_bdev))
1924 			goto not_supported;
1925 		break;
1926 	default:
1927 		break;
1928 	}
1929 
1930 	/*
1931 	 * Various block parts want %current->io_context and lazy ioc
1932 	 * allocation ends up trading a lot of pain for a small amount of
1933 	 * memory.  Just allocate it upfront.  This may fail and block
1934 	 * layer knows how to live with it.
1935 	 */
1936 	create_io_context(GFP_ATOMIC, q->node);
1937 
1938 	if (!blkcg_bio_issue_check(q, bio))
1939 		return false;
1940 
1941 	trace_block_bio_queue(q, bio);
1942 	return true;
1943 
1944 not_supported:
1945 	err = -EOPNOTSUPP;
1946 end_io:
1947 	bio->bi_error = err;
1948 	bio_endio(bio);
1949 	return false;
1950 }
1951 
1952 /**
1953  * generic_make_request - hand a buffer to its device driver for I/O
1954  * @bio:  The bio describing the location in memory and on the device.
1955  *
1956  * generic_make_request() is used to make I/O requests of block
1957  * devices. It is passed a &struct bio, which describes the I/O that needs
1958  * to be done.
1959  *
1960  * generic_make_request() does not return any status.  The
1961  * success/failure status of the request, along with notification of
1962  * completion, is delivered asynchronously through the bio->bi_end_io
1963  * function described (one day) else where.
1964  *
1965  * The caller of generic_make_request must make sure that bi_io_vec
1966  * are set to describe the memory buffer, and that bi_dev and bi_sector are
1967  * set to describe the device address, and the
1968  * bi_end_io and optionally bi_private are set to describe how
1969  * completion notification should be signaled.
1970  *
1971  * generic_make_request and the drivers it calls may use bi_next if this
1972  * bio happens to be merged with someone else, and may resubmit the bio to
1973  * a lower device by calling into generic_make_request recursively, which
1974  * means the bio should NOT be touched after the call to ->make_request_fn.
1975  */
1976 blk_qc_t generic_make_request(struct bio *bio)
1977 {
1978 	struct bio_list bio_list_on_stack;
1979 	blk_qc_t ret = BLK_QC_T_NONE;
1980 
1981 	if (!generic_make_request_checks(bio))
1982 		goto out;
1983 
1984 	/*
1985 	 * We only want one ->make_request_fn to be active at a time, else
1986 	 * stack usage with stacked devices could be a problem.  So use
1987 	 * current->bio_list to keep a list of requests submited by a
1988 	 * make_request_fn function.  current->bio_list is also used as a
1989 	 * flag to say if generic_make_request is currently active in this
1990 	 * task or not.  If it is NULL, then no make_request is active.  If
1991 	 * it is non-NULL, then a make_request is active, and new requests
1992 	 * should be added at the tail
1993 	 */
1994 	if (current->bio_list) {
1995 		bio_list_add(current->bio_list, bio);
1996 		goto out;
1997 	}
1998 
1999 	/* following loop may be a bit non-obvious, and so deserves some
2000 	 * explanation.
2001 	 * Before entering the loop, bio->bi_next is NULL (as all callers
2002 	 * ensure that) so we have a list with a single bio.
2003 	 * We pretend that we have just taken it off a longer list, so
2004 	 * we assign bio_list to a pointer to the bio_list_on_stack,
2005 	 * thus initialising the bio_list of new bios to be
2006 	 * added.  ->make_request() may indeed add some more bios
2007 	 * through a recursive call to generic_make_request.  If it
2008 	 * did, we find a non-NULL value in bio_list and re-enter the loop
2009 	 * from the top.  In this case we really did just take the bio
2010 	 * of the top of the list (no pretending) and so remove it from
2011 	 * bio_list, and call into ->make_request() again.
2012 	 */
2013 	BUG_ON(bio->bi_next);
2014 	bio_list_init(&bio_list_on_stack);
2015 	current->bio_list = &bio_list_on_stack;
2016 	do {
2017 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2018 
2019 		if (likely(blk_queue_enter(q, false) == 0)) {
2020 			ret = q->make_request_fn(q, bio);
2021 
2022 			blk_queue_exit(q);
2023 
2024 			bio = bio_list_pop(current->bio_list);
2025 		} else {
2026 			struct bio *bio_next = bio_list_pop(current->bio_list);
2027 
2028 			bio_io_error(bio);
2029 			bio = bio_next;
2030 		}
2031 	} while (bio);
2032 	current->bio_list = NULL; /* deactivate */
2033 
2034 out:
2035 	return ret;
2036 }
2037 EXPORT_SYMBOL(generic_make_request);
2038 
2039 /**
2040  * submit_bio - submit a bio to the block device layer for I/O
2041  * @bio: The &struct bio which describes the I/O
2042  *
2043  * submit_bio() is very similar in purpose to generic_make_request(), and
2044  * uses that function to do most of the work. Both are fairly rough
2045  * interfaces; @bio must be presetup and ready for I/O.
2046  *
2047  */
2048 blk_qc_t submit_bio(struct bio *bio)
2049 {
2050 	/*
2051 	 * If it's a regular read/write or a barrier with data attached,
2052 	 * go through the normal accounting stuff before submission.
2053 	 */
2054 	if (bio_has_data(bio)) {
2055 		unsigned int count;
2056 
2057 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
2058 			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
2059 		else
2060 			count = bio_sectors(bio);
2061 
2062 		if (op_is_write(bio_op(bio))) {
2063 			count_vm_events(PGPGOUT, count);
2064 		} else {
2065 			task_io_account_read(bio->bi_iter.bi_size);
2066 			count_vm_events(PGPGIN, count);
2067 		}
2068 
2069 		if (unlikely(block_dump)) {
2070 			char b[BDEVNAME_SIZE];
2071 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
2072 			current->comm, task_pid_nr(current),
2073 				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
2074 				(unsigned long long)bio->bi_iter.bi_sector,
2075 				bdevname(bio->bi_bdev, b),
2076 				count);
2077 		}
2078 	}
2079 
2080 	return generic_make_request(bio);
2081 }
2082 EXPORT_SYMBOL(submit_bio);
2083 
2084 /**
2085  * blk_cloned_rq_check_limits - Helper function to check a cloned request
2086  *                              for new the queue limits
2087  * @q:  the queue
2088  * @rq: the request being checked
2089  *
2090  * Description:
2091  *    @rq may have been made based on weaker limitations of upper-level queues
2092  *    in request stacking drivers, and it may violate the limitation of @q.
2093  *    Since the block layer and the underlying device driver trust @rq
2094  *    after it is inserted to @q, it should be checked against @q before
2095  *    the insertion using this generic function.
2096  *
2097  *    Request stacking drivers like request-based dm may change the queue
2098  *    limits when retrying requests on other queues. Those requests need
2099  *    to be checked against the new queue limits again during dispatch.
2100  */
2101 static int blk_cloned_rq_check_limits(struct request_queue *q,
2102 				      struct request *rq)
2103 {
2104 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
2105 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
2106 		return -EIO;
2107 	}
2108 
2109 	/*
2110 	 * queue's settings related to segment counting like q->bounce_pfn
2111 	 * may differ from that of other stacking queues.
2112 	 * Recalculate it to check the request correctly on this queue's
2113 	 * limitation.
2114 	 */
2115 	blk_recalc_rq_segments(rq);
2116 	if (rq->nr_phys_segments > queue_max_segments(q)) {
2117 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2118 		return -EIO;
2119 	}
2120 
2121 	return 0;
2122 }
2123 
2124 /**
2125  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2126  * @q:  the queue to submit the request
2127  * @rq: the request being queued
2128  */
2129 int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2130 {
2131 	unsigned long flags;
2132 	int where = ELEVATOR_INSERT_BACK;
2133 
2134 	if (blk_cloned_rq_check_limits(q, rq))
2135 		return -EIO;
2136 
2137 	if (rq->rq_disk &&
2138 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2139 		return -EIO;
2140 
2141 	if (q->mq_ops) {
2142 		if (blk_queue_io_stat(q))
2143 			blk_account_io_start(rq, true);
2144 		blk_mq_sched_insert_request(rq, false, true, false, false);
2145 		return 0;
2146 	}
2147 
2148 	spin_lock_irqsave(q->queue_lock, flags);
2149 	if (unlikely(blk_queue_dying(q))) {
2150 		spin_unlock_irqrestore(q->queue_lock, flags);
2151 		return -ENODEV;
2152 	}
2153 
2154 	/*
2155 	 * Submitting request must be dequeued before calling this function
2156 	 * because it will be linked to another request_queue
2157 	 */
2158 	BUG_ON(blk_queued_rq(rq));
2159 
2160 	if (op_is_flush(rq->cmd_flags))
2161 		where = ELEVATOR_INSERT_FLUSH;
2162 
2163 	add_acct_request(q, rq, where);
2164 	if (where == ELEVATOR_INSERT_FLUSH)
2165 		__blk_run_queue(q);
2166 	spin_unlock_irqrestore(q->queue_lock, flags);
2167 
2168 	return 0;
2169 }
2170 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2171 
2172 /**
2173  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2174  * @rq: request to examine
2175  *
2176  * Description:
2177  *     A request could be merge of IOs which require different failure
2178  *     handling.  This function determines the number of bytes which
2179  *     can be failed from the beginning of the request without
2180  *     crossing into area which need to be retried further.
2181  *
2182  * Return:
2183  *     The number of bytes to fail.
2184  *
2185  * Context:
2186  *     queue_lock must be held.
2187  */
2188 unsigned int blk_rq_err_bytes(const struct request *rq)
2189 {
2190 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2191 	unsigned int bytes = 0;
2192 	struct bio *bio;
2193 
2194 	if (!(rq->rq_flags & RQF_MIXED_MERGE))
2195 		return blk_rq_bytes(rq);
2196 
2197 	/*
2198 	 * Currently the only 'mixing' which can happen is between
2199 	 * different fastfail types.  We can safely fail portions
2200 	 * which have all the failfast bits that the first one has -
2201 	 * the ones which are at least as eager to fail as the first
2202 	 * one.
2203 	 */
2204 	for (bio = rq->bio; bio; bio = bio->bi_next) {
2205 		if ((bio->bi_opf & ff) != ff)
2206 			break;
2207 		bytes += bio->bi_iter.bi_size;
2208 	}
2209 
2210 	/* this could lead to infinite loop */
2211 	BUG_ON(blk_rq_bytes(rq) && !bytes);
2212 	return bytes;
2213 }
2214 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2215 
2216 void blk_account_io_completion(struct request *req, unsigned int bytes)
2217 {
2218 	if (blk_do_io_stat(req)) {
2219 		const int rw = rq_data_dir(req);
2220 		struct hd_struct *part;
2221 		int cpu;
2222 
2223 		cpu = part_stat_lock();
2224 		part = req->part;
2225 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2226 		part_stat_unlock();
2227 	}
2228 }
2229 
2230 void blk_account_io_done(struct request *req)
2231 {
2232 	/*
2233 	 * Account IO completion.  flush_rq isn't accounted as a
2234 	 * normal IO on queueing nor completion.  Accounting the
2235 	 * containing request is enough.
2236 	 */
2237 	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
2238 		unsigned long duration = jiffies - req->start_time;
2239 		const int rw = rq_data_dir(req);
2240 		struct hd_struct *part;
2241 		int cpu;
2242 
2243 		cpu = part_stat_lock();
2244 		part = req->part;
2245 
2246 		part_stat_inc(cpu, part, ios[rw]);
2247 		part_stat_add(cpu, part, ticks[rw], duration);
2248 		part_round_stats(cpu, part);
2249 		part_dec_in_flight(part, rw);
2250 
2251 		hd_struct_put(part);
2252 		part_stat_unlock();
2253 	}
2254 }
2255 
2256 #ifdef CONFIG_PM
2257 /*
2258  * Don't process normal requests when queue is suspended
2259  * or in the process of suspending/resuming
2260  */
2261 static struct request *blk_pm_peek_request(struct request_queue *q,
2262 					   struct request *rq)
2263 {
2264 	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2265 	    (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
2266 		return NULL;
2267 	else
2268 		return rq;
2269 }
2270 #else
2271 static inline struct request *blk_pm_peek_request(struct request_queue *q,
2272 						  struct request *rq)
2273 {
2274 	return rq;
2275 }
2276 #endif
2277 
2278 void blk_account_io_start(struct request *rq, bool new_io)
2279 {
2280 	struct hd_struct *part;
2281 	int rw = rq_data_dir(rq);
2282 	int cpu;
2283 
2284 	if (!blk_do_io_stat(rq))
2285 		return;
2286 
2287 	cpu = part_stat_lock();
2288 
2289 	if (!new_io) {
2290 		part = rq->part;
2291 		part_stat_inc(cpu, part, merges[rw]);
2292 	} else {
2293 		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2294 		if (!hd_struct_try_get(part)) {
2295 			/*
2296 			 * The partition is already being removed,
2297 			 * the request will be accounted on the disk only
2298 			 *
2299 			 * We take a reference on disk->part0 although that
2300 			 * partition will never be deleted, so we can treat
2301 			 * it as any other partition.
2302 			 */
2303 			part = &rq->rq_disk->part0;
2304 			hd_struct_get(part);
2305 		}
2306 		part_round_stats(cpu, part);
2307 		part_inc_in_flight(part, rw);
2308 		rq->part = part;
2309 	}
2310 
2311 	part_stat_unlock();
2312 }
2313 
2314 /**
2315  * blk_peek_request - peek at the top of a request queue
2316  * @q: request queue to peek at
2317  *
2318  * Description:
2319  *     Return the request at the top of @q.  The returned request
2320  *     should be started using blk_start_request() before LLD starts
2321  *     processing it.
2322  *
2323  * Return:
2324  *     Pointer to the request at the top of @q if available.  Null
2325  *     otherwise.
2326  *
2327  * Context:
2328  *     queue_lock must be held.
2329  */
2330 struct request *blk_peek_request(struct request_queue *q)
2331 {
2332 	struct request *rq;
2333 	int ret;
2334 
2335 	while ((rq = __elv_next_request(q)) != NULL) {
2336 
2337 		rq = blk_pm_peek_request(q, rq);
2338 		if (!rq)
2339 			break;
2340 
2341 		if (!(rq->rq_flags & RQF_STARTED)) {
2342 			/*
2343 			 * This is the first time the device driver
2344 			 * sees this request (possibly after
2345 			 * requeueing).  Notify IO scheduler.
2346 			 */
2347 			if (rq->rq_flags & RQF_SORTED)
2348 				elv_activate_rq(q, rq);
2349 
2350 			/*
2351 			 * just mark as started even if we don't start
2352 			 * it, a request that has been delayed should
2353 			 * not be passed by new incoming requests
2354 			 */
2355 			rq->rq_flags |= RQF_STARTED;
2356 			trace_block_rq_issue(q, rq);
2357 		}
2358 
2359 		if (!q->boundary_rq || q->boundary_rq == rq) {
2360 			q->end_sector = rq_end_sector(rq);
2361 			q->boundary_rq = NULL;
2362 		}
2363 
2364 		if (rq->rq_flags & RQF_DONTPREP)
2365 			break;
2366 
2367 		if (q->dma_drain_size && blk_rq_bytes(rq)) {
2368 			/*
2369 			 * make sure space for the drain appears we
2370 			 * know we can do this because max_hw_segments
2371 			 * has been adjusted to be one fewer than the
2372 			 * device can handle
2373 			 */
2374 			rq->nr_phys_segments++;
2375 		}
2376 
2377 		if (!q->prep_rq_fn)
2378 			break;
2379 
2380 		ret = q->prep_rq_fn(q, rq);
2381 		if (ret == BLKPREP_OK) {
2382 			break;
2383 		} else if (ret == BLKPREP_DEFER) {
2384 			/*
2385 			 * the request may have been (partially) prepped.
2386 			 * we need to keep this request in the front to
2387 			 * avoid resource deadlock.  RQF_STARTED will
2388 			 * prevent other fs requests from passing this one.
2389 			 */
2390 			if (q->dma_drain_size && blk_rq_bytes(rq) &&
2391 			    !(rq->rq_flags & RQF_DONTPREP)) {
2392 				/*
2393 				 * remove the space for the drain we added
2394 				 * so that we don't add it again
2395 				 */
2396 				--rq->nr_phys_segments;
2397 			}
2398 
2399 			rq = NULL;
2400 			break;
2401 		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2402 			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
2403 
2404 			rq->rq_flags |= RQF_QUIET;
2405 			/*
2406 			 * Mark this request as started so we don't trigger
2407 			 * any debug logic in the end I/O path.
2408 			 */
2409 			blk_start_request(rq);
2410 			__blk_end_request_all(rq, err);
2411 		} else {
2412 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2413 			break;
2414 		}
2415 	}
2416 
2417 	return rq;
2418 }
2419 EXPORT_SYMBOL(blk_peek_request);
2420 
2421 void blk_dequeue_request(struct request *rq)
2422 {
2423 	struct request_queue *q = rq->q;
2424 
2425 	BUG_ON(list_empty(&rq->queuelist));
2426 	BUG_ON(ELV_ON_HASH(rq));
2427 
2428 	list_del_init(&rq->queuelist);
2429 
2430 	/*
2431 	 * the time frame between a request being removed from the lists
2432 	 * and to it is freed is accounted as io that is in progress at
2433 	 * the driver side.
2434 	 */
2435 	if (blk_account_rq(rq)) {
2436 		q->in_flight[rq_is_sync(rq)]++;
2437 		set_io_start_time_ns(rq);
2438 	}
2439 }
2440 
2441 /**
2442  * blk_start_request - start request processing on the driver
2443  * @req: request to dequeue
2444  *
2445  * Description:
2446  *     Dequeue @req and start timeout timer on it.  This hands off the
2447  *     request to the driver.
2448  *
2449  *     Block internal functions which don't want to start timer should
2450  *     call blk_dequeue_request().
2451  *
2452  * Context:
2453  *     queue_lock must be held.
2454  */
2455 void blk_start_request(struct request *req)
2456 {
2457 	blk_dequeue_request(req);
2458 
2459 	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
2460 		blk_stat_set_issue_time(&req->issue_stat);
2461 		req->rq_flags |= RQF_STATS;
2462 		wbt_issue(req->q->rq_wb, &req->issue_stat);
2463 	}
2464 
2465 	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
2466 	blk_add_timer(req);
2467 }
2468 EXPORT_SYMBOL(blk_start_request);
2469 
2470 /**
2471  * blk_fetch_request - fetch a request from a request queue
2472  * @q: request queue to fetch a request from
2473  *
2474  * Description:
2475  *     Return the request at the top of @q.  The request is started on
2476  *     return and LLD can start processing it immediately.
2477  *
2478  * Return:
2479  *     Pointer to the request at the top of @q if available.  Null
2480  *     otherwise.
2481  *
2482  * Context:
2483  *     queue_lock must be held.
2484  */
2485 struct request *blk_fetch_request(struct request_queue *q)
2486 {
2487 	struct request *rq;
2488 
2489 	rq = blk_peek_request(q);
2490 	if (rq)
2491 		blk_start_request(rq);
2492 	return rq;
2493 }
2494 EXPORT_SYMBOL(blk_fetch_request);
2495 
2496 /**
2497  * blk_update_request - Special helper function for request stacking drivers
2498  * @req:      the request being processed
2499  * @error:    %0 for success, < %0 for error
2500  * @nr_bytes: number of bytes to complete @req
2501  *
2502  * Description:
2503  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
2504  *     the request structure even if @req doesn't have leftover.
2505  *     If @req has leftover, sets it up for the next range of segments.
2506  *
2507  *     This special helper function is only for request stacking drivers
2508  *     (e.g. request-based dm) so that they can handle partial completion.
2509  *     Actual device drivers should use blk_end_request instead.
2510  *
2511  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2512  *     %false return from this function.
2513  *
2514  * Return:
2515  *     %false - this request doesn't have any more data
2516  *     %true  - this request has more data
2517  **/
2518 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2519 {
2520 	int total_bytes;
2521 
2522 	trace_block_rq_complete(req->q, req, nr_bytes);
2523 
2524 	if (!req->bio)
2525 		return false;
2526 
2527 	/*
2528 	 * For fs requests, rq is just carrier of independent bio's
2529 	 * and each partial completion should be handled separately.
2530 	 * Reset per-request error on each partial completion.
2531 	 *
2532 	 * TODO: tj: This is too subtle.  It would be better to let
2533 	 * low level drivers do what they see fit.
2534 	 */
2535 	if (!blk_rq_is_passthrough(req))
2536 		req->errors = 0;
2537 
2538 	if (error && !blk_rq_is_passthrough(req) &&
2539 	    !(req->rq_flags & RQF_QUIET)) {
2540 		char *error_type;
2541 
2542 		switch (error) {
2543 		case -ENOLINK:
2544 			error_type = "recoverable transport";
2545 			break;
2546 		case -EREMOTEIO:
2547 			error_type = "critical target";
2548 			break;
2549 		case -EBADE:
2550 			error_type = "critical nexus";
2551 			break;
2552 		case -ETIMEDOUT:
2553 			error_type = "timeout";
2554 			break;
2555 		case -ENOSPC:
2556 			error_type = "critical space allocation";
2557 			break;
2558 		case -ENODATA:
2559 			error_type = "critical medium";
2560 			break;
2561 		case -EIO:
2562 		default:
2563 			error_type = "I/O";
2564 			break;
2565 		}
2566 		printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
2567 				   __func__, error_type, req->rq_disk ?
2568 				   req->rq_disk->disk_name : "?",
2569 				   (unsigned long long)blk_rq_pos(req));
2570 
2571 	}
2572 
2573 	blk_account_io_completion(req, nr_bytes);
2574 
2575 	total_bytes = 0;
2576 	while (req->bio) {
2577 		struct bio *bio = req->bio;
2578 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
2579 
2580 		if (bio_bytes == bio->bi_iter.bi_size)
2581 			req->bio = bio->bi_next;
2582 
2583 		req_bio_endio(req, bio, bio_bytes, error);
2584 
2585 		total_bytes += bio_bytes;
2586 		nr_bytes -= bio_bytes;
2587 
2588 		if (!nr_bytes)
2589 			break;
2590 	}
2591 
2592 	/*
2593 	 * completely done
2594 	 */
2595 	if (!req->bio) {
2596 		/*
2597 		 * Reset counters so that the request stacking driver
2598 		 * can find how many bytes remain in the request
2599 		 * later.
2600 		 */
2601 		req->__data_len = 0;
2602 		return false;
2603 	}
2604 
2605 	WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
2606 
2607 	req->__data_len -= total_bytes;
2608 
2609 	/* update sector only for requests with clear definition of sector */
2610 	if (!blk_rq_is_passthrough(req))
2611 		req->__sector += total_bytes >> 9;
2612 
2613 	/* mixed attributes always follow the first bio */
2614 	if (req->rq_flags & RQF_MIXED_MERGE) {
2615 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
2616 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
2617 	}
2618 
2619 	/*
2620 	 * If total number of sectors is less than the first segment
2621 	 * size, something has gone terribly wrong.
2622 	 */
2623 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2624 		blk_dump_rq_flags(req, "request botched");
2625 		req->__data_len = blk_rq_cur_bytes(req);
2626 	}
2627 
2628 	/* recalculate the number of segments */
2629 	blk_recalc_rq_segments(req);
2630 
2631 	return true;
2632 }
2633 EXPORT_SYMBOL_GPL(blk_update_request);
2634 
2635 static bool blk_update_bidi_request(struct request *rq, int error,
2636 				    unsigned int nr_bytes,
2637 				    unsigned int bidi_bytes)
2638 {
2639 	if (blk_update_request(rq, error, nr_bytes))
2640 		return true;
2641 
2642 	/* Bidi request must be completed as a whole */
2643 	if (unlikely(blk_bidi_rq(rq)) &&
2644 	    blk_update_request(rq->next_rq, error, bidi_bytes))
2645 		return true;
2646 
2647 	if (blk_queue_add_random(rq->q))
2648 		add_disk_randomness(rq->rq_disk);
2649 
2650 	return false;
2651 }
2652 
2653 /**
2654  * blk_unprep_request - unprepare a request
2655  * @req:	the request
2656  *
2657  * This function makes a request ready for complete resubmission (or
2658  * completion).  It happens only after all error handling is complete,
2659  * so represents the appropriate moment to deallocate any resources
2660  * that were allocated to the request in the prep_rq_fn.  The queue
2661  * lock is held when calling this.
2662  */
2663 void blk_unprep_request(struct request *req)
2664 {
2665 	struct request_queue *q = req->q;
2666 
2667 	req->rq_flags &= ~RQF_DONTPREP;
2668 	if (q->unprep_rq_fn)
2669 		q->unprep_rq_fn(q, req);
2670 }
2671 EXPORT_SYMBOL_GPL(blk_unprep_request);
2672 
2673 /*
2674  * queue lock must be held
2675  */
2676 void blk_finish_request(struct request *req, int error)
2677 {
2678 	struct request_queue *q = req->q;
2679 
2680 	if (req->rq_flags & RQF_STATS)
2681 		blk_stat_add(&q->rq_stats[rq_data_dir(req)], req);
2682 
2683 	if (req->rq_flags & RQF_QUEUED)
2684 		blk_queue_end_tag(q, req);
2685 
2686 	BUG_ON(blk_queued_rq(req));
2687 
2688 	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
2689 		laptop_io_completion(req->q->backing_dev_info);
2690 
2691 	blk_delete_timer(req);
2692 
2693 	if (req->rq_flags & RQF_DONTPREP)
2694 		blk_unprep_request(req);
2695 
2696 	blk_account_io_done(req);
2697 
2698 	if (req->end_io) {
2699 		wbt_done(req->q->rq_wb, &req->issue_stat);
2700 		req->end_io(req, error);
2701 	} else {
2702 		if (blk_bidi_rq(req))
2703 			__blk_put_request(req->next_rq->q, req->next_rq);
2704 
2705 		__blk_put_request(q, req);
2706 	}
2707 }
2708 EXPORT_SYMBOL(blk_finish_request);
2709 
2710 /**
2711  * blk_end_bidi_request - Complete a bidi request
2712  * @rq:         the request to complete
2713  * @error:      %0 for success, < %0 for error
2714  * @nr_bytes:   number of bytes to complete @rq
2715  * @bidi_bytes: number of bytes to complete @rq->next_rq
2716  *
2717  * Description:
2718  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2719  *     Drivers that supports bidi can safely call this member for any
2720  *     type of request, bidi or uni.  In the later case @bidi_bytes is
2721  *     just ignored.
2722  *
2723  * Return:
2724  *     %false - we are done with this request
2725  *     %true  - still buffers pending for this request
2726  **/
2727 static bool blk_end_bidi_request(struct request *rq, int error,
2728 				 unsigned int nr_bytes, unsigned int bidi_bytes)
2729 {
2730 	struct request_queue *q = rq->q;
2731 	unsigned long flags;
2732 
2733 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2734 		return true;
2735 
2736 	spin_lock_irqsave(q->queue_lock, flags);
2737 	blk_finish_request(rq, error);
2738 	spin_unlock_irqrestore(q->queue_lock, flags);
2739 
2740 	return false;
2741 }
2742 
2743 /**
2744  * __blk_end_bidi_request - Complete a bidi request with queue lock held
2745  * @rq:         the request to complete
2746  * @error:      %0 for success, < %0 for error
2747  * @nr_bytes:   number of bytes to complete @rq
2748  * @bidi_bytes: number of bytes to complete @rq->next_rq
2749  *
2750  * Description:
2751  *     Identical to blk_end_bidi_request() except that queue lock is
2752  *     assumed to be locked on entry and remains so on return.
2753  *
2754  * Return:
2755  *     %false - we are done with this request
2756  *     %true  - still buffers pending for this request
2757  **/
2758 bool __blk_end_bidi_request(struct request *rq, int error,
2759 				   unsigned int nr_bytes, unsigned int bidi_bytes)
2760 {
2761 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2762 		return true;
2763 
2764 	blk_finish_request(rq, error);
2765 
2766 	return false;
2767 }
2768 
2769 /**
2770  * blk_end_request - Helper function for drivers to complete the request.
2771  * @rq:       the request being processed
2772  * @error:    %0 for success, < %0 for error
2773  * @nr_bytes: number of bytes to complete
2774  *
2775  * Description:
2776  *     Ends I/O on a number of bytes attached to @rq.
2777  *     If @rq has leftover, sets it up for the next range of segments.
2778  *
2779  * Return:
2780  *     %false - we are done with this request
2781  *     %true  - still buffers pending for this request
2782  **/
2783 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2784 {
2785 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
2786 }
2787 EXPORT_SYMBOL(blk_end_request);
2788 
2789 /**
2790  * blk_end_request_all - Helper function for drives to finish the request.
2791  * @rq: the request to finish
2792  * @error: %0 for success, < %0 for error
2793  *
2794  * Description:
2795  *     Completely finish @rq.
2796  */
2797 void blk_end_request_all(struct request *rq, int error)
2798 {
2799 	bool pending;
2800 	unsigned int bidi_bytes = 0;
2801 
2802 	if (unlikely(blk_bidi_rq(rq)))
2803 		bidi_bytes = blk_rq_bytes(rq->next_rq);
2804 
2805 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2806 	BUG_ON(pending);
2807 }
2808 EXPORT_SYMBOL(blk_end_request_all);
2809 
2810 /**
2811  * blk_end_request_cur - Helper function to finish the current request chunk.
2812  * @rq: the request to finish the current chunk for
2813  * @error: %0 for success, < %0 for error
2814  *
2815  * Description:
2816  *     Complete the current consecutively mapped chunk from @rq.
2817  *
2818  * Return:
2819  *     %false - we are done with this request
2820  *     %true  - still buffers pending for this request
2821  */
2822 bool blk_end_request_cur(struct request *rq, int error)
2823 {
2824 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2825 }
2826 EXPORT_SYMBOL(blk_end_request_cur);
2827 
2828 /**
2829  * blk_end_request_err - Finish a request till the next failure boundary.
2830  * @rq: the request to finish till the next failure boundary for
2831  * @error: must be negative errno
2832  *
2833  * Description:
2834  *     Complete @rq till the next failure boundary.
2835  *
2836  * Return:
2837  *     %false - we are done with this request
2838  *     %true  - still buffers pending for this request
2839  */
2840 bool blk_end_request_err(struct request *rq, int error)
2841 {
2842 	WARN_ON(error >= 0);
2843 	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2844 }
2845 EXPORT_SYMBOL_GPL(blk_end_request_err);
2846 
2847 /**
2848  * __blk_end_request - Helper function for drivers to complete the request.
2849  * @rq:       the request being processed
2850  * @error:    %0 for success, < %0 for error
2851  * @nr_bytes: number of bytes to complete
2852  *
2853  * Description:
2854  *     Must be called with queue lock held unlike blk_end_request().
2855  *
2856  * Return:
2857  *     %false - we are done with this request
2858  *     %true  - still buffers pending for this request
2859  **/
2860 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2861 {
2862 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2863 }
2864 EXPORT_SYMBOL(__blk_end_request);
2865 
2866 /**
2867  * __blk_end_request_all - Helper function for drives to finish the request.
2868  * @rq: the request to finish
2869  * @error: %0 for success, < %0 for error
2870  *
2871  * Description:
2872  *     Completely finish @rq.  Must be called with queue lock held.
2873  */
2874 void __blk_end_request_all(struct request *rq, int error)
2875 {
2876 	bool pending;
2877 	unsigned int bidi_bytes = 0;
2878 
2879 	if (unlikely(blk_bidi_rq(rq)))
2880 		bidi_bytes = blk_rq_bytes(rq->next_rq);
2881 
2882 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2883 	BUG_ON(pending);
2884 }
2885 EXPORT_SYMBOL(__blk_end_request_all);
2886 
2887 /**
2888  * __blk_end_request_cur - Helper function to finish the current request chunk.
2889  * @rq: the request to finish the current chunk for
2890  * @error: %0 for success, < %0 for error
2891  *
2892  * Description:
2893  *     Complete the current consecutively mapped chunk from @rq.  Must
2894  *     be called with queue lock held.
2895  *
2896  * Return:
2897  *     %false - we are done with this request
2898  *     %true  - still buffers pending for this request
2899  */
2900 bool __blk_end_request_cur(struct request *rq, int error)
2901 {
2902 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2903 }
2904 EXPORT_SYMBOL(__blk_end_request_cur);
2905 
2906 /**
2907  * __blk_end_request_err - Finish a request till the next failure boundary.
2908  * @rq: the request to finish till the next failure boundary for
2909  * @error: must be negative errno
2910  *
2911  * Description:
2912  *     Complete @rq till the next failure boundary.  Must be called
2913  *     with queue lock held.
2914  *
2915  * Return:
2916  *     %false - we are done with this request
2917  *     %true  - still buffers pending for this request
2918  */
2919 bool __blk_end_request_err(struct request *rq, int error)
2920 {
2921 	WARN_ON(error >= 0);
2922 	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2923 }
2924 EXPORT_SYMBOL_GPL(__blk_end_request_err);
2925 
2926 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2927 		     struct bio *bio)
2928 {
2929 	if (bio_has_data(bio))
2930 		rq->nr_phys_segments = bio_phys_segments(q, bio);
2931 
2932 	rq->__data_len = bio->bi_iter.bi_size;
2933 	rq->bio = rq->biotail = bio;
2934 
2935 	if (bio->bi_bdev)
2936 		rq->rq_disk = bio->bi_bdev->bd_disk;
2937 }
2938 
2939 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2940 /**
2941  * rq_flush_dcache_pages - Helper function to flush all pages in a request
2942  * @rq: the request to be flushed
2943  *
2944  * Description:
2945  *     Flush all pages in @rq.
2946  */
2947 void rq_flush_dcache_pages(struct request *rq)
2948 {
2949 	struct req_iterator iter;
2950 	struct bio_vec bvec;
2951 
2952 	rq_for_each_segment(bvec, rq, iter)
2953 		flush_dcache_page(bvec.bv_page);
2954 }
2955 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2956 #endif
2957 
2958 /**
2959  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2960  * @q : the queue of the device being checked
2961  *
2962  * Description:
2963  *    Check if underlying low-level drivers of a device are busy.
2964  *    If the drivers want to export their busy state, they must set own
2965  *    exporting function using blk_queue_lld_busy() first.
2966  *
2967  *    Basically, this function is used only by request stacking drivers
2968  *    to stop dispatching requests to underlying devices when underlying
2969  *    devices are busy.  This behavior helps more I/O merging on the queue
2970  *    of the request stacking driver and prevents I/O throughput regression
2971  *    on burst I/O load.
2972  *
2973  * Return:
2974  *    0 - Not busy (The request stacking driver should dispatch request)
2975  *    1 - Busy (The request stacking driver should stop dispatching request)
2976  */
2977 int blk_lld_busy(struct request_queue *q)
2978 {
2979 	if (q->lld_busy_fn)
2980 		return q->lld_busy_fn(q);
2981 
2982 	return 0;
2983 }
2984 EXPORT_SYMBOL_GPL(blk_lld_busy);
2985 
2986 /**
2987  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2988  * @rq: the clone request to be cleaned up
2989  *
2990  * Description:
2991  *     Free all bios in @rq for a cloned request.
2992  */
2993 void blk_rq_unprep_clone(struct request *rq)
2994 {
2995 	struct bio *bio;
2996 
2997 	while ((bio = rq->bio) != NULL) {
2998 		rq->bio = bio->bi_next;
2999 
3000 		bio_put(bio);
3001 	}
3002 }
3003 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3004 
3005 /*
3006  * Copy attributes of the original request to the clone request.
3007  * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3008  */
3009 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
3010 {
3011 	dst->cpu = src->cpu;
3012 	dst->__sector = blk_rq_pos(src);
3013 	dst->__data_len = blk_rq_bytes(src);
3014 	dst->nr_phys_segments = src->nr_phys_segments;
3015 	dst->ioprio = src->ioprio;
3016 	dst->extra_len = src->extra_len;
3017 }
3018 
3019 /**
3020  * blk_rq_prep_clone - Helper function to setup clone request
3021  * @rq: the request to be setup
3022  * @rq_src: original request to be cloned
3023  * @bs: bio_set that bios for clone are allocated from
3024  * @gfp_mask: memory allocation mask for bio
3025  * @bio_ctr: setup function to be called for each clone bio.
3026  *           Returns %0 for success, non %0 for failure.
3027  * @data: private data to be passed to @bio_ctr
3028  *
3029  * Description:
3030  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3031  *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3032  *     are not copied, and copying such parts is the caller's responsibility.
3033  *     Also, pages which the original bios are pointing to are not copied
3034  *     and the cloned bios just point same pages.
3035  *     So cloned bios must be completed before original bios, which means
3036  *     the caller must complete @rq before @rq_src.
3037  */
3038 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3039 		      struct bio_set *bs, gfp_t gfp_mask,
3040 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
3041 		      void *data)
3042 {
3043 	struct bio *bio, *bio_src;
3044 
3045 	if (!bs)
3046 		bs = fs_bio_set;
3047 
3048 	__rq_for_each_bio(bio_src, rq_src) {
3049 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
3050 		if (!bio)
3051 			goto free_and_out;
3052 
3053 		if (bio_ctr && bio_ctr(bio, bio_src, data))
3054 			goto free_and_out;
3055 
3056 		if (rq->bio) {
3057 			rq->biotail->bi_next = bio;
3058 			rq->biotail = bio;
3059 		} else
3060 			rq->bio = rq->biotail = bio;
3061 	}
3062 
3063 	__blk_rq_prep_clone(rq, rq_src);
3064 
3065 	return 0;
3066 
3067 free_and_out:
3068 	if (bio)
3069 		bio_put(bio);
3070 	blk_rq_unprep_clone(rq);
3071 
3072 	return -ENOMEM;
3073 }
3074 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
3075 
3076 int kblockd_schedule_work(struct work_struct *work)
3077 {
3078 	return queue_work(kblockd_workqueue, work);
3079 }
3080 EXPORT_SYMBOL(kblockd_schedule_work);
3081 
3082 int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3083 {
3084 	return queue_work_on(cpu, kblockd_workqueue, work);
3085 }
3086 EXPORT_SYMBOL(kblockd_schedule_work_on);
3087 
3088 int kblockd_schedule_delayed_work(struct delayed_work *dwork,
3089 				  unsigned long delay)
3090 {
3091 	return queue_delayed_work(kblockd_workqueue, dwork, delay);
3092 }
3093 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
3094 
3095 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3096 				     unsigned long delay)
3097 {
3098 	return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3099 }
3100 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
3101 
3102 /**
3103  * blk_start_plug - initialize blk_plug and track it inside the task_struct
3104  * @plug:	The &struct blk_plug that needs to be initialized
3105  *
3106  * Description:
3107  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
3108  *   pending I/O should the task end up blocking between blk_start_plug() and
3109  *   blk_finish_plug(). This is important from a performance perspective, but
3110  *   also ensures that we don't deadlock. For instance, if the task is blocking
3111  *   for a memory allocation, memory reclaim could end up wanting to free a
3112  *   page belonging to that request that is currently residing in our private
3113  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
3114  *   this kind of deadlock.
3115  */
3116 void blk_start_plug(struct blk_plug *plug)
3117 {
3118 	struct task_struct *tsk = current;
3119 
3120 	/*
3121 	 * If this is a nested plug, don't actually assign it.
3122 	 */
3123 	if (tsk->plug)
3124 		return;
3125 
3126 	INIT_LIST_HEAD(&plug->list);
3127 	INIT_LIST_HEAD(&plug->mq_list);
3128 	INIT_LIST_HEAD(&plug->cb_list);
3129 	/*
3130 	 * Store ordering should not be needed here, since a potential
3131 	 * preempt will imply a full memory barrier
3132 	 */
3133 	tsk->plug = plug;
3134 }
3135 EXPORT_SYMBOL(blk_start_plug);
3136 
3137 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3138 {
3139 	struct request *rqa = container_of(a, struct request, queuelist);
3140 	struct request *rqb = container_of(b, struct request, queuelist);
3141 
3142 	return !(rqa->q < rqb->q ||
3143 		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
3144 }
3145 
3146 /*
3147  * If 'from_schedule' is true, then postpone the dispatch of requests
3148  * until a safe kblockd context. We due this to avoid accidental big
3149  * additional stack usage in driver dispatch, in places where the originally
3150  * plugger did not intend it.
3151  */
3152 static void queue_unplugged(struct request_queue *q, unsigned int depth,
3153 			    bool from_schedule)
3154 	__releases(q->queue_lock)
3155 {
3156 	trace_block_unplug(q, depth, !from_schedule);
3157 
3158 	if (from_schedule)
3159 		blk_run_queue_async(q);
3160 	else
3161 		__blk_run_queue(q);
3162 	spin_unlock(q->queue_lock);
3163 }
3164 
3165 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
3166 {
3167 	LIST_HEAD(callbacks);
3168 
3169 	while (!list_empty(&plug->cb_list)) {
3170 		list_splice_init(&plug->cb_list, &callbacks);
3171 
3172 		while (!list_empty(&callbacks)) {
3173 			struct blk_plug_cb *cb = list_first_entry(&callbacks,
3174 							  struct blk_plug_cb,
3175 							  list);
3176 			list_del(&cb->list);
3177 			cb->callback(cb, from_schedule);
3178 		}
3179 	}
3180 }
3181 
3182 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3183 				      int size)
3184 {
3185 	struct blk_plug *plug = current->plug;
3186 	struct blk_plug_cb *cb;
3187 
3188 	if (!plug)
3189 		return NULL;
3190 
3191 	list_for_each_entry(cb, &plug->cb_list, list)
3192 		if (cb->callback == unplug && cb->data == data)
3193 			return cb;
3194 
3195 	/* Not currently on the callback list */
3196 	BUG_ON(size < sizeof(*cb));
3197 	cb = kzalloc(size, GFP_ATOMIC);
3198 	if (cb) {
3199 		cb->data = data;
3200 		cb->callback = unplug;
3201 		list_add(&cb->list, &plug->cb_list);
3202 	}
3203 	return cb;
3204 }
3205 EXPORT_SYMBOL(blk_check_plugged);
3206 
3207 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3208 {
3209 	struct request_queue *q;
3210 	unsigned long flags;
3211 	struct request *rq;
3212 	LIST_HEAD(list);
3213 	unsigned int depth;
3214 
3215 	flush_plug_callbacks(plug, from_schedule);
3216 
3217 	if (!list_empty(&plug->mq_list))
3218 		blk_mq_flush_plug_list(plug, from_schedule);
3219 
3220 	if (list_empty(&plug->list))
3221 		return;
3222 
3223 	list_splice_init(&plug->list, &list);
3224 
3225 	list_sort(NULL, &list, plug_rq_cmp);
3226 
3227 	q = NULL;
3228 	depth = 0;
3229 
3230 	/*
3231 	 * Save and disable interrupts here, to avoid doing it for every
3232 	 * queue lock we have to take.
3233 	 */
3234 	local_irq_save(flags);
3235 	while (!list_empty(&list)) {
3236 		rq = list_entry_rq(list.next);
3237 		list_del_init(&rq->queuelist);
3238 		BUG_ON(!rq->q);
3239 		if (rq->q != q) {
3240 			/*
3241 			 * This drops the queue lock
3242 			 */
3243 			if (q)
3244 				queue_unplugged(q, depth, from_schedule);
3245 			q = rq->q;
3246 			depth = 0;
3247 			spin_lock(q->queue_lock);
3248 		}
3249 
3250 		/*
3251 		 * Short-circuit if @q is dead
3252 		 */
3253 		if (unlikely(blk_queue_dying(q))) {
3254 			__blk_end_request_all(rq, -ENODEV);
3255 			continue;
3256 		}
3257 
3258 		/*
3259 		 * rq is already accounted, so use raw insert
3260 		 */
3261 		if (op_is_flush(rq->cmd_flags))
3262 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3263 		else
3264 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3265 
3266 		depth++;
3267 	}
3268 
3269 	/*
3270 	 * This drops the queue lock
3271 	 */
3272 	if (q)
3273 		queue_unplugged(q, depth, from_schedule);
3274 
3275 	local_irq_restore(flags);
3276 }
3277 
3278 void blk_finish_plug(struct blk_plug *plug)
3279 {
3280 	if (plug != current->plug)
3281 		return;
3282 	blk_flush_plug_list(plug, false);
3283 
3284 	current->plug = NULL;
3285 }
3286 EXPORT_SYMBOL(blk_finish_plug);
3287 
3288 #ifdef CONFIG_PM
3289 /**
3290  * blk_pm_runtime_init - Block layer runtime PM initialization routine
3291  * @q: the queue of the device
3292  * @dev: the device the queue belongs to
3293  *
3294  * Description:
3295  *    Initialize runtime-PM-related fields for @q and start auto suspend for
3296  *    @dev. Drivers that want to take advantage of request-based runtime PM
3297  *    should call this function after @dev has been initialized, and its
3298  *    request queue @q has been allocated, and runtime PM for it can not happen
3299  *    yet(either due to disabled/forbidden or its usage_count > 0). In most
3300  *    cases, driver should call this function before any I/O has taken place.
3301  *
3302  *    This function takes care of setting up using auto suspend for the device,
3303  *    the autosuspend delay is set to -1 to make runtime suspend impossible
3304  *    until an updated value is either set by user or by driver. Drivers do
3305  *    not need to touch other autosuspend settings.
3306  *
3307  *    The block layer runtime PM is request based, so only works for drivers
3308  *    that use request as their IO unit instead of those directly use bio's.
3309  */
3310 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3311 {
3312 	q->dev = dev;
3313 	q->rpm_status = RPM_ACTIVE;
3314 	pm_runtime_set_autosuspend_delay(q->dev, -1);
3315 	pm_runtime_use_autosuspend(q->dev);
3316 }
3317 EXPORT_SYMBOL(blk_pm_runtime_init);
3318 
3319 /**
3320  * blk_pre_runtime_suspend - Pre runtime suspend check
3321  * @q: the queue of the device
3322  *
3323  * Description:
3324  *    This function will check if runtime suspend is allowed for the device
3325  *    by examining if there are any requests pending in the queue. If there
3326  *    are requests pending, the device can not be runtime suspended; otherwise,
3327  *    the queue's status will be updated to SUSPENDING and the driver can
3328  *    proceed to suspend the device.
3329  *
3330  *    For the not allowed case, we mark last busy for the device so that
3331  *    runtime PM core will try to autosuspend it some time later.
3332  *
3333  *    This function should be called near the start of the device's
3334  *    runtime_suspend callback.
3335  *
3336  * Return:
3337  *    0		- OK to runtime suspend the device
3338  *    -EBUSY	- Device should not be runtime suspended
3339  */
3340 int blk_pre_runtime_suspend(struct request_queue *q)
3341 {
3342 	int ret = 0;
3343 
3344 	if (!q->dev)
3345 		return ret;
3346 
3347 	spin_lock_irq(q->queue_lock);
3348 	if (q->nr_pending) {
3349 		ret = -EBUSY;
3350 		pm_runtime_mark_last_busy(q->dev);
3351 	} else {
3352 		q->rpm_status = RPM_SUSPENDING;
3353 	}
3354 	spin_unlock_irq(q->queue_lock);
3355 	return ret;
3356 }
3357 EXPORT_SYMBOL(blk_pre_runtime_suspend);
3358 
3359 /**
3360  * blk_post_runtime_suspend - Post runtime suspend processing
3361  * @q: the queue of the device
3362  * @err: return value of the device's runtime_suspend function
3363  *
3364  * Description:
3365  *    Update the queue's runtime status according to the return value of the
3366  *    device's runtime suspend function and mark last busy for the device so
3367  *    that PM core will try to auto suspend the device at a later time.
3368  *
3369  *    This function should be called near the end of the device's
3370  *    runtime_suspend callback.
3371  */
3372 void blk_post_runtime_suspend(struct request_queue *q, int err)
3373 {
3374 	if (!q->dev)
3375 		return;
3376 
3377 	spin_lock_irq(q->queue_lock);
3378 	if (!err) {
3379 		q->rpm_status = RPM_SUSPENDED;
3380 	} else {
3381 		q->rpm_status = RPM_ACTIVE;
3382 		pm_runtime_mark_last_busy(q->dev);
3383 	}
3384 	spin_unlock_irq(q->queue_lock);
3385 }
3386 EXPORT_SYMBOL(blk_post_runtime_suspend);
3387 
3388 /**
3389  * blk_pre_runtime_resume - Pre runtime resume processing
3390  * @q: the queue of the device
3391  *
3392  * Description:
3393  *    Update the queue's runtime status to RESUMING in preparation for the
3394  *    runtime resume of the device.
3395  *
3396  *    This function should be called near the start of the device's
3397  *    runtime_resume callback.
3398  */
3399 void blk_pre_runtime_resume(struct request_queue *q)
3400 {
3401 	if (!q->dev)
3402 		return;
3403 
3404 	spin_lock_irq(q->queue_lock);
3405 	q->rpm_status = RPM_RESUMING;
3406 	spin_unlock_irq(q->queue_lock);
3407 }
3408 EXPORT_SYMBOL(blk_pre_runtime_resume);
3409 
3410 /**
3411  * blk_post_runtime_resume - Post runtime resume processing
3412  * @q: the queue of the device
3413  * @err: return value of the device's runtime_resume function
3414  *
3415  * Description:
3416  *    Update the queue's runtime status according to the return value of the
3417  *    device's runtime_resume function. If it is successfully resumed, process
3418  *    the requests that are queued into the device's queue when it is resuming
3419  *    and then mark last busy and initiate autosuspend for it.
3420  *
3421  *    This function should be called near the end of the device's
3422  *    runtime_resume callback.
3423  */
3424 void blk_post_runtime_resume(struct request_queue *q, int err)
3425 {
3426 	if (!q->dev)
3427 		return;
3428 
3429 	spin_lock_irq(q->queue_lock);
3430 	if (!err) {
3431 		q->rpm_status = RPM_ACTIVE;
3432 		__blk_run_queue(q);
3433 		pm_runtime_mark_last_busy(q->dev);
3434 		pm_request_autosuspend(q->dev);
3435 	} else {
3436 		q->rpm_status = RPM_SUSPENDED;
3437 	}
3438 	spin_unlock_irq(q->queue_lock);
3439 }
3440 EXPORT_SYMBOL(blk_post_runtime_resume);
3441 
3442 /**
3443  * blk_set_runtime_active - Force runtime status of the queue to be active
3444  * @q: the queue of the device
3445  *
3446  * If the device is left runtime suspended during system suspend the resume
3447  * hook typically resumes the device and corrects runtime status
3448  * accordingly. However, that does not affect the queue runtime PM status
3449  * which is still "suspended". This prevents processing requests from the
3450  * queue.
3451  *
3452  * This function can be used in driver's resume hook to correct queue
3453  * runtime PM status and re-enable peeking requests from the queue. It
3454  * should be called before first request is added to the queue.
3455  */
3456 void blk_set_runtime_active(struct request_queue *q)
3457 {
3458 	spin_lock_irq(q->queue_lock);
3459 	q->rpm_status = RPM_ACTIVE;
3460 	pm_runtime_mark_last_busy(q->dev);
3461 	pm_request_autosuspend(q->dev);
3462 	spin_unlock_irq(q->queue_lock);
3463 }
3464 EXPORT_SYMBOL(blk_set_runtime_active);
3465 #endif
3466 
3467 int __init blk_dev_init(void)
3468 {
3469 	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3470 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3471 			FIELD_SIZEOF(struct request, cmd_flags));
3472 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3473 			FIELD_SIZEOF(struct bio, bi_opf));
3474 
3475 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
3476 	kblockd_workqueue = alloc_workqueue("kblockd",
3477 					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3478 	if (!kblockd_workqueue)
3479 		panic("Failed to create kblockd\n");
3480 
3481 	request_cachep = kmem_cache_create("blkdev_requests",
3482 			sizeof(struct request), 0, SLAB_PANIC, NULL);
3483 
3484 	blk_requestq_cachep = kmem_cache_create("request_queue",
3485 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3486 
3487 #ifdef CONFIG_DEBUG_FS
3488 	blk_debugfs_root = debugfs_create_dir("block", NULL);
3489 #endif
3490 
3491 	return 0;
3492 }
3493