xref: /openbmc/linux/block/blk-mq.c (revision ebb9a9ae)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/delay.h>
24 #include <linux/crash_dump.h>
25 #include <linux/prefetch.h>
26 
27 #include <trace/events/block.h>
28 
29 #include <linux/blk-mq.h>
30 #include "blk.h"
31 #include "blk-mq.h"
32 #include "blk-mq-tag.h"
33 #include "blk-stat.h"
34 #include "blk-wbt.h"
35 
36 static DEFINE_MUTEX(all_q_mutex);
37 static LIST_HEAD(all_q_list);
38 
39 /*
40  * Check if any of the ctx's have pending work in this hardware queue
41  */
42 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
43 {
44 	return sbitmap_any_bit_set(&hctx->ctx_map);
45 }
46 
47 /*
48  * Mark this ctx as having pending work in this hardware queue
49  */
50 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
51 				     struct blk_mq_ctx *ctx)
52 {
53 	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
54 		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
55 }
56 
57 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
58 				      struct blk_mq_ctx *ctx)
59 {
60 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
61 }
62 
63 void blk_mq_freeze_queue_start(struct request_queue *q)
64 {
65 	int freeze_depth;
66 
67 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
68 	if (freeze_depth == 1) {
69 		percpu_ref_kill(&q->q_usage_counter);
70 		blk_mq_run_hw_queues(q, false);
71 	}
72 }
73 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
74 
75 static void blk_mq_freeze_queue_wait(struct request_queue *q)
76 {
77 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
78 }
79 
80 /*
81  * Guarantee no request is in use, so we can change any data structure of
82  * the queue afterward.
83  */
84 void blk_freeze_queue(struct request_queue *q)
85 {
86 	/*
87 	 * In the !blk_mq case we are only calling this to kill the
88 	 * q_usage_counter, otherwise this increases the freeze depth
89 	 * and waits for it to return to zero.  For this reason there is
90 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
91 	 * exported to drivers, as the only user of unfreeze is blk_mq.
92 	 */
93 	blk_mq_freeze_queue_start(q);
94 	blk_mq_freeze_queue_wait(q);
95 }
96 
97 void blk_mq_freeze_queue(struct request_queue *q)
98 {
99 	/*
100 	 * ...just an alias to keep freeze and unfreeze actions balanced
101 	 * in the blk_mq_* namespace
102 	 */
103 	blk_freeze_queue(q);
104 }
105 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
106 
107 void blk_mq_unfreeze_queue(struct request_queue *q)
108 {
109 	int freeze_depth;
110 
111 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
112 	WARN_ON_ONCE(freeze_depth < 0);
113 	if (!freeze_depth) {
114 		percpu_ref_reinit(&q->q_usage_counter);
115 		wake_up_all(&q->mq_freeze_wq);
116 	}
117 }
118 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
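
/*
 * Usage sketch (illustrative, not from this file): a driver might
 * bracket a queue reconfiguration with a freeze/unfreeze pair so that
 * no request is in flight while state changes. mydrv_apply_settings()
 * is a hypothetical helper.
 *
 *	blk_mq_freeze_queue(q);
 *	mydrv_apply_settings(q);	(runs with q_usage_counter at zero)
 *	blk_mq_unfreeze_queue(q);
 */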
119 
120 /**
121  * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
122  * @q: request queue.
123  *
124  * Note: this function does not prevent the struct request end_io()
125  * callback from being invoked. Additionally, new queue_rq() calls may
126  * still occur unless the queue has been stopped first.
127  */
128 void blk_mq_quiesce_queue(struct request_queue *q)
129 {
130 	struct blk_mq_hw_ctx *hctx;
131 	unsigned int i;
132 	bool rcu = false;
133 
134 	blk_mq_stop_hw_queues(q);
135 
136 	queue_for_each_hw_ctx(q, hctx, i) {
137 		if (hctx->flags & BLK_MQ_F_BLOCKING)
138 			synchronize_srcu(&hctx->queue_rq_srcu);
139 		else
140 			rcu = true;
141 	}
142 	if (rcu)
143 		synchronize_rcu();
144 }
145 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
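
/*
 * Usage sketch: a driver can quiesce before swapping out resources that
 * ->queue_rq() dereferences, then restart the stopped queues when done.
 * mydrv_swap_resources() is a hypothetical helper.
 *
 *	blk_mq_quiesce_queue(q);	(no queue_rq() is running past here)
 *	mydrv_swap_resources(q);
 *	blk_mq_start_stopped_hw_queues(q, true);
 */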
146 
147 void blk_mq_wake_waiters(struct request_queue *q)
148 {
149 	struct blk_mq_hw_ctx *hctx;
150 	unsigned int i;
151 
152 	queue_for_each_hw_ctx(q, hctx, i)
153 		if (blk_mq_hw_queue_mapped(hctx))
154 			blk_mq_tag_wakeup_all(hctx->tags, true);
155 
156 	/*
157 	 * If we are called because the queue has now been marked as
158 	 * dying, we need to ensure that processes currently waiting on
159 	 * the queue are notified as well.
160 	 */
161 	wake_up_all(&q->mq_freeze_wq);
162 }
163 
164 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
165 {
166 	return blk_mq_has_free_tags(hctx->tags);
167 }
168 EXPORT_SYMBOL(blk_mq_can_queue);
169 
170 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
171 			       struct request *rq, unsigned int op)
172 {
173 	INIT_LIST_HEAD(&rq->queuelist);
174 	/* csd/requeue_work/fifo_time is initialized before use */
175 	rq->q = q;
176 	rq->mq_ctx = ctx;
177 	rq->cmd_flags = op;
178 	if (blk_queue_io_stat(q))
179 		rq->rq_flags |= RQF_IO_STAT;
180 	/* do not touch atomic flags, it needs atomic ops against the timer */
181 	rq->cpu = -1;
182 	INIT_HLIST_NODE(&rq->hash);
183 	RB_CLEAR_NODE(&rq->rb_node);
184 	rq->rq_disk = NULL;
185 	rq->part = NULL;
186 	rq->start_time = jiffies;
187 #ifdef CONFIG_BLK_CGROUP
188 	rq->rl = NULL;
189 	set_start_time_ns(rq);
190 	rq->io_start_time_ns = 0;
191 #endif
192 	rq->nr_phys_segments = 0;
193 #if defined(CONFIG_BLK_DEV_INTEGRITY)
194 	rq->nr_integrity_segments = 0;
195 #endif
196 	rq->special = NULL;
197 	/* tag was already set */
198 	rq->errors = 0;
199 
200 	rq->cmd = rq->__cmd;
201 
202 	rq->extra_len = 0;
203 	rq->sense_len = 0;
204 	rq->resid_len = 0;
205 	rq->sense = NULL;
206 
207 	INIT_LIST_HEAD(&rq->timeout_list);
208 	rq->timeout = 0;
209 
210 	rq->end_io = NULL;
211 	rq->end_io_data = NULL;
212 	rq->next_rq = NULL;
213 
214 	ctx->rq_dispatched[op_is_sync(op)]++;
215 }
216 
217 static struct request *
218 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
219 {
220 	struct request *rq;
221 	unsigned int tag;
222 
223 	tag = blk_mq_get_tag(data);
224 	if (tag != BLK_MQ_TAG_FAIL) {
225 		rq = data->hctx->tags->rqs[tag];
226 
227 		if (blk_mq_tag_busy(data->hctx)) {
228 			rq->rq_flags = RQF_MQ_INFLIGHT;
229 			atomic_inc(&data->hctx->nr_active);
230 		}
231 
232 		rq->tag = tag;
233 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
234 		return rq;
235 	}
236 
237 	return NULL;
238 }
239 
240 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
241 		unsigned int flags)
242 {
243 	struct blk_mq_ctx *ctx;
244 	struct blk_mq_hw_ctx *hctx;
245 	struct request *rq;
246 	struct blk_mq_alloc_data alloc_data;
247 	int ret;
248 
249 	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
250 	if (ret)
251 		return ERR_PTR(ret);
252 
253 	ctx = blk_mq_get_ctx(q);
254 	hctx = blk_mq_map_queue(q, ctx->cpu);
255 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
256 	rq = __blk_mq_alloc_request(&alloc_data, rw);
257 	blk_mq_put_ctx(ctx);
258 
259 	if (!rq) {
260 		blk_queue_exit(q);
261 		return ERR_PTR(-EWOULDBLOCK);
262 	}
263 
264 	rq->__data_len = 0;
265 	rq->__sector = (sector_t) -1;
266 	rq->bio = rq->biotail = NULL;
267 	return rq;
268 }
269 EXPORT_SYMBOL(blk_mq_alloc_request);
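
/*
 * Usage sketch: allocating a request directly, without a bio. READ is
 * just an example direction and error handling is trimmed; the caller
 * owns the request until blk_mq_free_request() (or an end_io path)
 * releases it.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, READ, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	(... fill in and execute the request ...)
 *	blk_mq_free_request(rq);
 */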
270 
271 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
272 		unsigned int flags, unsigned int hctx_idx)
273 {
274 	struct blk_mq_hw_ctx *hctx;
275 	struct blk_mq_ctx *ctx;
276 	struct request *rq;
277 	struct blk_mq_alloc_data alloc_data;
278 	int ret;
279 
280 	/*
281 	 * If the tag allocator sleeps we could get an allocation for a
282 	 * different hardware context.  No need to complicate the low level
283 	 * allocator for this for the rare use case of a command tied to
284 	 * a specific queue.
285 	 */
286 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
287 		return ERR_PTR(-EINVAL);
288 
289 	if (hctx_idx >= q->nr_hw_queues)
290 		return ERR_PTR(-EIO);
291 
292 	ret = blk_queue_enter(q, true);
293 	if (ret)
294 		return ERR_PTR(ret);
295 
296 	/*
297 	 * Check if the hardware context is actually mapped to anything.
298 	 * If not, tell the caller that it should skip this queue.
299 	 */
300 	hctx = q->queue_hw_ctx[hctx_idx];
301 	if (!blk_mq_hw_queue_mapped(hctx)) {
302 		ret = -EXDEV;
303 		goto out_queue_exit;
304 	}
305 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
306 
307 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
308 	rq = __blk_mq_alloc_request(&alloc_data, rw);
309 	if (!rq) {
310 		ret = -EWOULDBLOCK;
311 		goto out_queue_exit;
312 	}
313 
314 	return rq;
315 
316 out_queue_exit:
317 	blk_queue_exit(q);
318 	return ERR_PTR(ret);
319 }
320 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
321 
322 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
323 				  struct blk_mq_ctx *ctx, struct request *rq)
324 {
325 	const int tag = rq->tag;
326 	struct request_queue *q = rq->q;
327 
328 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
329 		atomic_dec(&hctx->nr_active);
330 
331 	wbt_done(q->rq_wb, &rq->issue_stat);
332 	rq->rq_flags = 0;
333 
334 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
335 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
336 	blk_mq_put_tag(hctx, ctx, tag);
337 	blk_queue_exit(q);
338 }
339 
340 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
341 {
342 	struct blk_mq_ctx *ctx = rq->mq_ctx;
343 
344 	ctx->rq_completed[rq_is_sync(rq)]++;
345 	__blk_mq_free_request(hctx, ctx, rq);
346 
347 }
348 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
349 
350 void blk_mq_free_request(struct request *rq)
351 {
352 	blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
353 }
354 EXPORT_SYMBOL_GPL(blk_mq_free_request);
355 
356 inline void __blk_mq_end_request(struct request *rq, int error)
357 {
358 	blk_account_io_done(rq);
359 
360 	if (rq->end_io) {
361 		wbt_done(rq->q->rq_wb, &rq->issue_stat);
362 		rq->end_io(rq, error);
363 	} else {
364 		if (unlikely(blk_bidi_rq(rq)))
365 			blk_mq_free_request(rq->next_rq);
366 		blk_mq_free_request(rq);
367 	}
368 }
369 EXPORT_SYMBOL(__blk_mq_end_request);
370 
371 void blk_mq_end_request(struct request *rq, int error)
372 {
373 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
374 		BUG();
375 	__blk_mq_end_request(rq, error);
376 }
377 EXPORT_SYMBOL(blk_mq_end_request);
378 
379 static void __blk_mq_complete_request_remote(void *data)
380 {
381 	struct request *rq = data;
382 
383 	rq->q->softirq_done_fn(rq);
384 }
385 
386 static void blk_mq_ipi_complete_request(struct request *rq)
387 {
388 	struct blk_mq_ctx *ctx = rq->mq_ctx;
389 	bool shared = false;
390 	int cpu;
391 
392 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
393 		rq->q->softirq_done_fn(rq);
394 		return;
395 	}
396 
397 	cpu = get_cpu();
398 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
399 		shared = cpus_share_cache(cpu, ctx->cpu);
400 
401 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
402 		rq->csd.func = __blk_mq_complete_request_remote;
403 		rq->csd.info = rq;
404 		rq->csd.flags = 0;
405 		smp_call_function_single_async(ctx->cpu, &rq->csd);
406 	} else {
407 		rq->q->softirq_done_fn(rq);
408 	}
409 	put_cpu();
410 }
411 
412 static void blk_mq_stat_add(struct request *rq)
413 {
414 	if (rq->rq_flags & RQF_STATS) {
415 		/*
416 		 * We could use rq->mq_ctx here, but there's less of a risk
417 		 * of races if we have the completion event add the stats
418 		 * to the local software queue.
419 		 */
420 		struct blk_mq_ctx *ctx;
421 
422 		ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
423 		blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
424 	}
425 }
426 
427 static void __blk_mq_complete_request(struct request *rq)
428 {
429 	struct request_queue *q = rq->q;
430 
431 	blk_mq_stat_add(rq);
432 
433 	if (!q->softirq_done_fn)
434 		blk_mq_end_request(rq, rq->errors);
435 	else
436 		blk_mq_ipi_complete_request(rq);
437 }
438 
439 /**
440  * blk_mq_complete_request - end I/O on a request
441  * @rq:		the request being processed
442  *
443  * Description:
444  *	Ends all I/O on a request. It does not handle partial completions.
445  *	The actual completion happens out-of-order, through an IPI handler.
446  **/
447 void blk_mq_complete_request(struct request *rq, int error)
448 {
449 	struct request_queue *q = rq->q;
450 
451 	if (unlikely(blk_should_fake_timeout(q)))
452 		return;
453 	if (!blk_mark_rq_complete(rq)) {
454 		rq->errors = error;
455 		__blk_mq_complete_request(rq);
456 	}
457 }
458 EXPORT_SYMBOL(blk_mq_complete_request);
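
/*
 * Usage sketch: drivers normally call blk_mq_complete_request() from
 * their completion context and finish the request in the registered
 * softirq_done_fn. The mydrv_* names below are hypothetical.
 *
 *	static void mydrv_softirq_done(struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_end_request(rq, cmd->status);
 *	}
 */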
459 
460 int blk_mq_request_started(struct request *rq)
461 {
462 	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
463 }
464 EXPORT_SYMBOL_GPL(blk_mq_request_started);
465 
466 void blk_mq_start_request(struct request *rq)
467 {
468 	struct request_queue *q = rq->q;
469 
470 	trace_block_rq_issue(q, rq);
471 
472 	rq->resid_len = blk_rq_bytes(rq);
473 	if (unlikely(blk_bidi_rq(rq)))
474 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
475 
476 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
477 		blk_stat_set_issue_time(&rq->issue_stat);
478 		rq->rq_flags |= RQF_STATS;
479 		wbt_issue(q->rq_wb, &rq->issue_stat);
480 	}
481 
482 	blk_add_timer(rq);
483 
484 	/*
485 	 * Ensure that ->deadline is visible before we set the started
486 	 * flag and clear the completed flag.
487 	 */
488 	smp_mb__before_atomic();
489 
490 	/*
491 	 * Mark us as started and clear complete. Complete might have been
492 	 * set if requeue raced with timeout, which then marked it as
493 	 * complete. So be sure to clear complete again when we start
494 	 * the request, otherwise we'll ignore the completion event.
495 	 */
496 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
497 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
498 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
499 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
500 
501 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
502 		/*
503 		 * Make sure space for the drain appears.  We know we can do
504 		 * this because max_hw_segments has been adjusted to be one
505 		 * fewer than the device can handle.
506 		 */
507 		rq->nr_phys_segments++;
508 	}
509 }
510 EXPORT_SYMBOL(blk_mq_start_request);
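
/*
 * Usage sketch: the expected shape of a driver's ->queue_rq(), which
 * must call blk_mq_start_request() before issuing to hardware. The
 * mydrv_* names are hypothetical.
 *
 *	static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				  const struct blk_mq_queue_data *bd)
 *	{
 *		blk_mq_start_request(bd->rq);
 *		if (!mydrv_submit_to_hw(hctx->driver_data, bd->rq))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */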
511 
512 static void __blk_mq_requeue_request(struct request *rq)
513 {
514 	struct request_queue *q = rq->q;
515 
516 	trace_block_rq_requeue(q, rq);
517 	wbt_requeue(q->rq_wb, &rq->issue_stat);
518 
519 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
520 		if (q->dma_drain_size && blk_rq_bytes(rq))
521 			rq->nr_phys_segments--;
522 	}
523 }
524 
525 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
526 {
527 	__blk_mq_requeue_request(rq);
528 
529 	BUG_ON(blk_queued_rq(rq));
530 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
531 }
532 EXPORT_SYMBOL(blk_mq_requeue_request);
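
/*
 * Usage sketch: a completion path that wants to retry instead of
 * finishing a request can hand it back and kick the requeue list so it
 * is reinserted and dispatched again. mydrv_is_transient_error() is
 * hypothetical.
 *
 *	if (mydrv_is_transient_error(status)) {
 *		blk_mq_requeue_request(rq, true);
 *		return;
 *	}
 *	blk_mq_end_request(rq, status);
 */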
533 
534 static void blk_mq_requeue_work(struct work_struct *work)
535 {
536 	struct request_queue *q =
537 		container_of(work, struct request_queue, requeue_work.work);
538 	LIST_HEAD(rq_list);
539 	struct request *rq, *next;
540 	unsigned long flags;
541 
542 	spin_lock_irqsave(&q->requeue_lock, flags);
543 	list_splice_init(&q->requeue_list, &rq_list);
544 	spin_unlock_irqrestore(&q->requeue_lock, flags);
545 
546 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
547 		if (!(rq->rq_flags & RQF_SOFTBARRIER))
548 			continue;
549 
550 		rq->rq_flags &= ~RQF_SOFTBARRIER;
551 		list_del_init(&rq->queuelist);
552 		blk_mq_insert_request(rq, true, false, false);
553 	}
554 
555 	while (!list_empty(&rq_list)) {
556 		rq = list_entry(rq_list.next, struct request, queuelist);
557 		list_del_init(&rq->queuelist);
558 		blk_mq_insert_request(rq, false, false, false);
559 	}
560 
561 	blk_mq_run_hw_queues(q, false);
562 }
563 
564 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
565 				bool kick_requeue_list)
566 {
567 	struct request_queue *q = rq->q;
568 	unsigned long flags;
569 
570 	/*
571 	 * We abuse this flag that is otherwise used by the I/O scheduler to
572 	 * request head insertion from the workqueue.
573 	 */
574 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
575 
576 	spin_lock_irqsave(&q->requeue_lock, flags);
577 	if (at_head) {
578 		rq->rq_flags |= RQF_SOFTBARRIER;
579 		list_add(&rq->queuelist, &q->requeue_list);
580 	} else {
581 		list_add_tail(&rq->queuelist, &q->requeue_list);
582 	}
583 	spin_unlock_irqrestore(&q->requeue_lock, flags);
584 
585 	if (kick_requeue_list)
586 		blk_mq_kick_requeue_list(q);
587 }
588 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
589 
590 void blk_mq_kick_requeue_list(struct request_queue *q)
591 {
592 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
593 }
594 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
595 
596 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
597 				    unsigned long msecs)
598 {
599 	kblockd_schedule_delayed_work(&q->requeue_work,
600 				      msecs_to_jiffies(msecs));
601 }
602 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
603 
604 void blk_mq_abort_requeue_list(struct request_queue *q)
605 {
606 	unsigned long flags;
607 	LIST_HEAD(rq_list);
608 
609 	spin_lock_irqsave(&q->requeue_lock, flags);
610 	list_splice_init(&q->requeue_list, &rq_list);
611 	spin_unlock_irqrestore(&q->requeue_lock, flags);
612 
613 	while (!list_empty(&rq_list)) {
614 		struct request *rq;
615 
616 		rq = list_first_entry(&rq_list, struct request, queuelist);
617 		list_del_init(&rq->queuelist);
618 		rq->errors = -EIO;
619 		blk_mq_end_request(rq, rq->errors);
620 	}
621 }
622 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
623 
624 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
625 {
626 	if (tag < tags->nr_tags) {
627 		prefetch(tags->rqs[tag]);
628 		return tags->rqs[tag];
629 	}
630 
631 	return NULL;
632 }
633 EXPORT_SYMBOL(blk_mq_tag_to_rq);
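
/*
 * Usage sketch: an interrupt handler that reads a completed tag from
 * its hardware completion queue can map the tag back to a request and
 * complete it. The mydrv_cqe structure and mydrv_pop_completion() are
 * hypothetical.
 *
 *	struct mydrv_cqe cqe = mydrv_pop_completion(hw);
 *	struct request *rq = blk_mq_tag_to_rq(hctx->tags, cqe.tag);
 *
 *	if (rq)
 *		blk_mq_complete_request(rq, cqe.status);
 */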
634 
635 struct blk_mq_timeout_data {
636 	unsigned long next;
637 	unsigned int next_set;
638 };
639 
640 void blk_mq_rq_timed_out(struct request *req, bool reserved)
641 {
642 	struct blk_mq_ops *ops = req->q->mq_ops;
643 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
644 
645 	/*
646 	 * We know that complete is set at this point. If STARTED isn't set
647 	 * anymore, then the request isn't active and the "timeout" should
648 	 * just be ignored. This can happen due to the bitflag ordering.
649 	 * Timeout first checks if STARTED is set, and if it is, assumes
650 	 * the request is active. But if we race with completion, then
651 	 * both flags will get cleared. So check here again, and ignore
652 	 * a timeout event with a request that isn't active.
653 	 */
654 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
655 		return;
656 
657 	if (ops->timeout)
658 		ret = ops->timeout(req, reserved);
659 
660 	switch (ret) {
661 	case BLK_EH_HANDLED:
662 		__blk_mq_complete_request(req);
663 		break;
664 	case BLK_EH_RESET_TIMER:
665 		blk_add_timer(req);
666 		blk_clear_rq_complete(req);
667 		break;
668 	case BLK_EH_NOT_HANDLED:
669 		break;
670 	default:
671 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
672 		break;
673 	}
674 }
675 
676 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
677 		struct request *rq, void *priv, bool reserved)
678 {
679 	struct blk_mq_timeout_data *data = priv;
680 
681 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
682 		/*
683 		 * If a request wasn't started before the queue was
684 		 * marked dying, kill it here or it'll go unnoticed.
685 		 */
686 		if (unlikely(blk_queue_dying(rq->q))) {
687 			rq->errors = -EIO;
688 			blk_mq_end_request(rq, rq->errors);
689 		}
690 		return;
691 	}
692 
693 	if (time_after_eq(jiffies, rq->deadline)) {
694 		if (!blk_mark_rq_complete(rq))
695 			blk_mq_rq_timed_out(rq, reserved);
696 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
697 		data->next = rq->deadline;
698 		data->next_set = 1;
699 	}
700 }
701 
702 static void blk_mq_timeout_work(struct work_struct *work)
703 {
704 	struct request_queue *q =
705 		container_of(work, struct request_queue, timeout_work);
706 	struct blk_mq_timeout_data data = {
707 		.next		= 0,
708 		.next_set	= 0,
709 	};
710 	int i;
711 
712 	/* A deadlock might occur if a request is stuck requiring a
713 	 * timeout at the same time a queue freeze is waiting for
714 	 * completion, since the timeout code would not be able to
715 	 * acquire the queue reference here.
716 	 *
717 	 * That's why we don't use blk_queue_enter here; instead, we use
718 	 * percpu_ref_tryget directly, because we need to be able to
719 	 * obtain a reference even in the short window between the queue
720 	 * starting to freeze, by dropping the first reference in
721 	 * blk_mq_freeze_queue_start, and the moment the last request is
722 	 * consumed, marked by the instant q_usage_counter reaches
723 	 * zero.
724 	 */
725 	if (!percpu_ref_tryget(&q->q_usage_counter))
726 		return;
727 
728 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
729 
730 	if (data.next_set) {
731 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
732 		mod_timer(&q->timeout, data.next);
733 	} else {
734 		struct blk_mq_hw_ctx *hctx;
735 
736 		queue_for_each_hw_ctx(q, hctx, i) {
737 			/* the hctx may be unmapped, so check it here */
738 			if (blk_mq_hw_queue_mapped(hctx))
739 				blk_mq_tag_idle(hctx);
740 		}
741 	}
742 	blk_queue_exit(q);
743 }
744 
745 /*
746  * Walk our software queue in reverse for entries that we could potentially
747  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
748  * too much time checking for merges.
749  */
750 static bool blk_mq_attempt_merge(struct request_queue *q,
751 				 struct blk_mq_ctx *ctx, struct bio *bio)
752 {
753 	struct request *rq;
754 	int checked = 8;
755 
756 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
757 		int el_ret;
758 
759 		if (!checked--)
760 			break;
761 
762 		if (!blk_rq_merge_ok(rq, bio))
763 			continue;
764 
765 		el_ret = blk_try_merge(rq, bio);
766 		if (el_ret == ELEVATOR_BACK_MERGE) {
767 			if (bio_attempt_back_merge(q, rq, bio)) {
768 				ctx->rq_merged++;
769 				return true;
770 			}
771 			break;
772 		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
773 			if (bio_attempt_front_merge(q, rq, bio)) {
774 				ctx->rq_merged++;
775 				return true;
776 			}
777 			break;
778 		}
779 	}
780 
781 	return false;
782 }
783 
784 struct flush_busy_ctx_data {
785 	struct blk_mq_hw_ctx *hctx;
786 	struct list_head *list;
787 };
788 
789 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
790 {
791 	struct flush_busy_ctx_data *flush_data = data;
792 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
793 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
794 
795 	sbitmap_clear_bit(sb, bitnr);
796 	spin_lock(&ctx->lock);
797 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
798 	spin_unlock(&ctx->lock);
799 	return true;
800 }
801 
802 /*
803  * Process software queues that have been marked busy, splicing them
804  * to the for-dispatch list.
805  */
806 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
807 {
808 	struct flush_busy_ctx_data data = {
809 		.hctx = hctx,
810 		.list = list,
811 	};
812 
813 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
814 }
815 
816 static inline unsigned int queued_to_index(unsigned int queued)
817 {
818 	if (!queued)
819 		return 0;
820 
821 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
822 }
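
/*
 * For example: queued == 0 maps to bucket 0, 1 to bucket 1, 2-3 to
 * bucket 2, 4-7 to bucket 3, and so on, capped at
 * BLK_MQ_MAX_DISPATCH_ORDER - 1.
 */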
823 
824 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
825 {
826 	struct request_queue *q = hctx->queue;
827 	struct request *rq;
828 	LIST_HEAD(driver_list);
829 	struct list_head *dptr;
830 	int queued, ret = BLK_MQ_RQ_QUEUE_OK;
831 
832 	/*
833 	 * Start off with dptr being NULL, so we start the first request
834 	 * immediately, even if we have more pending.
835 	 */
836 	dptr = NULL;
837 
838 	/*
839 	 * Now process all the entries, sending them to the driver.
840 	 */
841 	queued = 0;
842 	while (!list_empty(list)) {
843 		struct blk_mq_queue_data bd;
844 
845 		rq = list_first_entry(list, struct request, queuelist);
846 		list_del_init(&rq->queuelist);
847 
848 		bd.rq = rq;
849 		bd.list = dptr;
850 		bd.last = list_empty(list);
851 
852 		ret = q->mq_ops->queue_rq(hctx, &bd);
853 		switch (ret) {
854 		case BLK_MQ_RQ_QUEUE_OK:
855 			queued++;
856 			break;
857 		case BLK_MQ_RQ_QUEUE_BUSY:
858 			list_add(&rq->queuelist, list);
859 			__blk_mq_requeue_request(rq);
860 			break;
861 		default:
862 			pr_err("blk-mq: bad return on queue: %d\n", ret);
863 		case BLK_MQ_RQ_QUEUE_ERROR:
864 			rq->errors = -EIO;
865 			blk_mq_end_request(rq, rq->errors);
866 			break;
867 		}
868 
869 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
870 			break;
871 
872 		/*
873 		 * We've done the first request. If we have more than 1
874 		 * left in the list, set dptr to defer issue.
875 		 */
876 		if (!dptr && list->next != list->prev)
877 			dptr = &driver_list;
878 	}
879 
880 	hctx->dispatched[queued_to_index(queued)]++;
881 
882 	/*
883 	 * Any items that need requeuing? Stuff them into hctx->dispatch;
884 	 * that is where we will continue on the next queue run.
885 	 */
886 	if (!list_empty(list)) {
887 		spin_lock(&hctx->lock);
888 		list_splice(list, &hctx->dispatch);
889 		spin_unlock(&hctx->lock);
890 
891 		/*
892 		 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
893 		 * it's possible the queue is stopped and restarted again
894 		 * before this. Queue restart will dispatch requests. And since
895 		 * requests in rq_list aren't added into hctx->dispatch yet,
896 		 * the requests in rq_list might get lost.
897 		 *
898 		 * blk_mq_run_hw_queue() already checks the STOPPED bit
899 		 */
900 		blk_mq_run_hw_queue(hctx, true);
901 	}
902 
903 	return ret != BLK_MQ_RQ_QUEUE_BUSY;
904 }
905 
906 /*
907  * Run this hardware queue, pulling any software queues mapped to it in.
908  * Note that this function currently has various problems around ordering
909  * of IO. In particular, we'd like FIFO behaviour on handling existing
910  * items on the hctx->dispatch list. Ignore that for now.
911  */
912 static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
913 {
914 	LIST_HEAD(rq_list);
915 	LIST_HEAD(driver_list);
916 
917 	if (unlikely(blk_mq_hctx_stopped(hctx)))
918 		return;
919 
920 	hctx->run++;
921 
922 	/*
923 	 * Touch any software queue that has pending entries.
924 	 */
925 	flush_busy_ctxs(hctx, &rq_list);
926 
927 	/*
928 	 * If we have previous entries on our dispatch list, grab them
929 	 * and stuff them at the front for more fair dispatch.
930 	 */
931 	if (!list_empty_careful(&hctx->dispatch)) {
932 		spin_lock(&hctx->lock);
933 		if (!list_empty(&hctx->dispatch))
934 			list_splice_init(&hctx->dispatch, &rq_list);
935 		spin_unlock(&hctx->lock);
936 	}
937 
938 	blk_mq_dispatch_rq_list(hctx, &rq_list);
939 }
940 
941 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
942 {
943 	int srcu_idx;
944 
945 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
946 		cpu_online(hctx->next_cpu));
947 
948 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
949 		rcu_read_lock();
950 		blk_mq_process_rq_list(hctx);
951 		rcu_read_unlock();
952 	} else {
953 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
954 		blk_mq_process_rq_list(hctx);
955 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
956 	}
957 }
958 
959 /*
960  * It'd be great if the workqueue API had a way to pass
961  * in a mask and had some smarts for more clever placement.
962  * For now we just round-robin here, switching for every
963  * BLK_MQ_CPU_WORK_BATCH queued items.
964  */
965 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
966 {
967 	if (hctx->queue->nr_hw_queues == 1)
968 		return WORK_CPU_UNBOUND;
969 
970 	if (--hctx->next_cpu_batch <= 0) {
971 		int next_cpu;
972 
973 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
974 		if (next_cpu >= nr_cpu_ids)
975 			next_cpu = cpumask_first(hctx->cpumask);
976 
977 		hctx->next_cpu = next_cpu;
978 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
979 	}
980 
981 	return hctx->next_cpu;
982 }
983 
984 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
985 {
986 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
987 		     !blk_mq_hw_queue_mapped(hctx)))
988 		return;
989 
990 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
991 		int cpu = get_cpu();
992 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
993 			__blk_mq_run_hw_queue(hctx);
994 			put_cpu();
995 			return;
996 		}
997 
998 		put_cpu();
999 	}
1000 
1001 	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
1002 }
1003 
1004 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1005 {
1006 	struct blk_mq_hw_ctx *hctx;
1007 	int i;
1008 
1009 	queue_for_each_hw_ctx(q, hctx, i) {
1010 		if ((!blk_mq_hctx_has_pending(hctx) &&
1011 		    list_empty_careful(&hctx->dispatch)) ||
1012 		    blk_mq_hctx_stopped(hctx))
1013 			continue;
1014 
1015 		blk_mq_run_hw_queue(hctx, async);
1016 	}
1017 }
1018 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1019 
1020 /**
1021  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1022  * @q: request queue.
1023  *
1024  * The caller is responsible for serializing this function against
1025  * blk_mq_{start,stop}_hw_queue().
1026  */
1027 bool blk_mq_queue_stopped(struct request_queue *q)
1028 {
1029 	struct blk_mq_hw_ctx *hctx;
1030 	int i;
1031 
1032 	queue_for_each_hw_ctx(q, hctx, i)
1033 		if (blk_mq_hctx_stopped(hctx))
1034 			return true;
1035 
1036 	return false;
1037 }
1038 EXPORT_SYMBOL(blk_mq_queue_stopped);
1039 
1040 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1041 {
1042 	cancel_work(&hctx->run_work);
1043 	cancel_delayed_work(&hctx->delay_work);
1044 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1045 }
1046 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1047 
1048 void blk_mq_stop_hw_queues(struct request_queue *q)
1049 {
1050 	struct blk_mq_hw_ctx *hctx;
1051 	int i;
1052 
1053 	queue_for_each_hw_ctx(q, hctx, i)
1054 		blk_mq_stop_hw_queue(hctx);
1055 }
1056 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1057 
1058 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1059 {
1060 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1061 
1062 	blk_mq_run_hw_queue(hctx, false);
1063 }
1064 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1065 
1066 void blk_mq_start_hw_queues(struct request_queue *q)
1067 {
1068 	struct blk_mq_hw_ctx *hctx;
1069 	int i;
1070 
1071 	queue_for_each_hw_ctx(q, hctx, i)
1072 		blk_mq_start_hw_queue(hctx);
1073 }
1074 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1075 
1076 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1077 {
1078 	if (!blk_mq_hctx_stopped(hctx))
1079 		return;
1080 
1081 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1082 	blk_mq_run_hw_queue(hctx, async);
1083 }
1084 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1085 
1086 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1087 {
1088 	struct blk_mq_hw_ctx *hctx;
1089 	int i;
1090 
1091 	queue_for_each_hw_ctx(q, hctx, i)
1092 		blk_mq_start_stopped_hw_queue(hctx, async);
1093 }
1094 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1095 
1096 static void blk_mq_run_work_fn(struct work_struct *work)
1097 {
1098 	struct blk_mq_hw_ctx *hctx;
1099 
1100 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
1101 
1102 	__blk_mq_run_hw_queue(hctx);
1103 }
1104 
1105 static void blk_mq_delay_work_fn(struct work_struct *work)
1106 {
1107 	struct blk_mq_hw_ctx *hctx;
1108 
1109 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1110 
1111 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1112 		__blk_mq_run_hw_queue(hctx);
1113 }
1114 
1115 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1116 {
1117 	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1118 		return;
1119 
1120 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1121 			&hctx->delay_work, msecs_to_jiffies(msecs));
1122 }
1123 EXPORT_SYMBOL(blk_mq_delay_queue);
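
/*
 * Usage sketch: a driver that runs out of hardware resources inside
 * ->queue_rq() can stop the queue and schedule a delayed restart
 * instead of spinning; the 3 msec delay is an arbitrary example.
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	blk_mq_delay_queue(hctx, 3);
 *	return BLK_MQ_RQ_QUEUE_BUSY;
 */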
1124 
1125 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1126 					    struct request *rq,
1127 					    bool at_head)
1128 {
1129 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1130 
1131 	trace_block_rq_insert(hctx->queue, rq);
1132 
1133 	if (at_head)
1134 		list_add(&rq->queuelist, &ctx->rq_list);
1135 	else
1136 		list_add_tail(&rq->queuelist, &ctx->rq_list);
1137 }
1138 
1139 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
1140 				    struct request *rq, bool at_head)
1141 {
1142 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1143 
1144 	__blk_mq_insert_req_list(hctx, rq, at_head);
1145 	blk_mq_hctx_mark_pending(hctx, ctx);
1146 }
1147 
1148 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1149 			   bool async)
1150 {
1151 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1152 	struct request_queue *q = rq->q;
1153 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1154 
1155 	spin_lock(&ctx->lock);
1156 	__blk_mq_insert_request(hctx, rq, at_head);
1157 	spin_unlock(&ctx->lock);
1158 
1159 	if (run_queue)
1160 		blk_mq_run_hw_queue(hctx, async);
1161 }
1162 
1163 static void blk_mq_insert_requests(struct request_queue *q,
1164 				     struct blk_mq_ctx *ctx,
1165 				     struct list_head *list,
1166 				     int depth,
1167 				     bool from_schedule)
1168 
1169 {
1170 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1171 
1172 	trace_block_unplug(q, depth, !from_schedule);
1173 
1174 	/*
1175 	 * Preemption doesn't flush the plug list, so it's possible that
1176 	 * ctx->cpu is offline now.
1177 	 */
1178 	spin_lock(&ctx->lock);
1179 	while (!list_empty(list)) {
1180 		struct request *rq;
1181 
1182 		rq = list_first_entry(list, struct request, queuelist);
1183 		BUG_ON(rq->mq_ctx != ctx);
1184 		list_del_init(&rq->queuelist);
1185 		__blk_mq_insert_req_list(hctx, rq, false);
1186 	}
1187 	blk_mq_hctx_mark_pending(hctx, ctx);
1188 	spin_unlock(&ctx->lock);
1189 
1190 	blk_mq_run_hw_queue(hctx, from_schedule);
1191 }
1192 
1193 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1194 {
1195 	struct request *rqa = container_of(a, struct request, queuelist);
1196 	struct request *rqb = container_of(b, struct request, queuelist);
1197 
1198 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1199 		 (rqa->mq_ctx == rqb->mq_ctx &&
1200 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1201 }
1202 
1203 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1204 {
1205 	struct blk_mq_ctx *this_ctx;
1206 	struct request_queue *this_q;
1207 	struct request *rq;
1208 	LIST_HEAD(list);
1209 	LIST_HEAD(ctx_list);
1210 	unsigned int depth;
1211 
1212 	list_splice_init(&plug->mq_list, &list);
1213 
1214 	list_sort(NULL, &list, plug_ctx_cmp);
1215 
1216 	this_q = NULL;
1217 	this_ctx = NULL;
1218 	depth = 0;
1219 
1220 	while (!list_empty(&list)) {
1221 		rq = list_entry_rq(list.next);
1222 		list_del_init(&rq->queuelist);
1223 		BUG_ON(!rq->q);
1224 		if (rq->mq_ctx != this_ctx) {
1225 			if (this_ctx) {
1226 				blk_mq_insert_requests(this_q, this_ctx,
1227 							&ctx_list, depth,
1228 							from_schedule);
1229 			}
1230 
1231 			this_ctx = rq->mq_ctx;
1232 			this_q = rq->q;
1233 			depth = 0;
1234 		}
1235 
1236 		depth++;
1237 		list_add_tail(&rq->queuelist, &ctx_list);
1238 	}
1239 
1240 	/*
1241 	 * If 'this_ctx' is set, we know we have entries to complete
1242 	 * on 'ctx_list'. Do those.
1243 	 */
1244 	if (this_ctx) {
1245 		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1246 				       from_schedule);
1247 	}
1248 }
1249 
1250 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1251 {
1252 	init_request_from_bio(rq, bio);
1253 
1254 	blk_account_io_start(rq, true);
1255 }
1256 
1257 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1258 {
1259 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1260 		!blk_queue_nomerges(hctx->queue);
1261 }
1262 
1263 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1264 					 struct blk_mq_ctx *ctx,
1265 					 struct request *rq, struct bio *bio)
1266 {
1267 	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1268 		blk_mq_bio_to_request(rq, bio);
1269 		spin_lock(&ctx->lock);
1270 insert_rq:
1271 		__blk_mq_insert_request(hctx, rq, false);
1272 		spin_unlock(&ctx->lock);
1273 		return false;
1274 	} else {
1275 		struct request_queue *q = hctx->queue;
1276 
1277 		spin_lock(&ctx->lock);
1278 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1279 			blk_mq_bio_to_request(rq, bio);
1280 			goto insert_rq;
1281 		}
1282 
1283 		spin_unlock(&ctx->lock);
1284 		__blk_mq_free_request(hctx, ctx, rq);
1285 		return true;
1286 	}
1287 }
1288 
1289 static struct request *blk_mq_map_request(struct request_queue *q,
1290 					  struct bio *bio,
1291 					  struct blk_mq_alloc_data *data)
1292 {
1293 	struct blk_mq_hw_ctx *hctx;
1294 	struct blk_mq_ctx *ctx;
1295 	struct request *rq;
1296 
1297 	blk_queue_enter_live(q);
1298 	ctx = blk_mq_get_ctx(q);
1299 	hctx = blk_mq_map_queue(q, ctx->cpu);
1300 
1301 	trace_block_getrq(q, bio, bio->bi_opf);
1302 	blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
1303 	rq = __blk_mq_alloc_request(data, bio->bi_opf);
1304 
1305 	data->hctx->queued++;
1306 	return rq;
1307 }
1308 
1309 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1310 {
1311 	int ret;
1312 	struct request_queue *q = rq->q;
1313 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
1314 	struct blk_mq_queue_data bd = {
1315 		.rq = rq,
1316 		.list = NULL,
1317 		.last = 1
1318 	};
1319 	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
1320 
1321 	if (blk_mq_hctx_stopped(hctx))
1322 		goto insert;
1323 
1324 	/*
1325 	 * If queueing was successful, we are done. On error, kill the
1326 	 * request. For any other result (busy), just add it back to our
1327 	 * list as we previously would have done.
1328 	 */
1329 	ret = q->mq_ops->queue_rq(hctx, &bd);
1330 	if (ret == BLK_MQ_RQ_QUEUE_OK) {
1331 		*cookie = new_cookie;
1332 		return;
1333 	}
1334 
1335 	__blk_mq_requeue_request(rq);
1336 
1337 	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1338 		*cookie = BLK_QC_T_NONE;
1339 		rq->errors = -EIO;
1340 		blk_mq_end_request(rq, rq->errors);
1341 		return;
1342 	}
1343 
1344 insert:
1345 	blk_mq_insert_request(rq, false, true, true);
1346 }
1347 
1348 /*
1349  * Multiple hardware queue variant. This will not use per-process plugs,
1350  * but will attempt to bypass the hctx queueing if we can go straight to
1351  * hardware for SYNC IO.
1352  */
1353 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1354 {
1355 	const int is_sync = op_is_sync(bio->bi_opf);
1356 	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1357 	struct blk_mq_alloc_data data;
1358 	struct request *rq;
1359 	unsigned int request_count = 0, srcu_idx;
1360 	struct blk_plug *plug;
1361 	struct request *same_queue_rq = NULL;
1362 	blk_qc_t cookie;
1363 	unsigned int wb_acct;
1364 
1365 	blk_queue_bounce(q, &bio);
1366 
1367 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1368 		bio_io_error(bio);
1369 		return BLK_QC_T_NONE;
1370 	}
1371 
1372 	blk_queue_split(q, &bio, q->bio_split);
1373 
1374 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1375 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1376 		return BLK_QC_T_NONE;
1377 
1378 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1379 
1380 	rq = blk_mq_map_request(q, bio, &data);
1381 	if (unlikely(!rq)) {
1382 		__wbt_done(q->rq_wb, wb_acct);
1383 		return BLK_QC_T_NONE;
1384 	}
1385 
1386 	wbt_track(&rq->issue_stat, wb_acct);
1387 
1388 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1389 
1390 	if (unlikely(is_flush_fua)) {
1391 		blk_mq_bio_to_request(rq, bio);
1392 		blk_insert_flush(rq);
1393 		goto run_queue;
1394 	}
1395 
1396 	plug = current->plug;
1397 	/*
1398 	 * If the driver supports deferred issue based on 'last', then
1399 	 * queue it up like normal since we can potentially save some
1400 	 * CPU this way.
1401 	 */
1402 	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1403 	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1404 		struct request *old_rq = NULL;
1405 
1406 		blk_mq_bio_to_request(rq, bio);
1407 
1408 		/*
1409 		 * We do limited plugging. If the bio can be merged, do that.
1410 		 * Otherwise the existing request in the plug list will be
1411 		 * issued. So the plug list will have at most one request.
1412 		 */
1413 		if (plug) {
1414 			/*
1415 			 * The plug list might get flushed before this. If that
1416 			 * happens, same_queue_rq is invalid and the plug list
1417 			 * is empty.
1418 			 */
1419 			if (same_queue_rq && !list_empty(&plug->mq_list)) {
1420 				old_rq = same_queue_rq;
1421 				list_del_init(&old_rq->queuelist);
1422 			}
1423 			list_add_tail(&rq->queuelist, &plug->mq_list);
1424 		} else /* is_sync */
1425 			old_rq = rq;
1426 		blk_mq_put_ctx(data.ctx);
1427 		if (!old_rq)
1428 			goto done;
1429 
1430 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1431 			rcu_read_lock();
1432 			blk_mq_try_issue_directly(old_rq, &cookie);
1433 			rcu_read_unlock();
1434 		} else {
1435 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1436 			blk_mq_try_issue_directly(old_rq, &cookie);
1437 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1438 		}
1439 		goto done;
1440 	}
1441 
1442 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1443 		/*
1444 		 * For a SYNC request, send it to the hardware immediately. For
1445 		 * an ASYNC request, just ensure that we run it later on. The
1446 		 * latter allows for merging opportunities and more efficient
1447 		 * dispatching.
1448 		 */
1449 run_queue:
1450 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1451 	}
1452 	blk_mq_put_ctx(data.ctx);
1453 done:
1454 	return cookie;
1455 }
1456 
1457 /*
1458  * Single hardware queue variant. This will attempt to use any per-process
1459  * plug for merging and IO deferral.
1460  */
1461 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1462 {
1463 	const int is_sync = op_is_sync(bio->bi_opf);
1464 	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1465 	struct blk_plug *plug;
1466 	unsigned int request_count = 0;
1467 	struct blk_mq_alloc_data data;
1468 	struct request *rq;
1469 	blk_qc_t cookie;
1470 	unsigned int wb_acct;
1471 
1472 	blk_queue_bounce(q, &bio);
1473 
1474 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1475 		bio_io_error(bio);
1476 		return BLK_QC_T_NONE;
1477 	}
1478 
1479 	blk_queue_split(q, &bio, q->bio_split);
1480 
1481 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
1482 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1483 			return BLK_QC_T_NONE;
1484 	} else
1485 		request_count = blk_plug_queued_count(q);
1486 
1487 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1488 
1489 	rq = blk_mq_map_request(q, bio, &data);
1490 	if (unlikely(!rq)) {
1491 		__wbt_done(q->rq_wb, wb_acct);
1492 		return BLK_QC_T_NONE;
1493 	}
1494 
1495 	wbt_track(&rq->issue_stat, wb_acct);
1496 
1497 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1498 
1499 	if (unlikely(is_flush_fua)) {
1500 		blk_mq_bio_to_request(rq, bio);
1501 		blk_insert_flush(rq);
1502 		goto run_queue;
1503 	}
1504 
1505 	/*
1506 	 * If a task plug currently exists, utilize it: since this is
1507 	 * completely lockless, we can temporarily store requests there
1508 	 * until the task is either done or scheduled away.
1509 	 */
1510 	plug = current->plug;
1511 	if (plug) {
1512 		struct request *last = NULL;
1513 
1514 		blk_mq_bio_to_request(rq, bio);
1515 
1516 		/*
1517 		 * @request_count may have become stale if we were scheduled
1518 		 * out, so check the list again.
1519 		 */
1520 		if (list_empty(&plug->mq_list))
1521 			request_count = 0;
1522 		if (!request_count)
1523 			trace_block_plug(q);
1524 		else
1525 			last = list_entry_rq(plug->mq_list.prev);
1526 
1527 		blk_mq_put_ctx(data.ctx);
1528 
1529 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1530 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1531 			blk_flush_plug_list(plug, false);
1532 			trace_block_plug(q);
1533 		}
1534 
1535 		list_add_tail(&rq->queuelist, &plug->mq_list);
1536 		return cookie;
1537 	}
1538 
1539 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1540 		/*
1541 		 * For a SYNC request, send it to the hardware immediately. For
1542 		 * an ASYNC request, just ensure that we run it later on. The
1543 		 * latter allows for merging opportunities and more efficient
1544 		 * dispatching.
1545 		 */
1546 run_queue:
1547 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1548 	}
1549 
1550 	blk_mq_put_ctx(data.ctx);
1551 	return cookie;
1552 }
1553 
1554 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1555 		struct blk_mq_tags *tags, unsigned int hctx_idx)
1556 {
1557 	struct page *page;
1558 
1559 	if (tags->rqs && set->ops->exit_request) {
1560 		int i;
1561 
1562 		for (i = 0; i < tags->nr_tags; i++) {
1563 			if (!tags->rqs[i])
1564 				continue;
1565 			set->ops->exit_request(set->driver_data, tags->rqs[i],
1566 						hctx_idx, i);
1567 			tags->rqs[i] = NULL;
1568 		}
1569 	}
1570 
1571 	while (!list_empty(&tags->page_list)) {
1572 		page = list_first_entry(&tags->page_list, struct page, lru);
1573 		list_del_init(&page->lru);
1574 		/*
1575 		 * Remove kmemleak object previously allocated in
1576 		 * blk_mq_init_rq_map().
1577 		 */
1578 		kmemleak_free(page_address(page));
1579 		__free_pages(page, page->private);
1580 	}
1581 
1582 	kfree(tags->rqs);
1583 
1584 	blk_mq_free_tags(tags);
1585 }
1586 
1587 static size_t order_to_size(unsigned int order)
1588 {
1589 	return (size_t)PAGE_SIZE << order;
1590 }
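
/*
 * For example, with 4KB pages order_to_size(0) is 4KB and
 * order_to_size(2) is 16KB; the max_order of 4 used below caps a
 * single allocation at 64KB.
 */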
1591 
1592 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1593 		unsigned int hctx_idx)
1594 {
1595 	struct blk_mq_tags *tags;
1596 	unsigned int i, j, entries_per_page, max_order = 4;
1597 	size_t rq_size, left;
1598 
1599 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1600 				set->numa_node,
1601 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1602 	if (!tags)
1603 		return NULL;
1604 
1605 	INIT_LIST_HEAD(&tags->page_list);
1606 
1607 	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1608 				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1609 				 set->numa_node);
1610 	if (!tags->rqs) {
1611 		blk_mq_free_tags(tags);
1612 		return NULL;
1613 	}
1614 
1615 	/*
1616 	 * rq_size is the size of the request plus driver payload, rounded
1617 	 * to the cacheline size
1618 	 */
1619 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1620 				cache_line_size());
1621 	left = rq_size * set->queue_depth;
1622 
1623 	for (i = 0; i < set->queue_depth; ) {
1624 		int this_order = max_order;
1625 		struct page *page;
1626 		int to_do;
1627 		void *p;
1628 
1629 		while (this_order && left < order_to_size(this_order - 1))
1630 			this_order--;
1631 
1632 		do {
1633 			page = alloc_pages_node(set->numa_node,
1634 				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1635 				this_order);
1636 			if (page)
1637 				break;
1638 			if (!this_order--)
1639 				break;
1640 			if (order_to_size(this_order) < rq_size)
1641 				break;
1642 		} while (1);
1643 
1644 		if (!page)
1645 			goto fail;
1646 
1647 		page->private = this_order;
1648 		list_add_tail(&page->lru, &tags->page_list);
1649 
1650 		p = page_address(page);
1651 		/*
1652 		 * Allow kmemleak to scan these pages as they contain pointers
1653 		 * to additional allocations made via ops->init_request().
1654 		 */
1655 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
1656 		entries_per_page = order_to_size(this_order) / rq_size;
1657 		to_do = min(entries_per_page, set->queue_depth - i);
1658 		left -= to_do * rq_size;
1659 		for (j = 0; j < to_do; j++) {
1660 			tags->rqs[i] = p;
1661 			if (set->ops->init_request) {
1662 				if (set->ops->init_request(set->driver_data,
1663 						tags->rqs[i], hctx_idx, i,
1664 						set->numa_node)) {
1665 					tags->rqs[i] = NULL;
1666 					goto fail;
1667 				}
1668 			}
1669 
1670 			p += rq_size;
1671 			i++;
1672 		}
1673 	}
1674 	return tags;
1675 
1676 fail:
1677 	blk_mq_free_rq_map(set, tags, hctx_idx);
1678 	return NULL;
1679 }
1680 
1681 /*
1682  * 'cpu' is going away. Splice any existing rq_list entries from this
1683  * software queue to the hw queue dispatch list, and ensure that it
1684  * gets run.
1685  */
1686 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1687 {
1688 	struct blk_mq_hw_ctx *hctx;
1689 	struct blk_mq_ctx *ctx;
1690 	LIST_HEAD(tmp);
1691 
1692 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1693 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1694 
1695 	spin_lock(&ctx->lock);
1696 	if (!list_empty(&ctx->rq_list)) {
1697 		list_splice_init(&ctx->rq_list, &tmp);
1698 		blk_mq_hctx_clear_pending(hctx, ctx);
1699 	}
1700 	spin_unlock(&ctx->lock);
1701 
1702 	if (list_empty(&tmp))
1703 		return 0;
1704 
1705 	spin_lock(&hctx->lock);
1706 	list_splice_tail_init(&tmp, &hctx->dispatch);
1707 	spin_unlock(&hctx->lock);
1708 
1709 	blk_mq_run_hw_queue(hctx, true);
1710 	return 0;
1711 }
1712 
1713 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1714 {
1715 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1716 					    &hctx->cpuhp_dead);
1717 }
1718 
1719 /* hctx->ctxs will be freed in queue's release handler */
1720 static void blk_mq_exit_hctx(struct request_queue *q,
1721 		struct blk_mq_tag_set *set,
1722 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1723 {
1724 	unsigned flush_start_tag = set->queue_depth;
1725 
1726 	blk_mq_tag_idle(hctx);
1727 
1728 	if (set->ops->exit_request)
1729 		set->ops->exit_request(set->driver_data,
1730 				       hctx->fq->flush_rq, hctx_idx,
1731 				       flush_start_tag + hctx_idx);
1732 
1733 	if (set->ops->exit_hctx)
1734 		set->ops->exit_hctx(hctx, hctx_idx);
1735 
1736 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1737 		cleanup_srcu_struct(&hctx->queue_rq_srcu);
1738 
1739 	blk_mq_remove_cpuhp(hctx);
1740 	blk_free_flush_queue(hctx->fq);
1741 	sbitmap_free(&hctx->ctx_map);
1742 }
1743 
1744 static void blk_mq_exit_hw_queues(struct request_queue *q,
1745 		struct blk_mq_tag_set *set, int nr_queue)
1746 {
1747 	struct blk_mq_hw_ctx *hctx;
1748 	unsigned int i;
1749 
1750 	queue_for_each_hw_ctx(q, hctx, i) {
1751 		if (i == nr_queue)
1752 			break;
1753 		blk_mq_exit_hctx(q, set, hctx, i);
1754 	}
1755 }
1756 
1757 static void blk_mq_free_hw_queues(struct request_queue *q,
1758 		struct blk_mq_tag_set *set)
1759 {
1760 	struct blk_mq_hw_ctx *hctx;
1761 	unsigned int i;
1762 
1763 	queue_for_each_hw_ctx(q, hctx, i)
1764 		free_cpumask_var(hctx->cpumask);
1765 }
1766 
1767 static int blk_mq_init_hctx(struct request_queue *q,
1768 		struct blk_mq_tag_set *set,
1769 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1770 {
1771 	int node;
1772 	unsigned flush_start_tag = set->queue_depth;
1773 
1774 	node = hctx->numa_node;
1775 	if (node == NUMA_NO_NODE)
1776 		node = hctx->numa_node = set->numa_node;
1777 
1778 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1779 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1780 	spin_lock_init(&hctx->lock);
1781 	INIT_LIST_HEAD(&hctx->dispatch);
1782 	hctx->queue = q;
1783 	hctx->queue_num = hctx_idx;
1784 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1785 
1786 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1787 
1788 	hctx->tags = set->tags[hctx_idx];
1789 
1790 	/*
1791 	 * Allocate space for all possible cpus to avoid allocation at
1792 	 * runtime
1793 	 */
1794 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1795 					GFP_KERNEL, node);
1796 	if (!hctx->ctxs)
1797 		goto unregister_cpu_notifier;
1798 
1799 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1800 			      node))
1801 		goto free_ctxs;
1802 
1803 	hctx->nr_ctx = 0;
1804 
1805 	if (set->ops->init_hctx &&
1806 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1807 		goto free_bitmap;
1808 
1809 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1810 	if (!hctx->fq)
1811 		goto exit_hctx;
1812 
1813 	if (set->ops->init_request &&
1814 	    set->ops->init_request(set->driver_data,
1815 				   hctx->fq->flush_rq, hctx_idx,
1816 				   flush_start_tag + hctx_idx, node))
1817 		goto free_fq;
1818 
1819 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1820 		init_srcu_struct(&hctx->queue_rq_srcu);
1821 
1822 	return 0;
1823 
1824  free_fq:
1825 	kfree(hctx->fq);
1826  exit_hctx:
1827 	if (set->ops->exit_hctx)
1828 		set->ops->exit_hctx(hctx, hctx_idx);
1829  free_bitmap:
1830 	sbitmap_free(&hctx->ctx_map);
1831  free_ctxs:
1832 	kfree(hctx->ctxs);
1833  unregister_cpu_notifier:
1834 	blk_mq_remove_cpuhp(hctx);
1835 	return -1;
1836 }
1837 
1838 static void blk_mq_init_cpu_queues(struct request_queue *q,
1839 				   unsigned int nr_hw_queues)
1840 {
1841 	unsigned int i;
1842 
1843 	for_each_possible_cpu(i) {
1844 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1845 		struct blk_mq_hw_ctx *hctx;
1846 
1847 		memset(__ctx, 0, sizeof(*__ctx));
1848 		__ctx->cpu = i;
1849 		spin_lock_init(&__ctx->lock);
1850 		INIT_LIST_HEAD(&__ctx->rq_list);
1851 		__ctx->queue = q;
1852 		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
1853 		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
1854 
1855 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1856 		if (!cpu_online(i))
1857 			continue;
1858 
1859 		hctx = blk_mq_map_queue(q, i);
1860 
1861 		/*
1862 		 * Set local node, IFF we have more than one hw queue. If
1863 		 * not, we remain on the home node of the device
1864 		 */
1865 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1866 			hctx->numa_node = local_memory_node(cpu_to_node(i));
1867 	}
1868 }
1869 
1870 static void blk_mq_map_swqueue(struct request_queue *q,
1871 			       const struct cpumask *online_mask)
1872 {
1873 	unsigned int i;
1874 	struct blk_mq_hw_ctx *hctx;
1875 	struct blk_mq_ctx *ctx;
1876 	struct blk_mq_tag_set *set = q->tag_set;
1877 
1878 	/*
1879 	 * Avoid others reading an incomplete hctx->cpumask through sysfs
1880 	 */
1881 	mutex_lock(&q->sysfs_lock);
1882 
1883 	queue_for_each_hw_ctx(q, hctx, i) {
1884 		cpumask_clear(hctx->cpumask);
1885 		hctx->nr_ctx = 0;
1886 	}
1887 
1888 	/*
1889 	 * Map software to hardware queues
1890 	 */
1891 	for_each_possible_cpu(i) {
1892 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1893 		if (!cpumask_test_cpu(i, online_mask))
1894 			continue;
1895 
1896 		ctx = per_cpu_ptr(q->queue_ctx, i);
1897 		hctx = blk_mq_map_queue(q, i);
1898 
1899 		cpumask_set_cpu(i, hctx->cpumask);
1900 		ctx->index_hw = hctx->nr_ctx;
1901 		hctx->ctxs[hctx->nr_ctx++] = ctx;
1902 	}
1903 
1904 	mutex_unlock(&q->sysfs_lock);
1905 
1906 	queue_for_each_hw_ctx(q, hctx, i) {
1907 		/*
1908 		 * If no software queues are mapped to this hardware queue,
1909 		 * disable it and free the request entries.
1910 		 */
1911 		if (!hctx->nr_ctx) {
1912 			if (set->tags[i]) {
1913 				blk_mq_free_rq_map(set, set->tags[i], i);
1914 				set->tags[i] = NULL;
1915 			}
1916 			hctx->tags = NULL;
1917 			continue;
1918 		}
1919 
1920 		/* unmapped hw queue can be remapped after CPU topo changed */
1921 		/* unmapped hw queue can be remapped after CPU topology changes */
1922 			set->tags[i] = blk_mq_init_rq_map(set, i);
1923 		hctx->tags = set->tags[i];
1924 		WARN_ON(!hctx->tags);
1925 
1926 		/*
1927 		 * Set the map size to the number of mapped software queues.
1928 		 * This is more accurate and more efficient than looping
1929 		 * over all possibly mapped software queues.
1930 		 */
1931 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
1932 
1933 		/*
1934 		 * Initialize batch round-robin counts
1935 		 */
1936 		hctx->next_cpu = cpumask_first(hctx->cpumask);
1937 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1938 	}
1939 }
1940 
1941 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
1942 {
1943 	struct blk_mq_hw_ctx *hctx;
1944 	int i;
1945 
1946 	queue_for_each_hw_ctx(q, hctx, i) {
1947 		if (shared)
1948 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
1949 		else
1950 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1951 	}
1952 }
1953 
1954 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
1955 {
1956 	struct request_queue *q;
1957 
1958 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
1959 		blk_mq_freeze_queue(q);
1960 		queue_set_hctx_shared(q, shared);
1961 		blk_mq_unfreeze_queue(q);
1962 	}
1963 }
1964 
1965 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1966 {
1967 	struct blk_mq_tag_set *set = q->tag_set;
1968 
1969 	mutex_lock(&set->tag_list_lock);
1970 	list_del_init(&q->tag_set_list);
1971 	if (list_is_singular(&set->tag_list)) {
1972 		/* just transitioned to unshared */
1973 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
1974 		/* update existing queue */
1975 		blk_mq_update_tag_set_depth(set, false);
1976 	}
1977 	mutex_unlock(&set->tag_list_lock);
1978 }
1979 
1980 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1981 				     struct request_queue *q)
1982 {
1983 	q->tag_set = set;
1984 
1985 	mutex_lock(&set->tag_list_lock);
1986 
1987 	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
1988 	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
1989 		set->flags |= BLK_MQ_F_TAG_SHARED;
1990 		/* update existing queue */
1991 		blk_mq_update_tag_set_depth(set, true);
1992 	}
1993 	if (set->flags & BLK_MQ_F_TAG_SHARED)
1994 		queue_set_hctx_shared(q, true);
1995 	list_add_tail(&q->tag_set_list, &set->tag_list);
1996 
1997 	mutex_unlock(&set->tag_list_lock);
1998 }
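
/*
 * Example: a tag_set serving two request queues (say, two disks on one
 * HBA). Adding the second queue above marks the set BLK_MQ_F_TAG_SHARED
 * and, with each queue frozen, propagates the flag to every hctx, so
 * that tag allocation starts being split fairly among the queues that
 * are actively issuing I/O.
 */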
1999 
2000 /*
2001  * This is the actual release handler for mq, but we run it from the
2002  * request queue's release handler to avoid use-after-free headaches:
2003  * q->mq_kobj shouldn't have been introduced, but we can't group the
2004  * ctx/hctx kobjects without it.
2005  */
2006 void blk_mq_release(struct request_queue *q)
2007 {
2008 	struct blk_mq_hw_ctx *hctx;
2009 	unsigned int i;
2010 
2011 	/* hctx kobj stays in hctx */
2012 	queue_for_each_hw_ctx(q, hctx, i) {
2013 		if (!hctx)
2014 			continue;
2015 		kfree(hctx->ctxs);
2016 		kfree(hctx);
2017 	}
2018 
2019 	q->mq_map = NULL;
2020 
2021 	kfree(q->queue_hw_ctx);
2022 
2023 	/* ctx kobj stays in queue_ctx */
2024 	free_percpu(q->queue_ctx);
2025 }
2026 
2027 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2028 {
2029 	struct request_queue *uninit_q, *q;
2030 
2031 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2032 	if (!uninit_q)
2033 		return ERR_PTR(-ENOMEM);
2034 
2035 	q = blk_mq_init_allocated_queue(set, uninit_q);
2036 	if (IS_ERR(q))
2037 		blk_cleanup_queue(uninit_q);
2038 
2039 	return q;
2040 }
2041 EXPORT_SYMBOL(blk_mq_init_queue);
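
/*
 * Minimal driver-side usage sketch ("my_mq_ops" and "my_cmd" are
 * hypothetical; error handling trimmed). ->queue_rq is the one op that
 * blk_mq_alloc_tag_set() below insists on:
 *
 *	struct blk_mq_tag_set set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *	struct request_queue *q;
 *
 *	if (blk_mq_alloc_tag_set(&set))
 *		return -ENOMEM;
 *	q = blk_mq_init_queue(&set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&set);
 *		return PTR_ERR(q);
 *	}
 */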
2042 
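/*
 * (Re)allocate hw contexts so the queue has one hctx per hw queue in the
 * set: existing hctxs are kept, missing ones are allocated on their
 * mapped node, and trailing hctxs are torn down when shrinking. If an
 * allocation fails partway through, q->nr_hw_queues is simply left at
 * the number of hctxs that were set up successfully.
 */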
2043 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2044 						struct request_queue *q)
2045 {
2046 	int i, j;
2047 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2048 
2049 	blk_mq_sysfs_unregister(q);
2050 	for (i = 0; i < set->nr_hw_queues; i++) {
2051 		int node;
2052 
2053 		if (hctxs[i])
2054 			continue;
2055 
2056 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
2057 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2058 					GFP_KERNEL, node);
2059 		if (!hctxs[i])
2060 			break;
2061 
2062 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2063 						node)) {
2064 			kfree(hctxs[i]);
2065 			hctxs[i] = NULL;
2066 			break;
2067 		}
2068 
2069 		atomic_set(&hctxs[i]->nr_active, 0);
2070 		hctxs[i]->numa_node = node;
2071 		hctxs[i]->queue_num = i;
2072 
2073 		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2074 			free_cpumask_var(hctxs[i]->cpumask);
2075 			kfree(hctxs[i]);
2076 			hctxs[i] = NULL;
2077 			break;
2078 		}
2079 		blk_mq_hctx_kobj_init(hctxs[i]);
2080 	}
2081 	for (j = i; j < q->nr_hw_queues; j++) {
2082 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2083 
2084 		if (hctx) {
2085 			if (hctx->tags) {
2086 				blk_mq_free_rq_map(set, hctx->tags, j);
2087 				set->tags[j] = NULL;
2088 			}
2089 			blk_mq_exit_hctx(q, set, hctx, j);
2090 			free_cpumask_var(hctx->cpumask);
2091 			kobject_put(&hctx->kobj);
2092 			kfree(hctx->ctxs);
2093 			kfree(hctx);
2094 			hctxs[j] = NULL;
2095 
2096 		}
2097 	}
2098 	q->nr_hw_queues = i;
2099 	blk_mq_sysfs_register(q);
2100 }
2101 
2102 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2103 						  struct request_queue *q)
2104 {
2105 	/* mark the queue as mq asap */
2106 	q->mq_ops = set->ops;
2107 
2108 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2109 	if (!q->queue_ctx)
2110 		goto err_exit;
2111 
2112 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2113 						GFP_KERNEL, set->numa_node);
2114 	if (!q->queue_hw_ctx)
2115 		goto err_percpu;
2116 
2117 	q->mq_map = set->mq_map;
2118 
2119 	blk_mq_realloc_hw_ctxs(set, q);
2120 	if (!q->nr_hw_queues)
2121 		goto err_hctxs;
2122 
2123 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2124 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2125 
2126 	q->nr_queues = nr_cpu_ids;
2127 
2128 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2129 
2130 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
2131 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2132 
2133 	q->sg_reserved_size = INT_MAX;
2134 
2135 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2136 	INIT_LIST_HEAD(&q->requeue_list);
2137 	spin_lock_init(&q->requeue_lock);
2138 
2139 	if (q->nr_hw_queues > 1)
2140 		blk_queue_make_request(q, blk_mq_make_request);
2141 	else
2142 		blk_queue_make_request(q, blk_sq_make_request);
2143 
2144 	/*
2145 	 * Do this after blk_queue_make_request() overrides it...
2146 	 */
2147 	q->nr_requests = set->queue_depth;
2148 
2149 	/*
2150 	 * Default to classic polling
2151 	 */
2152 	q->poll_nsec = -1;
2153 
2154 	if (set->ops->complete)
2155 		blk_queue_softirq_done(q, set->ops->complete);
2156 
2157 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2158 
2159 	get_online_cpus();
2160 	mutex_lock(&all_q_mutex);
2161 
2162 	list_add_tail(&q->all_q_node, &all_q_list);
2163 	blk_mq_add_queue_tag_set(set, q);
2164 	blk_mq_map_swqueue(q, cpu_online_mask);
2165 
2166 	mutex_unlock(&all_q_mutex);
2167 	put_online_cpus();
2168 
2169 	return q;
2170 
2171 err_hctxs:
2172 	kfree(q->queue_hw_ctx);
2173 err_percpu:
2174 	free_percpu(q->queue_ctx);
2175 err_exit:
2176 	q->mq_ops = NULL;
2177 	return ERR_PTR(-ENOMEM);
2178 }
2179 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
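
/*
 * Sketch of the split path that blk_mq_init_queue() wraps, for a driver
 * that must touch the queue between allocation and mq setup (call site
 * hypothetical; error handling trimmed):
 *
 *	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
 *	... adjust q before the hw contexts exist ...
 *	q = blk_mq_init_allocated_queue(set, q);
 */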
2180 
2181 void blk_mq_free_queue(struct request_queue *q)
2182 {
2183 	struct blk_mq_tag_set	*set = q->tag_set;
2184 
2185 	mutex_lock(&all_q_mutex);
2186 	list_del_init(&q->all_q_node);
2187 	mutex_unlock(&all_q_mutex);
2188 
2189 	wbt_exit(q);
2190 
2191 	blk_mq_del_queue_tag_set(q);
2192 
2193 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2194 	blk_mq_free_hw_queues(q, set);
2195 }
2196 
2197 /* Basically redo blk_mq_init_queue() with the queue frozen */
2198 static void blk_mq_queue_reinit(struct request_queue *q,
2199 				const struct cpumask *online_mask)
2200 {
2201 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2202 
2203 	blk_mq_sysfs_unregister(q);
2204 
2205 	/*
2206 	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2207 	 * we should change hctx numa_node according to the new topology (this
2208 	 * involves freeing and re-allocating memory; is it worth doing?)
2209 	 */
2210 
2211 	blk_mq_map_swqueue(q, online_mask);
2212 
2213 	blk_mq_sysfs_register(q);
2214 }
2215 
2216 /*
2217  * New online cpumask which is going to be set in this hotplug event.
2218  * Declare this cpumask as global, since cpu-hotplug operations are
2219  * invoked one-by-one and dynamically allocating it could fail.
2220  */
2221 static struct cpumask cpuhp_online_new;
2222 
2223 static void blk_mq_queue_reinit_work(void)
2224 {
2225 	struct request_queue *q;
2226 
2227 	mutex_lock(&all_q_mutex);
2228 	/*
2229 	 * We need to freeze and reinit all existing queues.  Freezing
2230 	 * involves synchronous wait for an RCU grace period and doing it
2231 	 * one by one may take a long time.  Start freezing all queues in
2232 	 * one swoop and then wait for the completions so that freezing can
2233 	 * take place in parallel.
2234 	 */
2235 	list_for_each_entry(q, &all_q_list, all_q_node)
2236 		blk_mq_freeze_queue_start(q);
2237 	list_for_each_entry(q, &all_q_list, all_q_node)
2238 		blk_mq_freeze_queue_wait(q);
2239 
2240 	list_for_each_entry(q, &all_q_list, all_q_node)
2241 		blk_mq_queue_reinit(q, &cpuhp_online_new);
2242 
2243 	list_for_each_entry(q, &all_q_list, all_q_node)
2244 		blk_mq_unfreeze_queue(q);
2245 
2246 	mutex_unlock(&all_q_mutex);
2247 }
2248 
2249 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2250 {
2251 	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2252 	blk_mq_queue_reinit_work();
2253 	return 0;
2254 }
2255 
2256 /*
2257  * Before a hot-added cpu starts handling requests, new mappings must be
2258  * established.  Otherwise, requests in the hw queue might never be
2259  * dispatched.
2260  *
2261  * For example, suppose there is a single hw queue (hctx) and two CPU
2262  * queues (ctx0 for CPU0, and ctx1 for CPU1).
2263  *
2264  * Now CPU1 has just been onlined; a request is inserted into ctx1->rq_list
2265  * and bit0 is set in the pending bitmap, as ctx1->index_hw is still zero.
2266  *
2267  * Then, while running the hw queue, flush_busy_ctxs() finds bit0 set in
2268  * the pending bitmap and tries to retrieve requests from
2269  * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0, so the
2270  * request in ctx1->rq_list is ignored.
2271  */
2272 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2273 {
2274 	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2275 	cpumask_set_cpu(cpu, &cpuhp_online_new);
2276 	blk_mq_queue_reinit_work();
2277 	return 0;
2278 }
2279 
2280 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2281 {
2282 	int i;
2283 
2284 	for (i = 0; i < set->nr_hw_queues; i++) {
2285 		set->tags[i] = blk_mq_init_rq_map(set, i);
2286 		if (!set->tags[i])
2287 			goto out_unwind;
2288 	}
2289 
2290 	return 0;
2291 
2292 out_unwind:
2293 	while (--i >= 0)
2294 		blk_mq_free_rq_map(set, set->tags[i], i);
2295 
2296 	return -ENOMEM;
2297 }
2298 
2299 /*
2300  * Allocate the request maps associated with this tag_set. Note that this
2301  * may reduce the depth asked for, if memory is tight. set->queue_depth
2302  * will be updated to reflect the allocated depth.
2303  */
2304 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2305 {
2306 	unsigned int depth;
2307 	int err;
2308 
2309 	depth = set->queue_depth;
2310 	do {
2311 		err = __blk_mq_alloc_rq_maps(set);
2312 		if (!err)
2313 			break;
2314 
2315 		set->queue_depth >>= 1;
2316 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2317 			err = -ENOMEM;
2318 			break;
2319 		}
2320 	} while (set->queue_depth);
2321 
2322 	if (!set->queue_depth || err) {
2323 		pr_err("blk-mq: failed to allocate request map\n");
2324 		return -ENOMEM;
2325 	}
2326 
2327 	if (depth != set->queue_depth)
2328 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2329 						depth, set->queue_depth);
2330 
2331 	return 0;
2332 }
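
/*
 * Back-off example for the loop above: with set->queue_depth = 1024 and
 * allocations failing, the depth is retried at 512, 256, ... and we only
 * give up once it would drop below set->reserved_tags + BLK_MQ_TAG_MIN.
 * On success the caller sees the (possibly reduced) depth in
 * set->queue_depth.
 */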
2333 
2334 /*
2335  * Alloc a tag set to be associated with one or more request queues.
2336  * May fail with EINVAL for various error conditions. May adjust the
2337  * requested depth down, if it is too large. In that case, the adjusted
2338  * value will be stored in set->queue_depth.
2339  */
2340 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2341 {
2342 	int ret;
2343 
2344 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2345 
2346 	if (!set->nr_hw_queues)
2347 		return -EINVAL;
2348 	if (!set->queue_depth)
2349 		return -EINVAL;
2350 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2351 		return -EINVAL;
2352 
2353 	if (!set->ops->queue_rq)
2354 		return -EINVAL;
2355 
2356 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2357 		pr_info("blk-mq: reduced tag depth to %u\n",
2358 			BLK_MQ_MAX_DEPTH);
2359 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2360 	}
2361 
2362 	/*
2363 	 * If a crashdump is active, then we are potentially in a very
2364 	 * memory constrained environment. Limit us to 1 queue and
2365 	 * 64 tags to prevent using too much memory.
2366 	 */
2367 	if (is_kdump_kernel()) {
2368 		set->nr_hw_queues = 1;
2369 		set->queue_depth = min(64U, set->queue_depth);
2370 	}
2371 	/*
2372 	 * There is no use for more h/w queues than cpus.
2373 	 */
2374 	if (set->nr_hw_queues > nr_cpu_ids)
2375 		set->nr_hw_queues = nr_cpu_ids;
2376 
2377 	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2378 				 GFP_KERNEL, set->numa_node);
2379 	if (!set->tags)
2380 		return -ENOMEM;
2381 
2382 	ret = -ENOMEM;
2383 	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2384 			GFP_KERNEL, set->numa_node);
2385 	if (!set->mq_map)
2386 		goto out_free_tags;
2387 
2388 	if (set->ops->map_queues)
2389 		ret = set->ops->map_queues(set);
2390 	else
2391 		ret = blk_mq_map_queues(set);
2392 	if (ret)
2393 		goto out_free_mq_map;
2394 
2395 	ret = blk_mq_alloc_rq_maps(set);
2396 	if (ret)
2397 		goto out_free_mq_map;
2398 
2399 	mutex_init(&set->tag_list_lock);
2400 	INIT_LIST_HEAD(&set->tag_list);
2401 
2402 	return 0;
2403 
2404 out_free_mq_map:
2405 	kfree(set->mq_map);
2406 	set->mq_map = NULL;
2407 out_free_tags:
2408 	kfree(set->tags);
2409 	set->tags = NULL;
2410 	return ret;
2411 }
2412 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2413 
2414 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2415 {
2416 	int i;
2417 
2418 	for (i = 0; i < nr_cpu_ids; i++) {
2419 		if (set->tags[i])
2420 			blk_mq_free_rq_map(set, set->tags[i], i);
2421 	}
2422 
2423 	kfree(set->mq_map);
2424 	set->mq_map = NULL;
2425 
2426 	kfree(set->tags);
2427 	set->tags = NULL;
2428 }
2429 EXPORT_SYMBOL(blk_mq_free_tag_set);
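
/*
 * Teardown-order note: every queue created from the set must be cleaned
 * up (blk_cleanup_queue()) before the set itself is freed, otherwise the
 * rq maps are pulled out from under live hctxs. A sketch of a driver
 * unload path:
 *
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(&set);
 */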
2430 
2431 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2432 {
2433 	struct blk_mq_tag_set *set = q->tag_set;
2434 	struct blk_mq_hw_ctx *hctx;
2435 	int i, ret;
2436 
2437 	if (!set || nr > set->queue_depth)
2438 		return -EINVAL;
2439 
2440 	ret = 0;
2441 	queue_for_each_hw_ctx(q, hctx, i) {
2442 		if (!hctx->tags)
2443 			continue;
2444 		ret = blk_mq_tag_update_depth(hctx->tags, nr);
2445 		if (ret)
2446 			break;
2447 	}
2448 
2449 	if (!ret)
2450 		q->nr_requests = nr;
2451 
2452 	return ret;
2453 }
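
/*
 * This is what a write to the queue's sysfs nr_requests attribute ends
 * up calling for mq devices; note that the depth can only be lowered
 * (or restored), never raised past the set->queue_depth that the tags
 * were originally allocated with.
 */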
2454 
2455 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2456 {
2457 	struct request_queue *q;
2458 
2459 	if (nr_hw_queues > nr_cpu_ids)
2460 		nr_hw_queues = nr_cpu_ids;
2461 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2462 		return;
2463 
2464 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2465 		blk_mq_freeze_queue(q);
2466 
2467 	set->nr_hw_queues = nr_hw_queues;
2468 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2469 		blk_mq_realloc_hw_ctxs(set, q);
2470 
2471 		if (q->nr_hw_queues > 1)
2472 			blk_queue_make_request(q, blk_mq_make_request);
2473 		else
2474 			blk_queue_make_request(q, blk_sq_make_request);
2475 
2476 		blk_mq_queue_reinit(q, cpu_online_mask);
2477 	}
2478 
2479 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2480 		blk_mq_unfreeze_queue(q);
2481 }
2482 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
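
/*
 * Hypothetical caller sketch: a driver that discovers a new queue count
 * after a controller reset can resize all queues sharing its set in one
 * call (every queue is frozen across the update):
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, nr_io_queues);
 */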
2483 
2484 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2485 				       struct blk_mq_hw_ctx *hctx,
2486 				       struct request *rq)
2487 {
2488 	struct blk_rq_stat stat[2];
2489 	unsigned long ret = 0;
2490 
2491 	/*
2492 	 * If stats collection isn't on, don't sleep but turn it on for
2493 	 * future users
2494 	 */
2495 	if (!blk_stat_enable(q))
2496 		return 0;
2497 
2498 	/*
2499 	 * We don't have to do this once per IO; we should optimize this
2500 	 * to just use the current window of stats until it changes.
2501 	 */
2502 	memset(&stat, 0, sizeof(stat));
2503 	blk_hctx_stat_get(hctx, stat);
2504 
2505 	/*
2506 	 * As an optimistic guess, use half of the mean service time
2507 	 * for this type of request. We can (and should) make this smarter.
2508 	 * For instance, if the completion latencies are tight, we can
2509 	 * get closer than just half the mean. This is especially
2510 	 * important on devices where the completion latencies are longer
2511 	 * than ~10 usec.
2512 	 */
2513 	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2514 		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2515 	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2516 		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2517 
2518 	return ret;
2519 }
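
/*
 * Worked example of the guess above: if completed reads in the current
 * stats window have a mean service time of 20000ns, a read poll will
 * sleep for (20000 + 1) / 2 = 10000ns before it starts spinning.
 */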
2520 
2521 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2522 				     struct blk_mq_hw_ctx *hctx,
2523 				     struct request *rq)
2524 {
2525 	struct hrtimer_sleeper hs;
2526 	enum hrtimer_mode mode;
2527 	unsigned int nsecs;
2528 	ktime_t kt;
2529 
2530 	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2531 		return false;
2532 
2533 	/*
2534 	 * poll_nsec can be:
2535 	 *
2536 	 * -1:	don't ever hybrid sleep
2537 	 *  0:	use half of prev avg
2538 	 * >0:	use this specific value
2539 	 */
2540 	if (q->poll_nsec == -1)
2541 		return false;
2542 	else if (q->poll_nsec > 0)
2543 		nsecs = q->poll_nsec;
2544 	else
2545 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2546 
2547 	if (!nsecs)
2548 		return false;
2549 
2550 	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2551 
2552 	/*
2553 	 * This will be replaced with the stats tracking code, using
2554 	 * 'avg_completion_time / 2' as the pre-sleep target.
2555 	 */
2556 	kt = ktime_set(0, nsecs);
2557 
2558 	mode = HRTIMER_MODE_REL;
2559 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2560 	hrtimer_set_expires(&hs.timer, kt);
2561 
2562 	hrtimer_init_sleeper(&hs, current);
2563 	do {
2564 		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2565 			break;
2566 		set_current_state(TASK_UNINTERRUPTIBLE);
2567 		hrtimer_start_expires(&hs.timer, mode);
2568 		if (hs.task)
2569 			io_schedule();
2570 		hrtimer_cancel(&hs.timer);
2571 		mode = HRTIMER_MODE_ABS;
2572 	} while (hs.task && !signal_pending(current));
2573 
2574 	__set_current_state(TASK_RUNNING);
2575 	destroy_hrtimer_on_stack(&hs.timer);
2576 	return true;
2577 }
2578 
2579 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2580 {
2581 	struct request_queue *q = hctx->queue;
2582 	long state;
2583 
2584 	/*
2585 	 * If we sleep, have the caller restart the poll loop to reset
2586 	 * the state. Like for the other success return cases, the
2587 	 * caller is responsible for checking if the IO completed. If
2588 	 * the IO isn't complete, we'll get called again and will go
2589 	 * straight to the busy poll loop.
2590 	 */
2591 	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2592 		return true;
2593 
2594 	hctx->poll_considered++;
2595 
2596 	state = current->state;
2597 	while (!need_resched()) {
2598 		int ret;
2599 
2600 		hctx->poll_invoked++;
2601 
2602 		ret = q->mq_ops->poll(hctx, rq->tag);
2603 		if (ret > 0) {
2604 			hctx->poll_success++;
2605 			set_current_state(TASK_RUNNING);
2606 			return true;
2607 		}
2608 
2609 		if (signal_pending_state(state, current))
2610 			set_current_state(TASK_RUNNING);
2611 
2612 		if (current->state == TASK_RUNNING)
2613 			return true;
2614 		if (ret < 0)
2615 			break;
2616 		cpu_relax();
2617 	}
2618 
2619 	return false;
2620 }
2621 
2622 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2623 {
2624 	struct blk_mq_hw_ctx *hctx;
2625 	struct blk_plug *plug;
2626 	struct request *rq;
2627 
2628 	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2629 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2630 		return false;
2631 
2632 	plug = current->plug;
2633 	if (plug)
2634 		blk_flush_plug_list(plug, false);
2635 
2636 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2637 	rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2638 
2639 	return __blk_mq_poll(hctx, rq);
2640 }
2641 EXPORT_SYMBOL_GPL(blk_mq_poll);
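
/*
 * Sketch of a polling caller (modelled loosely on the O_DIRECT path;
 * "done" is a stand-in for the caller's completion check). The cookie
 * returned at submission time tells us which hctx and tag to poll:
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *	while (!done)
 *		blk_mq_poll(q, cookie);
 */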
2642 
2643 void blk_mq_disable_hotplug(void)
2644 {
2645 	mutex_lock(&all_q_mutex);
2646 }
2647 
2648 void blk_mq_enable_hotplug(void)
2649 {
2650 	mutex_unlock(&all_q_mutex);
2651 }
2652 
2653 static int __init blk_mq_init(void)
2654 {
2655 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2656 				blk_mq_hctx_notify_dead);
2657 
2658 	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2659 				  blk_mq_queue_reinit_prepare,
2660 				  blk_mq_queue_reinit_dead);
2661 	return 0;
2662 }
2663 subsys_initcall(blk_mq_init);
2664