xref: /openbmc/linux/block/blk-mq.c (revision 7bcae826)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/delay.h>
24 #include <linux/crash_dump.h>
25 #include <linux/prefetch.h>
26 
27 #include <trace/events/block.h>
28 
29 #include <linux/blk-mq.h>
30 #include "blk.h"
31 #include "blk-mq.h"
32 #include "blk-mq-tag.h"
33 #include "blk-stat.h"
34 #include "blk-wbt.h"
35 #include "blk-mq-sched.h"
36 
37 static DEFINE_MUTEX(all_q_mutex);
38 static LIST_HEAD(all_q_list);
39 
40 /*
41  * Check if any of the ctx's have pending work in this hardware queue
42  */
43 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
44 {
45 	return sbitmap_any_bit_set(&hctx->ctx_map) ||
46 			!list_empty_careful(&hctx->dispatch) ||
47 			blk_mq_sched_has_work(hctx);
48 }
49 
50 /*
51  * Mark this ctx as having pending work in this hardware queue
52  */
53 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
54 				     struct blk_mq_ctx *ctx)
55 {
56 	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
57 		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
58 }
59 
60 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
61 				      struct blk_mq_ctx *ctx)
62 {
63 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
64 }
65 
66 void blk_mq_freeze_queue_start(struct request_queue *q)
67 {
68 	int freeze_depth;
69 
70 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
71 	if (freeze_depth == 1) {
72 		percpu_ref_kill(&q->q_usage_counter);
73 		blk_mq_run_hw_queues(q, false);
74 	}
75 }
76 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
77 
78 static void blk_mq_freeze_queue_wait(struct request_queue *q)
79 {
80 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
81 }
82 
83 /*
84  * Guarantee no request is in use, so we can change any data structure of
85  * the queue afterward.
86  */
87 void blk_freeze_queue(struct request_queue *q)
88 {
89 	/*
90 	 * In the !blk_mq case we are only calling this to kill the
91 	 * q_usage_counter, otherwise this increases the freeze depth
92 	 * and waits for it to return to zero.  For this reason there is
93 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
94 	 * exported to drivers as the only user for unfreeze is blk_mq.
95 	 */
96 	blk_mq_freeze_queue_start(q);
97 	blk_mq_freeze_queue_wait(q);
98 }
99 
100 void blk_mq_freeze_queue(struct request_queue *q)
101 {
102 	/*
103 	 * ...just an alias to keep freeze and unfreeze actions balanced
104 	 * in the blk_mq_* namespace
105 	 */
106 	blk_freeze_queue(q);
107 }
108 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
109 
110 void blk_mq_unfreeze_queue(struct request_queue *q)
111 {
112 	int freeze_depth;
113 
114 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
115 	WARN_ON_ONCE(freeze_depth < 0);
116 	if (!freeze_depth) {
117 		percpu_ref_reinit(&q->q_usage_counter);
118 		wake_up_all(&q->mq_freeze_wq);
119 	}
120 }
121 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
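
/*
 * Example (illustrative sketch): a hypothetical driver typically brackets a
 * configuration change with freeze/unfreeze so that no request is in flight
 * while queue data structures are modified.  The function name and the
 * "update state" step are assumptions, not existing kernel symbols.
 */
static void __maybe_unused example_reconfigure_queue(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* wait until q_usage_counter drops to zero */
	/* ... safely update driver or queue state here ... */
	blk_mq_unfreeze_queue(q);	/* re-enable request allocation, wake waiters */
}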
122 
123 /**
124  * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
125  * @q: request queue.
126  *
127  * Note: this function does not prevent the struct request end_io()
128  * callback from being invoked. Additionally, new queue_rq() calls are not
129  * prevented unless the queue has been stopped first.
130  */
131 void blk_mq_quiesce_queue(struct request_queue *q)
132 {
133 	struct blk_mq_hw_ctx *hctx;
134 	unsigned int i;
135 	bool rcu = false;
136 
137 	blk_mq_stop_hw_queues(q);
138 
139 	queue_for_each_hw_ctx(q, hctx, i) {
140 		if (hctx->flags & BLK_MQ_F_BLOCKING)
141 			synchronize_srcu(&hctx->queue_rq_srcu);
142 		else
143 			rcu = true;
144 	}
145 	if (rcu)
146 		synchronize_rcu();
147 }
148 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
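
/*
 * Example (illustrative sketch): in this version blk_mq_quiesce_queue() stops
 * the hardware queues, so a hypothetical driver that wants to resume I/O
 * afterwards restarts them explicitly.  The function name is an assumption.
 */
static void __maybe_unused example_quiesce_and_resume(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* no queue_rq() is running once this returns */
	/* ... reset or reconfigure the hardware here ... */
	blk_mq_start_stopped_hw_queues(q, true);	/* allow dispatch again */
}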
149 
150 void blk_mq_wake_waiters(struct request_queue *q)
151 {
152 	struct blk_mq_hw_ctx *hctx;
153 	unsigned int i;
154 
155 	queue_for_each_hw_ctx(q, hctx, i)
156 		if (blk_mq_hw_queue_mapped(hctx))
157 			blk_mq_tag_wakeup_all(hctx->tags, true);
158 
159 	/*
160 	 * If we are called because the queue has now been marked as
161 	 * dying, we need to ensure that processes currently waiting on
162 	 * the queue are notified as well.
163 	 */
164 	wake_up_all(&q->mq_freeze_wq);
165 }
166 
167 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
168 {
169 	return blk_mq_has_free_tags(hctx->tags);
170 }
171 EXPORT_SYMBOL(blk_mq_can_queue);
172 
173 void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
174 			struct request *rq, unsigned int op)
175 {
176 	INIT_LIST_HEAD(&rq->queuelist);
177 	/* csd/requeue_work/fifo_time is initialized before use */
178 	rq->q = q;
179 	rq->mq_ctx = ctx;
180 	rq->cmd_flags = op;
181 	if (blk_queue_io_stat(q))
182 		rq->rq_flags |= RQF_IO_STAT;
183 	/* do not touch atomic flags, it needs atomic ops against the timer */
184 	rq->cpu = -1;
185 	INIT_HLIST_NODE(&rq->hash);
186 	RB_CLEAR_NODE(&rq->rb_node);
187 	rq->rq_disk = NULL;
188 	rq->part = NULL;
189 	rq->start_time = jiffies;
190 #ifdef CONFIG_BLK_CGROUP
191 	rq->rl = NULL;
192 	set_start_time_ns(rq);
193 	rq->io_start_time_ns = 0;
194 #endif
195 	rq->nr_phys_segments = 0;
196 #if defined(CONFIG_BLK_DEV_INTEGRITY)
197 	rq->nr_integrity_segments = 0;
198 #endif
199 	rq->special = NULL;
200 	/* tag was already set */
201 	rq->errors = 0;
202 	rq->extra_len = 0;
203 
204 	INIT_LIST_HEAD(&rq->timeout_list);
205 	rq->timeout = 0;
206 
207 	rq->end_io = NULL;
208 	rq->end_io_data = NULL;
209 	rq->next_rq = NULL;
210 
211 	ctx->rq_dispatched[op_is_sync(op)]++;
212 }
213 EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
214 
215 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
216 				       unsigned int op)
217 {
218 	struct request *rq;
219 	unsigned int tag;
220 
221 	tag = blk_mq_get_tag(data);
222 	if (tag != BLK_MQ_TAG_FAIL) {
223 		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
224 
225 		rq = tags->static_rqs[tag];
226 
227 		if (data->flags & BLK_MQ_REQ_INTERNAL) {
228 			rq->tag = -1;
229 			rq->internal_tag = tag;
230 		} else {
231 			if (blk_mq_tag_busy(data->hctx)) {
232 				rq->rq_flags = RQF_MQ_INFLIGHT;
233 				atomic_inc(&data->hctx->nr_active);
234 			}
235 			rq->tag = tag;
236 			rq->internal_tag = -1;
237 		}
238 
239 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
240 		return rq;
241 	}
242 
243 	return NULL;
244 }
245 EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
246 
247 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
248 		unsigned int flags)
249 {
250 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
251 	struct request *rq;
252 	int ret;
253 
254 	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
255 	if (ret)
256 		return ERR_PTR(ret);
257 
258 	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
259 
260 	blk_mq_put_ctx(alloc_data.ctx);
261 	blk_queue_exit(q);
262 
263 	if (!rq)
264 		return ERR_PTR(-EWOULDBLOCK);
265 
266 	rq->__data_len = 0;
267 	rq->__sector = (sector_t) -1;
268 	rq->bio = rq->biotail = NULL;
269 	return rq;
270 }
271 EXPORT_SYMBOL(blk_mq_alloc_request);
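
/*
 * Example (illustrative sketch): a hypothetical driver allocating a request
 * for an internal command.  With BLK_MQ_REQ_NOWAIT the call returns an
 * ERR_PTR instead of sleeping for a free tag.  The function name is an
 * assumption.
 */
static int __maybe_unused example_send_internal_command(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_READ, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in driver-specific payload via blk_mq_rq_to_pdu(rq) ... */

	blk_mq_free_request(rq);
	return 0;
}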
272 
273 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
274 		unsigned int flags, unsigned int hctx_idx)
275 {
276 	struct blk_mq_hw_ctx *hctx;
277 	struct blk_mq_ctx *ctx;
278 	struct request *rq;
279 	struct blk_mq_alloc_data alloc_data;
280 	int ret;
281 
282 	/*
283 	 * If the tag allocator sleeps we could get an allocation for a
284 	 * different hardware context.  No need to complicate the low level
285 	 * allocator for this for the rare use case of a command tied to
286 	 * a specific queue.
287 	 */
288 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
289 		return ERR_PTR(-EINVAL);
290 
291 	if (hctx_idx >= q->nr_hw_queues)
292 		return ERR_PTR(-EIO);
293 
294 	ret = blk_queue_enter(q, true);
295 	if (ret)
296 		return ERR_PTR(ret);
297 
298 	/*
299 	 * Check if the hardware context is actually mapped to anything.
300 	 * If not, tell the caller that it should skip this queue.
301 	 */
302 	hctx = q->queue_hw_ctx[hctx_idx];
303 	if (!blk_mq_hw_queue_mapped(hctx)) {
304 		ret = -EXDEV;
305 		goto out_queue_exit;
306 	}
307 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
308 
309 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
310 	rq = __blk_mq_alloc_request(&alloc_data, rw);
311 	if (!rq) {
312 		ret = -EWOULDBLOCK;
313 		goto out_queue_exit;
314 	}
315 
316 	return rq;
317 
318 out_queue_exit:
319 	blk_queue_exit(q);
320 	return ERR_PTR(ret);
321 }
322 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
323 
324 void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
325 			     struct request *rq)
326 {
327 	const int sched_tag = rq->internal_tag;
328 	struct request_queue *q = rq->q;
329 
330 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
331 		atomic_dec(&hctx->nr_active);
332 
333 	wbt_done(q->rq_wb, &rq->issue_stat);
334 	rq->rq_flags = 0;
335 
336 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
337 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
338 	if (rq->tag != -1)
339 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
340 	if (sched_tag != -1)
341 		blk_mq_sched_completed_request(hctx, rq);
342 	blk_mq_sched_restart_queues(hctx);
343 	blk_queue_exit(q);
344 }
345 
346 static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
347 				     struct request *rq)
348 {
349 	struct blk_mq_ctx *ctx = rq->mq_ctx;
350 
351 	ctx->rq_completed[rq_is_sync(rq)]++;
352 	__blk_mq_finish_request(hctx, ctx, rq);
353 }
354 
355 void blk_mq_finish_request(struct request *rq)
356 {
357 	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
358 }
359 
360 void blk_mq_free_request(struct request *rq)
361 {
362 	blk_mq_sched_put_request(rq);
363 }
364 EXPORT_SYMBOL_GPL(blk_mq_free_request);
365 
366 inline void __blk_mq_end_request(struct request *rq, int error)
367 {
368 	blk_account_io_done(rq);
369 
370 	if (rq->end_io) {
371 		wbt_done(rq->q->rq_wb, &rq->issue_stat);
372 		rq->end_io(rq, error);
373 	} else {
374 		if (unlikely(blk_bidi_rq(rq)))
375 			blk_mq_free_request(rq->next_rq);
376 		blk_mq_free_request(rq);
377 	}
378 }
379 EXPORT_SYMBOL(__blk_mq_end_request);
380 
381 void blk_mq_end_request(struct request *rq, int error)
382 {
383 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
384 		BUG();
385 	__blk_mq_end_request(rq, error);
386 }
387 EXPORT_SYMBOL(blk_mq_end_request);
388 
389 static void __blk_mq_complete_request_remote(void *data)
390 {
391 	struct request *rq = data;
392 
393 	rq->q->softirq_done_fn(rq);
394 }
395 
396 static void blk_mq_ipi_complete_request(struct request *rq)
397 {
398 	struct blk_mq_ctx *ctx = rq->mq_ctx;
399 	bool shared = false;
400 	int cpu;
401 
402 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
403 		rq->q->softirq_done_fn(rq);
404 		return;
405 	}
406 
407 	cpu = get_cpu();
408 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
409 		shared = cpus_share_cache(cpu, ctx->cpu);
410 
411 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
412 		rq->csd.func = __blk_mq_complete_request_remote;
413 		rq->csd.info = rq;
414 		rq->csd.flags = 0;
415 		smp_call_function_single_async(ctx->cpu, &rq->csd);
416 	} else {
417 		rq->q->softirq_done_fn(rq);
418 	}
419 	put_cpu();
420 }
421 
422 static void blk_mq_stat_add(struct request *rq)
423 {
424 	if (rq->rq_flags & RQF_STATS) {
425 		/*
426 		 * We could use rq->mq_ctx here, but there's less of a risk
427 		 * of races if we have the completion event add the stats
428 		 * to the local software queue.
429 		 */
430 		struct blk_mq_ctx *ctx;
431 
432 		ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
433 		blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
434 	}
435 }
436 
437 static void __blk_mq_complete_request(struct request *rq)
438 {
439 	struct request_queue *q = rq->q;
440 
441 	blk_mq_stat_add(rq);
442 
443 	if (!q->softirq_done_fn)
444 		blk_mq_end_request(rq, rq->errors);
445 	else
446 		blk_mq_ipi_complete_request(rq);
447 }
448 
449 /**
450  * blk_mq_complete_request - end I/O on a request
451  * @rq:		the request being processed
452  *
453  * Description:
454  *	Ends all I/O on a request. It does not handle partial completions.
455  *	The actual completion happens out-of-order, through an IPI handler.
456  **/
457 void blk_mq_complete_request(struct request *rq, int error)
458 {
459 	struct request_queue *q = rq->q;
460 
461 	if (unlikely(blk_should_fake_timeout(q)))
462 		return;
463 	if (!blk_mark_rq_complete(rq)) {
464 		rq->errors = error;
465 		__blk_mq_complete_request(rq);
466 	}
467 }
468 EXPORT_SYMBOL(blk_mq_complete_request);
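
/*
 * Example (illustrative sketch): the completion callback invoked above as
 * q->softirq_done_fn (typically the driver's blk_mq_ops->complete) usually
 * finishes by calling blk_mq_end_request().  The status translation below is
 * a placeholder assumption.
 */
static void __maybe_unused example_complete(struct request *rq)
{
	int error = 0;	/* a real driver would translate its hardware status here */

	blk_mq_end_request(rq, error);
}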
469 
470 int blk_mq_request_started(struct request *rq)
471 {
472 	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
473 }
474 EXPORT_SYMBOL_GPL(blk_mq_request_started);
475 
476 void blk_mq_start_request(struct request *rq)
477 {
478 	struct request_queue *q = rq->q;
479 
480 	blk_mq_sched_started_request(rq);
481 
482 	trace_block_rq_issue(q, rq);
483 
484 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
485 		blk_stat_set_issue_time(&rq->issue_stat);
486 		rq->rq_flags |= RQF_STATS;
487 		wbt_issue(q->rq_wb, &rq->issue_stat);
488 	}
489 
490 	blk_add_timer(rq);
491 
492 	/*
493 	 * Ensure that ->deadline is visible before we set the started
494 	 * flag and clear the completed flag.
495 	 */
496 	smp_mb__before_atomic();
497 
498 	/*
499 	 * Mark us as started and clear complete. Complete might have been
500 	 * set if requeue raced with timeout, which then marked it as
501 	 * complete. So be sure to clear complete again when we start
502 	 * the request, otherwise we'll ignore the completion event.
503 	 */
504 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
505 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
506 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
507 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
508 
509 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
510 		/*
511 		 * Make sure space for the drain appears.  We know we can do
512 		 * this because max_hw_segments has been adjusted to be one
513 		 * fewer than the device can handle.
514 		 */
515 		rq->nr_phys_segments++;
516 	}
517 }
518 EXPORT_SYMBOL(blk_mq_start_request);
519 
520 static void __blk_mq_requeue_request(struct request *rq)
521 {
522 	struct request_queue *q = rq->q;
523 
524 	trace_block_rq_requeue(q, rq);
525 	wbt_requeue(q->rq_wb, &rq->issue_stat);
526 	blk_mq_sched_requeue_request(rq);
527 
528 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
529 		if (q->dma_drain_size && blk_rq_bytes(rq))
530 			rq->nr_phys_segments--;
531 	}
532 }
533 
534 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
535 {
536 	__blk_mq_requeue_request(rq);
537 
538 	BUG_ON(blk_queued_rq(rq));
539 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
540 }
541 EXPORT_SYMBOL(blk_mq_requeue_request);
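
/*
 * Example (illustrative sketch): a hypothetical completion path that retries
 * a request on a transient failure instead of completing it.  Passing
 * kick_requeue_list = true schedules the requeue work immediately.
 */
static void __maybe_unused example_finish_or_retry(struct request *rq, int error)
{
	if (error == -EAGAIN)
		blk_mq_requeue_request(rq, true);	/* re-queue and kick the list */
	else
		blk_mq_end_request(rq, error);
}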
542 
543 static void blk_mq_requeue_work(struct work_struct *work)
544 {
545 	struct request_queue *q =
546 		container_of(work, struct request_queue, requeue_work.work);
547 	LIST_HEAD(rq_list);
548 	struct request *rq, *next;
549 	unsigned long flags;
550 
551 	spin_lock_irqsave(&q->requeue_lock, flags);
552 	list_splice_init(&q->requeue_list, &rq_list);
553 	spin_unlock_irqrestore(&q->requeue_lock, flags);
554 
555 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
556 		if (!(rq->rq_flags & RQF_SOFTBARRIER))
557 			continue;
558 
559 		rq->rq_flags &= ~RQF_SOFTBARRIER;
560 		list_del_init(&rq->queuelist);
561 		blk_mq_sched_insert_request(rq, true, false, false, true);
562 	}
563 
564 	while (!list_empty(&rq_list)) {
565 		rq = list_entry(rq_list.next, struct request, queuelist);
566 		list_del_init(&rq->queuelist);
567 		blk_mq_sched_insert_request(rq, false, false, false, true);
568 	}
569 
570 	blk_mq_run_hw_queues(q, false);
571 }
572 
573 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
574 				bool kick_requeue_list)
575 {
576 	struct request_queue *q = rq->q;
577 	unsigned long flags;
578 
579 	/*
580 	 * We abuse this flag that is otherwise used by the I/O scheduler to
581 	 * request head insertion from the workqueue.
582 	 */
583 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
584 
585 	spin_lock_irqsave(&q->requeue_lock, flags);
586 	if (at_head) {
587 		rq->rq_flags |= RQF_SOFTBARRIER;
588 		list_add(&rq->queuelist, &q->requeue_list);
589 	} else {
590 		list_add_tail(&rq->queuelist, &q->requeue_list);
591 	}
592 	spin_unlock_irqrestore(&q->requeue_lock, flags);
593 
594 	if (kick_requeue_list)
595 		blk_mq_kick_requeue_list(q);
596 }
597 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
598 
599 void blk_mq_kick_requeue_list(struct request_queue *q)
600 {
601 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
602 }
603 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
604 
605 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
606 				    unsigned long msecs)
607 {
608 	kblockd_schedule_delayed_work(&q->requeue_work,
609 				      msecs_to_jiffies(msecs));
610 }
611 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
612 
613 void blk_mq_abort_requeue_list(struct request_queue *q)
614 {
615 	unsigned long flags;
616 	LIST_HEAD(rq_list);
617 
618 	spin_lock_irqsave(&q->requeue_lock, flags);
619 	list_splice_init(&q->requeue_list, &rq_list);
620 	spin_unlock_irqrestore(&q->requeue_lock, flags);
621 
622 	while (!list_empty(&rq_list)) {
623 		struct request *rq;
624 
625 		rq = list_first_entry(&rq_list, struct request, queuelist);
626 		list_del_init(&rq->queuelist);
627 		rq->errors = -EIO;
628 		blk_mq_end_request(rq, rq->errors);
629 	}
630 }
631 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
632 
633 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
634 {
635 	if (tag < tags->nr_tags) {
636 		prefetch(tags->rqs[tag]);
637 		return tags->rqs[tag];
638 	}
639 
640 	return NULL;
641 }
642 EXPORT_SYMBOL(blk_mq_tag_to_rq);
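
/*
 * Example (illustrative sketch): a hypothetical interrupt handler mapping a
 * completed hardware tag back to its request and kicking off block layer
 * completion.  A status of 0 is assumed for brevity.
 */
static void __maybe_unused example_handle_completion(struct blk_mq_hw_ctx *hctx,
						     unsigned int tag)
{
	struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

	if (rq)
		blk_mq_complete_request(rq, 0);
}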
643 
644 struct blk_mq_timeout_data {
645 	unsigned long next;
646 	unsigned int next_set;
647 };
648 
649 void blk_mq_rq_timed_out(struct request *req, bool reserved)
650 {
651 	const struct blk_mq_ops *ops = req->q->mq_ops;
652 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
653 
654 	/*
655 	 * We know that complete is set at this point. If STARTED isn't set
656 	 * anymore, then the request isn't active and the "timeout" should
657 	 * just be ignored. This can happen due to the bitflag ordering.
658 	 * Timeout first checks if STARTED is set, and if it is, assumes
659 	 * the request is active. But if we race with completion, then
660 	 * both flags will get cleared. So check here again, and ignore
661 	 * a timeout event with a request that isn't active.
662 	 */
663 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
664 		return;
665 
666 	if (ops->timeout)
667 		ret = ops->timeout(req, reserved);
668 
669 	switch (ret) {
670 	case BLK_EH_HANDLED:
671 		__blk_mq_complete_request(req);
672 		break;
673 	case BLK_EH_RESET_TIMER:
674 		blk_add_timer(req);
675 		blk_clear_rq_complete(req);
676 		break;
677 	case BLK_EH_NOT_HANDLED:
678 		break;
679 	default:
680 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
681 		break;
682 	}
683 }
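
/*
 * Example (illustrative sketch): a hypothetical blk_mq_ops->timeout handler,
 * the driver side of blk_mq_rq_timed_out() above.  Returning
 * BLK_EH_RESET_TIMER re-arms the timer and clears the complete flag;
 * BLK_EH_HANDLED makes the core complete the request on the driver's behalf.
 * The reserved-command policy here is an assumption.
 */
static enum blk_eh_timer_return __maybe_unused example_timeout(struct request *rq,
							       bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;	/* e.g. give internal commands more time */

	/* a real driver would abort the command in hardware before this */
	return BLK_EH_HANDLED;
}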
684 
685 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
686 		struct request *rq, void *priv, bool reserved)
687 {
688 	struct blk_mq_timeout_data *data = priv;
689 
690 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
691 		/*
692 		 * If a request wasn't started before the queue was
693 		 * marked dying, kill it here or it'll go unnoticed.
694 		 */
695 		if (unlikely(blk_queue_dying(rq->q))) {
696 			rq->errors = -EIO;
697 			blk_mq_end_request(rq, rq->errors);
698 		}
699 		return;
700 	}
701 
702 	if (time_after_eq(jiffies, rq->deadline)) {
703 		if (!blk_mark_rq_complete(rq))
704 			blk_mq_rq_timed_out(rq, reserved);
705 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
706 		data->next = rq->deadline;
707 		data->next_set = 1;
708 	}
709 }
710 
711 static void blk_mq_timeout_work(struct work_struct *work)
712 {
713 	struct request_queue *q =
714 		container_of(work, struct request_queue, timeout_work);
715 	struct blk_mq_timeout_data data = {
716 		.next		= 0,
717 		.next_set	= 0,
718 	};
719 	int i;
720 
721 	/* A deadlock might occur if a request is stuck requiring a
722 	 * timeout at the same time a queue freeze is waiting for
723 	 * completion, since the timeout code would not be able to
724 	 * acquire the queue reference here.
725 	 *
726 	 * That's why we don't use blk_queue_enter here; instead, we use
727 	 * percpu_ref_tryget directly, because we need to be able to
728 	 * obtain a reference even in the short window between the queue
729 	 * starting to freeze, by dropping the first reference in
730 	 * blk_mq_freeze_queue_start, and the moment the last request is
731 	 * consumed, marked by the instant q_usage_counter reaches
732 	 * zero.
733 	 */
734 	if (!percpu_ref_tryget(&q->q_usage_counter))
735 		return;
736 
737 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
738 
739 	if (data.next_set) {
740 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
741 		mod_timer(&q->timeout, data.next);
742 	} else {
743 		struct blk_mq_hw_ctx *hctx;
744 
745 		queue_for_each_hw_ctx(q, hctx, i) {
746 			/* the hctx may be unmapped, so check it here */
747 			if (blk_mq_hw_queue_mapped(hctx))
748 				blk_mq_tag_idle(hctx);
749 		}
750 	}
751 	blk_queue_exit(q);
752 }
753 
754 /*
755  * Reverse check our software queue for entries that we could potentially
756  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
757  * too much time checking for merges.
758  */
759 static bool blk_mq_attempt_merge(struct request_queue *q,
760 				 struct blk_mq_ctx *ctx, struct bio *bio)
761 {
762 	struct request *rq;
763 	int checked = 8;
764 
765 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
766 		bool merged = false;
767 
768 		if (!checked--)
769 			break;
770 
771 		if (!blk_rq_merge_ok(rq, bio))
772 			continue;
773 
774 		switch (blk_try_merge(rq, bio)) {
775 		case ELEVATOR_BACK_MERGE:
776 			if (blk_mq_sched_allow_merge(q, rq, bio))
777 				merged = bio_attempt_back_merge(q, rq, bio);
778 			break;
779 		case ELEVATOR_FRONT_MERGE:
780 			if (blk_mq_sched_allow_merge(q, rq, bio))
781 				merged = bio_attempt_front_merge(q, rq, bio);
782 			break;
783 		case ELEVATOR_DISCARD_MERGE:
784 			merged = bio_attempt_discard_merge(q, rq, bio);
785 			break;
786 		default:
787 			continue;
788 		}
789 
790 		if (merged)
791 			ctx->rq_merged++;
792 		return merged;
793 	}
794 
795 	return false;
796 }
797 
798 struct flush_busy_ctx_data {
799 	struct blk_mq_hw_ctx *hctx;
800 	struct list_head *list;
801 };
802 
803 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
804 {
805 	struct flush_busy_ctx_data *flush_data = data;
806 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
807 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
808 
809 	sbitmap_clear_bit(sb, bitnr);
810 	spin_lock(&ctx->lock);
811 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
812 	spin_unlock(&ctx->lock);
813 	return true;
814 }
815 
816 /*
817  * Process software queues that have been marked busy, splicing them
818  * to the for-dispatch list.
819  */
820 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
821 {
822 	struct flush_busy_ctx_data data = {
823 		.hctx = hctx,
824 		.list = list,
825 	};
826 
827 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
828 }
829 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
830 
831 static inline unsigned int queued_to_index(unsigned int queued)
832 {
833 	if (!queued)
834 		return 0;
835 
836 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
837 }
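
/*
 * Worked example for queued_to_index() above (added note): a dispatch batch
 * of 1 request lands in bucket 1, 2-3 requests in bucket 2, 4-7 in bucket 3,
 * 8-15 in bucket 4, and so on (ilog2(queued) + 1), capped at
 * BLK_MQ_MAX_DISPATCH_ORDER - 1.
 */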
838 
839 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
840 			   bool wait)
841 {
842 	struct blk_mq_alloc_data data = {
843 		.q = rq->q,
844 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
845 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
846 	};
847 
848 	if (rq->tag != -1) {
849 done:
850 		if (hctx)
851 			*hctx = data.hctx;
852 		return true;
853 	}
854 
855 	rq->tag = blk_mq_get_tag(&data);
856 	if (rq->tag >= 0) {
857 		if (blk_mq_tag_busy(data.hctx)) {
858 			rq->rq_flags |= RQF_MQ_INFLIGHT;
859 			atomic_inc(&data.hctx->nr_active);
860 		}
861 		data.hctx->tags->rqs[rq->tag] = rq;
862 		goto done;
863 	}
864 
865 	return false;
866 }
867 
868 static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
869 				  struct request *rq)
870 {
871 	if (rq->tag == -1 || rq->internal_tag == -1)
872 		return;
873 
874 	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
875 	rq->tag = -1;
876 
877 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
878 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
879 		atomic_dec(&hctx->nr_active);
880 	}
881 }
882 
883 /*
884  * If we fail getting a driver tag because all the driver tags are already
885  * assigned and on the dispatch list, BUT the first entry does not have a
886  * tag, then we could deadlock. For that case, move entries with assigned
887  * driver tags to the front, leaving the set of tagged requests in the
888  * same order, and the untagged set in the same order.
889  */
890 static bool reorder_tags_to_front(struct list_head *list)
891 {
892 	struct request *rq, *tmp, *first = NULL;
893 
894 	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
895 		if (rq == first)
896 			break;
897 		if (rq->tag != -1) {
898 			list_move(&rq->queuelist, list);
899 			if (!first)
900 				first = rq;
901 		}
902 	}
903 
904 	return first != NULL;
905 }
906 
907 static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
908 				void *key)
909 {
910 	struct blk_mq_hw_ctx *hctx;
911 
912 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
913 
914 	list_del(&wait->task_list);
915 	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
916 	blk_mq_run_hw_queue(hctx, true);
917 	return 1;
918 }
919 
920 static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
921 {
922 	struct sbq_wait_state *ws;
923 
924 	/*
925 	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
926 	 * The thread which wins the race to grab this bit adds the hardware
927 	 * queue to the wait queue.
928 	 */
929 	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
930 	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
931 		return false;
932 
933 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
934 	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
935 
936 	/*
937 	 * As soon as this returns, it's no longer safe to fiddle with
938 	 * hctx->dispatch_wait, since a completion can wake up the wait queue
939 	 * and unlock the bit.
940 	 */
941 	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
942 	return true;
943 }
944 
945 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
946 {
947 	struct request_queue *q = hctx->queue;
948 	struct request *rq;
949 	LIST_HEAD(driver_list);
950 	struct list_head *dptr;
951 	int queued, ret = BLK_MQ_RQ_QUEUE_OK;
952 
953 	/*
954 	 * Start off with dptr being NULL, so we start the first request
955 	 * immediately, even if we have more pending.
956 	 */
957 	dptr = NULL;
958 
959 	/*
960 	 * Now process all the entries, sending them to the driver.
961 	 */
962 	queued = 0;
963 	while (!list_empty(list)) {
964 		struct blk_mq_queue_data bd;
965 
966 		rq = list_first_entry(list, struct request, queuelist);
967 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
968 			if (!queued && reorder_tags_to_front(list))
969 				continue;
970 
971 			/*
972 			 * The initial allocation attempt failed, so we need to
973 			 * rerun the hardware queue when a tag is freed.
974 			 */
975 			if (blk_mq_dispatch_wait_add(hctx)) {
976 				/*
977 				 * It's possible that a tag was freed in the
978 				 * window between the allocation failure and
979 				 * adding the hardware queue to the wait queue.
980 				 */
981 				if (!blk_mq_get_driver_tag(rq, &hctx, false))
982 					break;
983 			} else {
984 				break;
985 			}
986 		}
987 
988 		list_del_init(&rq->queuelist);
989 
990 		bd.rq = rq;
991 		bd.list = dptr;
992 		bd.last = list_empty(list);
993 
994 		ret = q->mq_ops->queue_rq(hctx, &bd);
995 		switch (ret) {
996 		case BLK_MQ_RQ_QUEUE_OK:
997 			queued++;
998 			break;
999 		case BLK_MQ_RQ_QUEUE_BUSY:
1000 			blk_mq_put_driver_tag(hctx, rq);
1001 			list_add(&rq->queuelist, list);
1002 			__blk_mq_requeue_request(rq);
1003 			break;
1004 		default:
1005 			pr_err("blk-mq: bad return on queue: %d\n", ret);
1006 		case BLK_MQ_RQ_QUEUE_ERROR:
1007 			rq->errors = -EIO;
1008 			blk_mq_end_request(rq, rq->errors);
1009 			break;
1010 		}
1011 
1012 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
1013 			break;
1014 
1015 		/*
1016 		 * We've done the first request. If we have more than 1
1017 		 * left in the list, set dptr to defer issue.
1018 		 */
1019 		if (!dptr && list->next != list->prev)
1020 			dptr = &driver_list;
1021 	}
1022 
1023 	hctx->dispatched[queued_to_index(queued)]++;
1024 
1025 	/*
1026 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1027 	 * that is where we will continue on the next queue run.
1028 	 */
1029 	if (!list_empty(list)) {
1030 		spin_lock(&hctx->lock);
1031 		list_splice_init(list, &hctx->dispatch);
1032 		spin_unlock(&hctx->lock);
1033 
1034 		/*
1035 		 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
1036 		 * it's possible the queue is stopped and restarted again
1037 		 * before this. Queue restart will dispatch requests. And since
1038 		 * requests in rq_list aren't added into hctx->dispatch yet,
1039 		 * the requests in rq_list might get lost.
1040 		 *
1041 		 * blk_mq_run_hw_queue() already checks the STOPPED bit
1042 		 *
1043 		 * If RESTART or TAG_WAITING is set, then let completion restart
1044 		 * the queue instead of potentially looping here.
1045 		 */
1046 		if (!blk_mq_sched_needs_restart(hctx) &&
1047 		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
1048 			blk_mq_run_hw_queue(hctx, true);
1049 	}
1050 
1051 	return queued != 0;
1052 }
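
/*
 * Example (illustrative sketch): the driver side of the dispatch loop above.
 * A hypothetical ->queue_rq() implementation starts the request and reports
 * one of the BLK_MQ_RQ_QUEUE_* codes that blk_mq_dispatch_rq_list() acts on.
 */
static int __maybe_unused example_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	/*
	 * A real driver would submit rq to hardware here.  Returning
	 * BLK_MQ_RQ_QUEUE_BUSY instead would make the dispatch loop put the
	 * request back and leave the remainder for a later queue run;
	 * BLK_MQ_RQ_QUEUE_ERROR would fail it with -EIO.
	 */
	return BLK_MQ_RQ_QUEUE_OK;
}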
1053 
1054 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1055 {
1056 	int srcu_idx;
1057 
1058 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1059 		cpu_online(hctx->next_cpu));
1060 
1061 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1062 		rcu_read_lock();
1063 		blk_mq_sched_dispatch_requests(hctx);
1064 		rcu_read_unlock();
1065 	} else {
1066 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
1067 		blk_mq_sched_dispatch_requests(hctx);
1068 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1069 	}
1070 }
1071 
1072 /*
1073  * It'd be great if the workqueue API had a way to pass
1074  * in a mask and had some smarts for more clever placement.
1075  * For now we just round-robin here, switching for every
1076  * BLK_MQ_CPU_WORK_BATCH queued items.
1077  */
1078 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1079 {
1080 	if (hctx->queue->nr_hw_queues == 1)
1081 		return WORK_CPU_UNBOUND;
1082 
1083 	if (--hctx->next_cpu_batch <= 0) {
1084 		int next_cpu;
1085 
1086 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1087 		if (next_cpu >= nr_cpu_ids)
1088 			next_cpu = cpumask_first(hctx->cpumask);
1089 
1090 		hctx->next_cpu = next_cpu;
1091 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1092 	}
1093 
1094 	return hctx->next_cpu;
1095 }
1096 
1097 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1098 {
1099 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
1100 		     !blk_mq_hw_queue_mapped(hctx)))
1101 		return;
1102 
1103 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1104 		int cpu = get_cpu();
1105 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1106 			__blk_mq_run_hw_queue(hctx);
1107 			put_cpu();
1108 			return;
1109 		}
1110 
1111 		put_cpu();
1112 	}
1113 
1114 	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
1115 }
1116 
1117 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1118 {
1119 	struct blk_mq_hw_ctx *hctx;
1120 	int i;
1121 
1122 	queue_for_each_hw_ctx(q, hctx, i) {
1123 		if (!blk_mq_hctx_has_pending(hctx) ||
1124 		    blk_mq_hctx_stopped(hctx))
1125 			continue;
1126 
1127 		blk_mq_run_hw_queue(hctx, async);
1128 	}
1129 }
1130 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1131 
1132 /**
1133  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1134  * @q: request queue.
1135  *
1136  * The caller is responsible for serializing this function against
1137  * blk_mq_{start,stop}_hw_queue().
1138  */
1139 bool blk_mq_queue_stopped(struct request_queue *q)
1140 {
1141 	struct blk_mq_hw_ctx *hctx;
1142 	int i;
1143 
1144 	queue_for_each_hw_ctx(q, hctx, i)
1145 		if (blk_mq_hctx_stopped(hctx))
1146 			return true;
1147 
1148 	return false;
1149 }
1150 EXPORT_SYMBOL(blk_mq_queue_stopped);
1151 
1152 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1153 {
1154 	cancel_work(&hctx->run_work);
1155 	cancel_delayed_work(&hctx->delay_work);
1156 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1157 }
1158 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1159 
1160 void blk_mq_stop_hw_queues(struct request_queue *q)
1161 {
1162 	struct blk_mq_hw_ctx *hctx;
1163 	int i;
1164 
1165 	queue_for_each_hw_ctx(q, hctx, i)
1166 		blk_mq_stop_hw_queue(hctx);
1167 }
1168 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1169 
1170 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1171 {
1172 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1173 
1174 	blk_mq_run_hw_queue(hctx, false);
1175 }
1176 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1177 
1178 void blk_mq_start_hw_queues(struct request_queue *q)
1179 {
1180 	struct blk_mq_hw_ctx *hctx;
1181 	int i;
1182 
1183 	queue_for_each_hw_ctx(q, hctx, i)
1184 		blk_mq_start_hw_queue(hctx);
1185 }
1186 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1187 
1188 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1189 {
1190 	if (!blk_mq_hctx_stopped(hctx))
1191 		return;
1192 
1193 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1194 	blk_mq_run_hw_queue(hctx, async);
1195 }
1196 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1197 
1198 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1199 {
1200 	struct blk_mq_hw_ctx *hctx;
1201 	int i;
1202 
1203 	queue_for_each_hw_ctx(q, hctx, i)
1204 		blk_mq_start_stopped_hw_queue(hctx, async);
1205 }
1206 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1207 
1208 static void blk_mq_run_work_fn(struct work_struct *work)
1209 {
1210 	struct blk_mq_hw_ctx *hctx;
1211 
1212 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
1213 
1214 	__blk_mq_run_hw_queue(hctx);
1215 }
1216 
1217 static void blk_mq_delay_work_fn(struct work_struct *work)
1218 {
1219 	struct blk_mq_hw_ctx *hctx;
1220 
1221 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1222 
1223 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1224 		__blk_mq_run_hw_queue(hctx);
1225 }
1226 
1227 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1228 {
1229 	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1230 		return;
1231 
1232 	blk_mq_stop_hw_queue(hctx);
1233 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1234 			&hctx->delay_work, msecs_to_jiffies(msecs));
1235 }
1236 EXPORT_SYMBOL(blk_mq_delay_queue);
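
/*
 * Example (illustrative sketch): a hypothetical driver backing off when a
 * shared resource is temporarily exhausted.  The queue is stopped now and
 * re-run by the delayed work roughly 100ms later; the delay value is an
 * arbitrary example.
 */
static void __maybe_unused example_backoff(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_delay_queue(hctx, 100);
}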
1237 
1238 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1239 					    struct request *rq,
1240 					    bool at_head)
1241 {
1242 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1243 
1244 	trace_block_rq_insert(hctx->queue, rq);
1245 
1246 	if (at_head)
1247 		list_add(&rq->queuelist, &ctx->rq_list);
1248 	else
1249 		list_add_tail(&rq->queuelist, &ctx->rq_list);
1250 }
1251 
1252 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1253 			     bool at_head)
1254 {
1255 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1256 
1257 	__blk_mq_insert_req_list(hctx, rq, at_head);
1258 	blk_mq_hctx_mark_pending(hctx, ctx);
1259 }
1260 
1261 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1262 			    struct list_head *list)
1263 
1264 {
1265 	/*
1266 	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1267 	 * offline now
1268 	 */
1269 	spin_lock(&ctx->lock);
1270 	while (!list_empty(list)) {
1271 		struct request *rq;
1272 
1273 		rq = list_first_entry(list, struct request, queuelist);
1274 		BUG_ON(rq->mq_ctx != ctx);
1275 		list_del_init(&rq->queuelist);
1276 		__blk_mq_insert_req_list(hctx, rq, false);
1277 	}
1278 	blk_mq_hctx_mark_pending(hctx, ctx);
1279 	spin_unlock(&ctx->lock);
1280 }
1281 
1282 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1283 {
1284 	struct request *rqa = container_of(a, struct request, queuelist);
1285 	struct request *rqb = container_of(b, struct request, queuelist);
1286 
1287 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1288 		 (rqa->mq_ctx == rqb->mq_ctx &&
1289 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1290 }
1291 
1292 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1293 {
1294 	struct blk_mq_ctx *this_ctx;
1295 	struct request_queue *this_q;
1296 	struct request *rq;
1297 	LIST_HEAD(list);
1298 	LIST_HEAD(ctx_list);
1299 	unsigned int depth;
1300 
1301 	list_splice_init(&plug->mq_list, &list);
1302 
1303 	list_sort(NULL, &list, plug_ctx_cmp);
1304 
1305 	this_q = NULL;
1306 	this_ctx = NULL;
1307 	depth = 0;
1308 
1309 	while (!list_empty(&list)) {
1310 		rq = list_entry_rq(list.next);
1311 		list_del_init(&rq->queuelist);
1312 		BUG_ON(!rq->q);
1313 		if (rq->mq_ctx != this_ctx) {
1314 			if (this_ctx) {
1315 				trace_block_unplug(this_q, depth, from_schedule);
1316 				blk_mq_sched_insert_requests(this_q, this_ctx,
1317 								&ctx_list,
1318 								from_schedule);
1319 			}
1320 
1321 			this_ctx = rq->mq_ctx;
1322 			this_q = rq->q;
1323 			depth = 0;
1324 		}
1325 
1326 		depth++;
1327 		list_add_tail(&rq->queuelist, &ctx_list);
1328 	}
1329 
1330 	/*
1331 	 * If 'this_ctx' is set, we know we have entries to complete
1332 	 * on 'ctx_list'. Do those.
1333 	 */
1334 	if (this_ctx) {
1335 		trace_block_unplug(this_q, depth, from_schedule);
1336 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1337 						from_schedule);
1338 	}
1339 }
1340 
1341 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1342 {
1343 	init_request_from_bio(rq, bio);
1344 
1345 	blk_account_io_start(rq, true);
1346 }
1347 
1348 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1349 {
1350 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1351 		!blk_queue_nomerges(hctx->queue);
1352 }
1353 
1354 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1355 					 struct blk_mq_ctx *ctx,
1356 					 struct request *rq, struct bio *bio)
1357 {
1358 	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1359 		blk_mq_bio_to_request(rq, bio);
1360 		spin_lock(&ctx->lock);
1361 insert_rq:
1362 		__blk_mq_insert_request(hctx, rq, false);
1363 		spin_unlock(&ctx->lock);
1364 		return false;
1365 	} else {
1366 		struct request_queue *q = hctx->queue;
1367 
1368 		spin_lock(&ctx->lock);
1369 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1370 			blk_mq_bio_to_request(rq, bio);
1371 			goto insert_rq;
1372 		}
1373 
1374 		spin_unlock(&ctx->lock);
1375 		__blk_mq_finish_request(hctx, ctx, rq);
1376 		return true;
1377 	}
1378 }
1379 
1380 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1381 {
1382 	if (rq->tag != -1)
1383 		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1384 
1385 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1386 }
1387 
1388 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1389 {
1390 	struct request_queue *q = rq->q;
1391 	struct blk_mq_queue_data bd = {
1392 		.rq = rq,
1393 		.list = NULL,
1394 		.last = 1
1395 	};
1396 	struct blk_mq_hw_ctx *hctx;
1397 	blk_qc_t new_cookie;
1398 	int ret;
1399 
1400 	if (q->elevator)
1401 		goto insert;
1402 
1403 	if (!blk_mq_get_driver_tag(rq, &hctx, false))
1404 		goto insert;
1405 
1406 	new_cookie = request_to_qc_t(hctx, rq);
1407 
1408 	/*
1409 	 * If queueing succeeded, we are done. If it failed with an error,
1410 	 * kill the request. For any other return (busy), just add it to our
1411 	 * list as we previously would have done.
1412 	 */
1413 	ret = q->mq_ops->queue_rq(hctx, &bd);
1414 	if (ret == BLK_MQ_RQ_QUEUE_OK) {
1415 		*cookie = new_cookie;
1416 		return;
1417 	}
1418 
1419 	__blk_mq_requeue_request(rq);
1420 
1421 	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1422 		*cookie = BLK_QC_T_NONE;
1423 		rq->errors = -EIO;
1424 		blk_mq_end_request(rq, rq->errors);
1425 		return;
1426 	}
1427 
1428 insert:
1429 	blk_mq_sched_insert_request(rq, false, true, true, false);
1430 }
1431 
1432 /*
1433  * Multiple hardware queue variant. This will not use per-process plugs,
1434  * but will attempt to bypass the hctx queueing if we can go straight to
1435  * hardware for SYNC IO.
1436  */
1437 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1438 {
1439 	const int is_sync = op_is_sync(bio->bi_opf);
1440 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1441 	struct blk_mq_alloc_data data = { .flags = 0 };
1442 	struct request *rq;
1443 	unsigned int request_count = 0, srcu_idx;
1444 	struct blk_plug *plug;
1445 	struct request *same_queue_rq = NULL;
1446 	blk_qc_t cookie;
1447 	unsigned int wb_acct;
1448 
1449 	blk_queue_bounce(q, &bio);
1450 
1451 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1452 		bio_io_error(bio);
1453 		return BLK_QC_T_NONE;
1454 	}
1455 
1456 	blk_queue_split(q, &bio, q->bio_split);
1457 
1458 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1459 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1460 		return BLK_QC_T_NONE;
1461 
1462 	if (blk_mq_sched_bio_merge(q, bio))
1463 		return BLK_QC_T_NONE;
1464 
1465 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1466 
1467 	trace_block_getrq(q, bio, bio->bi_opf);
1468 
1469 	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1470 	if (unlikely(!rq)) {
1471 		__wbt_done(q->rq_wb, wb_acct);
1472 		return BLK_QC_T_NONE;
1473 	}
1474 
1475 	wbt_track(&rq->issue_stat, wb_acct);
1476 
1477 	cookie = request_to_qc_t(data.hctx, rq);
1478 
1479 	if (unlikely(is_flush_fua)) {
1480 		if (q->elevator)
1481 			goto elv_insert;
1482 		blk_mq_bio_to_request(rq, bio);
1483 		blk_insert_flush(rq);
1484 		goto run_queue;
1485 	}
1486 
1487 	plug = current->plug;
1488 	/*
1489 	 * If the driver supports deferred issue based on 'last', then
1490 	 * queue it up as normal since we can potentially save some
1491 	 * CPU this way.
1492 	 */
1493 	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1494 	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1495 		struct request *old_rq = NULL;
1496 
1497 		blk_mq_bio_to_request(rq, bio);
1498 
1499 		/*
1500 		 * We do limited plugging. If the bio can be merged, do that.
1501 		 * Otherwise the existing request in the plug list will be
1502 		 * issued. So the plug list will have one request at most.
1503 		 */
1504 		if (plug) {
1505 			/*
1506 			 * The plug list might get flushed before this. If that
1507 			 * happens, same_queue_rq is invalid and the plug list is
1508 			 * empty.
1509 			 */
1510 			if (same_queue_rq && !list_empty(&plug->mq_list)) {
1511 				old_rq = same_queue_rq;
1512 				list_del_init(&old_rq->queuelist);
1513 			}
1514 			list_add_tail(&rq->queuelist, &plug->mq_list);
1515 		} else /* is_sync */
1516 			old_rq = rq;
1517 		blk_mq_put_ctx(data.ctx);
1518 		if (!old_rq)
1519 			goto done;
1520 
1521 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1522 			rcu_read_lock();
1523 			blk_mq_try_issue_directly(old_rq, &cookie);
1524 			rcu_read_unlock();
1525 		} else {
1526 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1527 			blk_mq_try_issue_directly(old_rq, &cookie);
1528 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1529 		}
1530 		goto done;
1531 	}
1532 
1533 	if (q->elevator) {
1534 elv_insert:
1535 		blk_mq_put_ctx(data.ctx);
1536 		blk_mq_bio_to_request(rq, bio);
1537 		blk_mq_sched_insert_request(rq, false, true,
1538 						!is_sync || is_flush_fua, true);
1539 		goto done;
1540 	}
1541 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1542 		/*
1543 		 * For a SYNC request, send it to the hardware immediately. For
1544 		 * an ASYNC request, just ensure that we run it later on. The
1545 		 * latter allows for merging opportunities and more efficient
1546 		 * dispatching.
1547 		 */
1548 run_queue:
1549 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1550 	}
1551 	blk_mq_put_ctx(data.ctx);
1552 done:
1553 	return cookie;
1554 }
1555 
1556 /*
1557  * Single hardware queue variant. This will attempt to use any per-process
1558  * plug for merging and IO deferral.
1559  */
1560 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1561 {
1562 	const int is_sync = op_is_sync(bio->bi_opf);
1563 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1564 	struct blk_plug *plug;
1565 	unsigned int request_count = 0;
1566 	struct blk_mq_alloc_data data = { .flags = 0 };
1567 	struct request *rq;
1568 	blk_qc_t cookie;
1569 	unsigned int wb_acct;
1570 
1571 	blk_queue_bounce(q, &bio);
1572 
1573 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1574 		bio_io_error(bio);
1575 		return BLK_QC_T_NONE;
1576 	}
1577 
1578 	blk_queue_split(q, &bio, q->bio_split);
1579 
1580 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
1581 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1582 			return BLK_QC_T_NONE;
1583 	} else
1584 		request_count = blk_plug_queued_count(q);
1585 
1586 	if (blk_mq_sched_bio_merge(q, bio))
1587 		return BLK_QC_T_NONE;
1588 
1589 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1590 
1591 	trace_block_getrq(q, bio, bio->bi_opf);
1592 
1593 	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1594 	if (unlikely(!rq)) {
1595 		__wbt_done(q->rq_wb, wb_acct);
1596 		return BLK_QC_T_NONE;
1597 	}
1598 
1599 	wbt_track(&rq->issue_stat, wb_acct);
1600 
1601 	cookie = request_to_qc_t(data.hctx, rq);
1602 
1603 	if (unlikely(is_flush_fua)) {
1604 		if (q->elevator)
1605 			goto elv_insert;
1606 		blk_mq_bio_to_request(rq, bio);
1607 		blk_insert_flush(rq);
1608 		goto run_queue;
1609 	}
1610 
1611 	/*
1612 	 * A task plug currently exists. Since this is completely lockless,
1613 	 * utilize that to temporarily store requests until the task is
1614 	 * either done or scheduled away.
1615 	 */
1616 	plug = current->plug;
1617 	if (plug) {
1618 		struct request *last = NULL;
1619 
1620 		blk_mq_bio_to_request(rq, bio);
1621 
1622 		/*
1623 		 * @request_count may become stale because of schedule
1624 		 * out, so check the list again.
1625 		 */
1626 		if (list_empty(&plug->mq_list))
1627 			request_count = 0;
1628 		if (!request_count)
1629 			trace_block_plug(q);
1630 		else
1631 			last = list_entry_rq(plug->mq_list.prev);
1632 
1633 		blk_mq_put_ctx(data.ctx);
1634 
1635 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1636 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1637 			blk_flush_plug_list(plug, false);
1638 			trace_block_plug(q);
1639 		}
1640 
1641 		list_add_tail(&rq->queuelist, &plug->mq_list);
1642 		return cookie;
1643 	}
1644 
1645 	if (q->elevator) {
1646 elv_insert:
1647 		blk_mq_put_ctx(data.ctx);
1648 		blk_mq_bio_to_request(rq, bio);
1649 		blk_mq_sched_insert_request(rq, false, true,
1650 						!is_sync || is_flush_fua, true);
1651 		goto done;
1652 	}
1653 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1654 		/*
1655 		 * For a SYNC request, send it to the hardware immediately. For
1656 		 * an ASYNC request, just ensure that we run it later on. The
1657 		 * latter allows for merging opportunities and more efficient
1658 		 * dispatching.
1659 		 */
1660 run_queue:
1661 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1662 	}
1663 
1664 	blk_mq_put_ctx(data.ctx);
1665 done:
1666 	return cookie;
1667 }
1668 
1669 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1670 		     unsigned int hctx_idx)
1671 {
1672 	struct page *page;
1673 
1674 	if (tags->rqs && set->ops->exit_request) {
1675 		int i;
1676 
1677 		for (i = 0; i < tags->nr_tags; i++) {
1678 			struct request *rq = tags->static_rqs[i];
1679 
1680 			if (!rq)
1681 				continue;
1682 			set->ops->exit_request(set->driver_data, rq,
1683 						hctx_idx, i);
1684 			tags->static_rqs[i] = NULL;
1685 		}
1686 	}
1687 
1688 	while (!list_empty(&tags->page_list)) {
1689 		page = list_first_entry(&tags->page_list, struct page, lru);
1690 		list_del_init(&page->lru);
1691 		/*
1692 		 * Remove kmemleak object previously allocated in
1693 		 * blk_mq_alloc_rqs().
1694 		 */
1695 		kmemleak_free(page_address(page));
1696 		__free_pages(page, page->private);
1697 	}
1698 }
1699 
1700 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1701 {
1702 	kfree(tags->rqs);
1703 	tags->rqs = NULL;
1704 	kfree(tags->static_rqs);
1705 	tags->static_rqs = NULL;
1706 
1707 	blk_mq_free_tags(tags);
1708 }
1709 
1710 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1711 					unsigned int hctx_idx,
1712 					unsigned int nr_tags,
1713 					unsigned int reserved_tags)
1714 {
1715 	struct blk_mq_tags *tags;
1716 
1717 	tags = blk_mq_init_tags(nr_tags, reserved_tags,
1718 				set->numa_node,
1719 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1720 	if (!tags)
1721 		return NULL;
1722 
1723 	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1724 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1725 				 set->numa_node);
1726 	if (!tags->rqs) {
1727 		blk_mq_free_tags(tags);
1728 		return NULL;
1729 	}
1730 
1731 	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1732 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1733 				 set->numa_node);
1734 	if (!tags->static_rqs) {
1735 		kfree(tags->rqs);
1736 		blk_mq_free_tags(tags);
1737 		return NULL;
1738 	}
1739 
1740 	return tags;
1741 }
1742 
1743 static size_t order_to_size(unsigned int order)
1744 {
1745 	return (size_t)PAGE_SIZE << order;
1746 }
1747 
1748 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1749 		     unsigned int hctx_idx, unsigned int depth)
1750 {
1751 	unsigned int i, j, entries_per_page, max_order = 4;
1752 	size_t rq_size, left;
1753 
1754 	INIT_LIST_HEAD(&tags->page_list);
1755 
1756 	/*
1757 	 * rq_size is the size of the request plus driver payload, rounded
1758 	 * to the cacheline size
1759 	 */
1760 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1761 				cache_line_size());
1762 	left = rq_size * depth;
1763 
1764 	for (i = 0; i < depth; ) {
1765 		int this_order = max_order;
1766 		struct page *page;
1767 		int to_do;
1768 		void *p;
1769 
1770 		while (this_order && left < order_to_size(this_order - 1))
1771 			this_order--;
1772 
1773 		do {
1774 			page = alloc_pages_node(set->numa_node,
1775 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1776 				this_order);
1777 			if (page)
1778 				break;
1779 			if (!this_order--)
1780 				break;
1781 			if (order_to_size(this_order) < rq_size)
1782 				break;
1783 		} while (1);
1784 
1785 		if (!page)
1786 			goto fail;
1787 
1788 		page->private = this_order;
1789 		list_add_tail(&page->lru, &tags->page_list);
1790 
1791 		p = page_address(page);
1792 		/*
1793 		 * Allow kmemleak to scan these pages as they contain pointers
1794 		 * to additional allocations made via ops->init_request().
1795 		 */
1796 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1797 		entries_per_page = order_to_size(this_order) / rq_size;
1798 		to_do = min(entries_per_page, depth - i);
1799 		left -= to_do * rq_size;
1800 		for (j = 0; j < to_do; j++) {
1801 			struct request *rq = p;
1802 
1803 			tags->static_rqs[i] = rq;
1804 			if (set->ops->init_request) {
1805 				if (set->ops->init_request(set->driver_data,
1806 						rq, hctx_idx, i,
1807 						set->numa_node)) {
1808 					tags->static_rqs[i] = NULL;
1809 					goto fail;
1810 				}
1811 			}
1812 
1813 			p += rq_size;
1814 			i++;
1815 		}
1816 	}
1817 	return 0;
1818 
1819 fail:
1820 	blk_mq_free_rqs(set, tags, hctx_idx);
1821 	return -ENOMEM;
1822 }
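
/*
 * Worked example for blk_mq_alloc_rqs() above (added note): if
 * sizeof(struct request) + cmd_size rounds up to 512 bytes and an order-2
 * chunk is obtained (4 pages, 16KB with 4KB pages), then
 * entries_per_page = 16384 / 512 = 32, so 32 requests are carved out of that
 * chunk before the next allocation.  The numbers are assumptions for
 * illustration only.
 */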
1823 
1824 /*
1825  * 'cpu' is going away. Splice any existing rq_list entries from this
1826  * software queue to the hw queue dispatch list, and ensure that it
1827  * gets run.
1828  */
1829 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1830 {
1831 	struct blk_mq_hw_ctx *hctx;
1832 	struct blk_mq_ctx *ctx;
1833 	LIST_HEAD(tmp);
1834 
1835 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1836 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1837 
1838 	spin_lock(&ctx->lock);
1839 	if (!list_empty(&ctx->rq_list)) {
1840 		list_splice_init(&ctx->rq_list, &tmp);
1841 		blk_mq_hctx_clear_pending(hctx, ctx);
1842 	}
1843 	spin_unlock(&ctx->lock);
1844 
1845 	if (list_empty(&tmp))
1846 		return 0;
1847 
1848 	spin_lock(&hctx->lock);
1849 	list_splice_tail_init(&tmp, &hctx->dispatch);
1850 	spin_unlock(&hctx->lock);
1851 
1852 	blk_mq_run_hw_queue(hctx, true);
1853 	return 0;
1854 }
1855 
1856 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1857 {
1858 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1859 					    &hctx->cpuhp_dead);
1860 }
1861 
1862 /* hctx->ctxs will be freed in queue's release handler */
1863 static void blk_mq_exit_hctx(struct request_queue *q,
1864 		struct blk_mq_tag_set *set,
1865 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1866 {
1867 	unsigned flush_start_tag = set->queue_depth;
1868 
1869 	blk_mq_tag_idle(hctx);
1870 
1871 	if (set->ops->exit_request)
1872 		set->ops->exit_request(set->driver_data,
1873 				       hctx->fq->flush_rq, hctx_idx,
1874 				       flush_start_tag + hctx_idx);
1875 
1876 	if (set->ops->exit_hctx)
1877 		set->ops->exit_hctx(hctx, hctx_idx);
1878 
1879 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1880 		cleanup_srcu_struct(&hctx->queue_rq_srcu);
1881 
1882 	blk_mq_remove_cpuhp(hctx);
1883 	blk_free_flush_queue(hctx->fq);
1884 	sbitmap_free(&hctx->ctx_map);
1885 }
1886 
1887 static void blk_mq_exit_hw_queues(struct request_queue *q,
1888 		struct blk_mq_tag_set *set, int nr_queue)
1889 {
1890 	struct blk_mq_hw_ctx *hctx;
1891 	unsigned int i;
1892 
1893 	queue_for_each_hw_ctx(q, hctx, i) {
1894 		if (i == nr_queue)
1895 			break;
1896 		blk_mq_exit_hctx(q, set, hctx, i);
1897 	}
1898 }
1899 
1900 static void blk_mq_free_hw_queues(struct request_queue *q,
1901 		struct blk_mq_tag_set *set)
1902 {
1903 	struct blk_mq_hw_ctx *hctx;
1904 	unsigned int i;
1905 
1906 	queue_for_each_hw_ctx(q, hctx, i)
1907 		free_cpumask_var(hctx->cpumask);
1908 }
1909 
1910 static int blk_mq_init_hctx(struct request_queue *q,
1911 		struct blk_mq_tag_set *set,
1912 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1913 {
1914 	int node;
1915 	unsigned flush_start_tag = set->queue_depth;
1916 
1917 	node = hctx->numa_node;
1918 	if (node == NUMA_NO_NODE)
1919 		node = hctx->numa_node = set->numa_node;
1920 
1921 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1922 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1923 	spin_lock_init(&hctx->lock);
1924 	INIT_LIST_HEAD(&hctx->dispatch);
1925 	hctx->queue = q;
1926 	hctx->queue_num = hctx_idx;
1927 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1928 
1929 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1930 
1931 	hctx->tags = set->tags[hctx_idx];
1932 
1933 	/*
1934 	 * Allocate space for all possible cpus to avoid allocation at
1935 	 * runtime
1936 	 */
1937 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1938 					GFP_KERNEL, node);
1939 	if (!hctx->ctxs)
1940 		goto unregister_cpu_notifier;
1941 
1942 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1943 			      node))
1944 		goto free_ctxs;
1945 
1946 	hctx->nr_ctx = 0;
1947 
1948 	if (set->ops->init_hctx &&
1949 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1950 		goto free_bitmap;
1951 
1952 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1953 	if (!hctx->fq)
1954 		goto exit_hctx;
1955 
1956 	if (set->ops->init_request &&
1957 	    set->ops->init_request(set->driver_data,
1958 				   hctx->fq->flush_rq, hctx_idx,
1959 				   flush_start_tag + hctx_idx, node))
1960 		goto free_fq;
1961 
1962 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1963 		init_srcu_struct(&hctx->queue_rq_srcu);
1964 
1965 	return 0;
1966 
1967  free_fq:
1968 	kfree(hctx->fq);
1969  exit_hctx:
1970 	if (set->ops->exit_hctx)
1971 		set->ops->exit_hctx(hctx, hctx_idx);
1972  free_bitmap:
1973 	sbitmap_free(&hctx->ctx_map);
1974  free_ctxs:
1975 	kfree(hctx->ctxs);
1976  unregister_cpu_notifier:
1977 	blk_mq_remove_cpuhp(hctx);
1978 	return -1;
1979 }
1980 
1981 static void blk_mq_init_cpu_queues(struct request_queue *q,
1982 				   unsigned int nr_hw_queues)
1983 {
1984 	unsigned int i;
1985 
1986 	for_each_possible_cpu(i) {
1987 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1988 		struct blk_mq_hw_ctx *hctx;
1989 
1990 		memset(__ctx, 0, sizeof(*__ctx));
1991 		__ctx->cpu = i;
1992 		spin_lock_init(&__ctx->lock);
1993 		INIT_LIST_HEAD(&__ctx->rq_list);
1994 		__ctx->queue = q;
1995 		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
1996 		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
1997 
1998 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1999 		if (!cpu_online(i))
2000 			continue;
2001 
2002 		hctx = blk_mq_map_queue(q, i);
2003 
2004 		/*
2005 		 * Set local node, IFF we have more than one hw queue. If
2006 		 * not, we remain on the home node of the device
2007 		 */
2008 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2009 			hctx->numa_node = local_memory_node(cpu_to_node(i));
2010 	}
2011 }
2012 
2013 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2014 {
2015 	int ret = 0;
2016 
2017 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2018 					set->queue_depth, set->reserved_tags);
2019 	if (!set->tags[hctx_idx])
2020 		return false;
2021 
2022 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2023 				set->queue_depth);
2024 	if (!ret)
2025 		return true;
2026 
2027 	blk_mq_free_rq_map(set->tags[hctx_idx]);
2028 	set->tags[hctx_idx] = NULL;
2029 	return false;
2030 }
2031 
2032 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2033 					 unsigned int hctx_idx)
2034 {
2035 	if (set->tags[hctx_idx]) {
2036 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2037 		blk_mq_free_rq_map(set->tags[hctx_idx]);
2038 		set->tags[hctx_idx] = NULL;
2039 	}
2040 }
2041 
2042 static void blk_mq_map_swqueue(struct request_queue *q,
2043 			       const struct cpumask *online_mask)
2044 {
2045 	unsigned int i, hctx_idx;
2046 	struct blk_mq_hw_ctx *hctx;
2047 	struct blk_mq_ctx *ctx;
2048 	struct blk_mq_tag_set *set = q->tag_set;
2049 
2050 	/*
2051 	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2052 	 */
2053 	mutex_lock(&q->sysfs_lock);
2054 
2055 	queue_for_each_hw_ctx(q, hctx, i) {
2056 		cpumask_clear(hctx->cpumask);
2057 		hctx->nr_ctx = 0;
2058 	}
2059 
2060 	/*
2061 	 * Map software to hardware queues
2062 	 */
2063 	for_each_possible_cpu(i) {
2064 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
2065 		if (!cpumask_test_cpu(i, online_mask))
2066 			continue;
2067 
2068 		hctx_idx = q->mq_map[i];
2069 		/* an unmapped hw queue can be remapped after the CPU topology changes */
2070 		if (!set->tags[hctx_idx] &&
2071 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2072 			/*
2073 			 * If tags initialization fails for some hctx,
2074 			 * that hctx won't be brought online.  In this
2075 			 * case, remap the current ctx to hctx[0] which
2076 			 * is guaranteed to always have tags allocated
2077 			 */
2078 			q->mq_map[i] = 0;
2079 		}
2080 
2081 		ctx = per_cpu_ptr(q->queue_ctx, i);
2082 		hctx = blk_mq_map_queue(q, i);
2083 
2084 		cpumask_set_cpu(i, hctx->cpumask);
2085 		ctx->index_hw = hctx->nr_ctx;
2086 		hctx->ctxs[hctx->nr_ctx++] = ctx;
2087 	}
2088 
2089 	mutex_unlock(&q->sysfs_lock);
2090 
2091 	queue_for_each_hw_ctx(q, hctx, i) {
2092 		/*
2093 		 * If no software queues are mapped to this hardware queue,
2094 		 * disable it and free the request entries.
2095 		 */
2096 		if (!hctx->nr_ctx) {
2097 			/* Never unmap queue 0.  We need it as a
2098 			 * fallback in case allocation for a
2099 			 * new remap fails.
2100 			 */
2101 			if (i && set->tags[i])
2102 				blk_mq_free_map_and_requests(set, i);
2103 
2104 			hctx->tags = NULL;
2105 			continue;
2106 		}
2107 
2108 		hctx->tags = set->tags[i];
2109 		WARN_ON(!hctx->tags);
2110 
2111 		/*
2112 		 * Set the map size to the number of mapped software queues.
2113 		 * This is more accurate and more efficient than looping
2114 		 * over all possibly mapped software queues.
2115 		 */
2116 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2117 
2118 		/*
2119 		 * Initialize batch roundrobin counts
2120 		 */
2121 		hctx->next_cpu = cpumask_first(hctx->cpumask);
2122 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2123 	}
2124 }
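
/*
 * Illustration of the mapping built above (not part of the code; the values
 * are hypothetical): with four possible CPUs, two hardware queues and
 * q->mq_map = { 0, 0, 1, 1 }, hctx0 ends up with ctxs[] = { ctx0, ctx1 }
 * (ctx0->index_hw = 0, ctx1->index_hw = 1) and hctx1 with
 * ctxs[] = { ctx2, ctx3 } (ctx2->index_hw = 0, ctx3->index_hw = 1); each
 * hctx->ctx_map is then resized to its nr_ctx of 2 entries.
 */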
2125 
2126 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2127 {
2128 	struct blk_mq_hw_ctx *hctx;
2129 	int i;
2130 
2131 	queue_for_each_hw_ctx(q, hctx, i) {
2132 		if (shared)
2133 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
2134 		else
2135 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2136 	}
2137 }
2138 
2139 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2140 {
2141 	struct request_queue *q;
2142 
2143 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2144 		blk_mq_freeze_queue(q);
2145 		queue_set_hctx_shared(q, shared);
2146 		blk_mq_unfreeze_queue(q);
2147 	}
2148 }
2149 
2150 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2151 {
2152 	struct blk_mq_tag_set *set = q->tag_set;
2153 
2154 	mutex_lock(&set->tag_list_lock);
2155 	list_del_init(&q->tag_set_list);
2156 	if (list_is_singular(&set->tag_list)) {
2157 		/* just transitioned to unshared */
2158 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
2159 		/* update existing queue */
2160 		blk_mq_update_tag_set_depth(set, false);
2161 	}
2162 	mutex_unlock(&set->tag_list_lock);
2163 }
2164 
2165 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2166 				     struct request_queue *q)
2167 {
2168 	q->tag_set = set;
2169 
2170 	mutex_lock(&set->tag_list_lock);
2171 
2172 	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2173 	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2174 		set->flags |= BLK_MQ_F_TAG_SHARED;
2175 		/* update existing queue */
2176 		blk_mq_update_tag_set_depth(set, true);
2177 	}
2178 	if (set->flags & BLK_MQ_F_TAG_SHARED)
2179 		queue_set_hctx_shared(q, true);
2180 	list_add_tail(&q->tag_set_list, &set->tag_list);
2181 
2182 	mutex_unlock(&set->tag_list_lock);
2183 }
2184 
2185 /*
2186  * This is the actual release handler for blk-mq, but we do it from the
2187  * request queue's release handler to avoid use-after-free issues;
2188  * q->mq_kobj shouldn't have been introduced, but we can't group the
2189  * ctx/hctx kobjects without it.
2190  */
2191 void blk_mq_release(struct request_queue *q)
2192 {
2193 	struct blk_mq_hw_ctx *hctx;
2194 	unsigned int i;
2195 
2196 	blk_mq_sched_teardown(q);
2197 
2198 	/* hctx kobj stays in hctx */
2199 	queue_for_each_hw_ctx(q, hctx, i) {
2200 		if (!hctx)
2201 			continue;
2202 		kfree(hctx->ctxs);
2203 		kfree(hctx);
2204 	}
2205 
2206 	q->mq_map = NULL;
2207 
2208 	kfree(q->queue_hw_ctx);
2209 
2210 	/* ctx kobj stays in queue_ctx */
2211 	free_percpu(q->queue_ctx);
2212 }
2213 
2214 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2215 {
2216 	struct request_queue *uninit_q, *q;
2217 
2218 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2219 	if (!uninit_q)
2220 		return ERR_PTR(-ENOMEM);
2221 
2222 	q = blk_mq_init_allocated_queue(set, uninit_q);
2223 	if (IS_ERR(q))
2224 		blk_cleanup_queue(uninit_q);
2225 
2226 	return q;
2227 }
2228 EXPORT_SYMBOL(blk_mq_init_queue);
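
/*
 * Example (illustrative sketch, not part of blk-mq core): a driver that has
 * already allocated its tag set (see blk_mq_alloc_tag_set() below) typically
 * creates its request queue like this.  The example_dev names are
 * hypothetical.
 *
 *	struct request_queue *q;
 *
 *	q = blk_mq_init_queue(&example_dev->tag_set);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	q->queuedata = example_dev;
 *	example_dev->disk->queue = q;
 */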
2229 
2230 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2231 						struct request_queue *q)
2232 {
2233 	int i, j;
2234 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2235 
2236 	blk_mq_sysfs_unregister(q);
2237 	for (i = 0; i < set->nr_hw_queues; i++) {
2238 		int node;
2239 
2240 		if (hctxs[i])
2241 			continue;
2242 
2243 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
2244 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2245 					GFP_KERNEL, node);
2246 		if (!hctxs[i])
2247 			break;
2248 
2249 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2250 						node)) {
2251 			kfree(hctxs[i]);
2252 			hctxs[i] = NULL;
2253 			break;
2254 		}
2255 
2256 		atomic_set(&hctxs[i]->nr_active, 0);
2257 		hctxs[i]->numa_node = node;
2258 		hctxs[i]->queue_num = i;
2259 
2260 		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2261 			free_cpumask_var(hctxs[i]->cpumask);
2262 			kfree(hctxs[i]);
2263 			hctxs[i] = NULL;
2264 			break;
2265 		}
2266 		blk_mq_hctx_kobj_init(hctxs[i]);
2267 	}
2268 	for (j = i; j < q->nr_hw_queues; j++) {
2269 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2270 
2271 		if (hctx) {
2272 			if (hctx->tags)
2273 				blk_mq_free_map_and_requests(set, j);
2274 			blk_mq_exit_hctx(q, set, hctx, j);
2275 			free_cpumask_var(hctx->cpumask);
2276 			kobject_put(&hctx->kobj);
2277 			kfree(hctx->ctxs);
2278 			kfree(hctx);
2279 			hctxs[j] = NULL;
2280 
2281 		}
2282 	}
2283 	q->nr_hw_queues = i;
2284 	blk_mq_sysfs_register(q);
2285 }
2286 
2287 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2288 						  struct request_queue *q)
2289 {
2290 	/* mark the queue as mq asap */
2291 	q->mq_ops = set->ops;
2292 
2293 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2294 	if (!q->queue_ctx)
2295 		goto err_exit;
2296 
2297 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2298 						GFP_KERNEL, set->numa_node);
2299 	if (!q->queue_hw_ctx)
2300 		goto err_percpu;
2301 
2302 	q->mq_map = set->mq_map;
2303 
2304 	blk_mq_realloc_hw_ctxs(set, q);
2305 	if (!q->nr_hw_queues)
2306 		goto err_hctxs;
2307 
2308 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2309 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2310 
2311 	q->nr_queues = nr_cpu_ids;
2312 
2313 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2314 
2315 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
2316 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2317 
2318 	q->sg_reserved_size = INT_MAX;
2319 
2320 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2321 	INIT_LIST_HEAD(&q->requeue_list);
2322 	spin_lock_init(&q->requeue_lock);
2323 
2324 	if (q->nr_hw_queues > 1)
2325 		blk_queue_make_request(q, blk_mq_make_request);
2326 	else
2327 		blk_queue_make_request(q, blk_sq_make_request);
2328 
2329 	/*
2330 	 * Do this after blk_queue_make_request() overrides it...
2331 	 */
2332 	q->nr_requests = set->queue_depth;
2333 
2334 	/*
2335 	 * Default to classic polling
2336 	 */
2337 	q->poll_nsec = -1;
2338 
2339 	if (set->ops->complete)
2340 		blk_queue_softirq_done(q, set->ops->complete);
2341 
2342 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2343 
2344 	get_online_cpus();
2345 	mutex_lock(&all_q_mutex);
2346 
2347 	list_add_tail(&q->all_q_node, &all_q_list);
2348 	blk_mq_add_queue_tag_set(set, q);
2349 	blk_mq_map_swqueue(q, cpu_online_mask);
2350 
2351 	mutex_unlock(&all_q_mutex);
2352 	put_online_cpus();
2353 
2354 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2355 		int ret;
2356 
2357 		ret = blk_mq_sched_init(q);
2358 		if (ret)
2359 			return ERR_PTR(ret);
2360 	}
2361 
2362 	return q;
2363 
2364 err_hctxs:
2365 	kfree(q->queue_hw_ctx);
2366 err_percpu:
2367 	free_percpu(q->queue_ctx);
2368 err_exit:
2369 	q->mq_ops = NULL;
2370 	return ERR_PTR(-ENOMEM);
2371 }
2372 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2373 
2374 void blk_mq_free_queue(struct request_queue *q)
2375 {
2376 	struct blk_mq_tag_set	*set = q->tag_set;
2377 
2378 	mutex_lock(&all_q_mutex);
2379 	list_del_init(&q->all_q_node);
2380 	mutex_unlock(&all_q_mutex);
2381 
2382 	wbt_exit(q);
2383 
2384 	blk_mq_del_queue_tag_set(q);
2385 
2386 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2387 	blk_mq_free_hw_queues(q, set);
2388 }
2389 
2390 /* Basically redo blk_mq_init_queue with queue frozen */
2391 static void blk_mq_queue_reinit(struct request_queue *q,
2392 				const struct cpumask *online_mask)
2393 {
2394 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2395 
2396 	blk_mq_sysfs_unregister(q);
2397 
2398 	/*
2399 	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2400 	 * we should change the hctx numa_node according to the new topology
2401 	 * (this involves freeing and re-allocating memory; is it worth doing?)
2402 	 */
2403 
2404 	blk_mq_map_swqueue(q, online_mask);
2405 
2406 	blk_mq_sysfs_register(q);
2407 }
2408 
2409 /*
2410  * New online cpumask which is going to be set in this hotplug event.
2411  * Declare this cpumask as global, as cpu-hotplug operations are invoked
2412  * one by one and dynamically allocating it could fail.
2413  */
2414 static struct cpumask cpuhp_online_new;
2415 
2416 static void blk_mq_queue_reinit_work(void)
2417 {
2418 	struct request_queue *q;
2419 
2420 	mutex_lock(&all_q_mutex);
2421 	/*
2422 	 * We need to freeze and reinit all existing queues.  Freezing
2423 	 * involves synchronous wait for an RCU grace period and doing it
2424 	 * one by one may take a long time.  Start freezing all queues in
2425 	 * one swoop and then wait for the completions so that freezing can
2426 	 * take place in parallel.
2427 	 */
2428 	list_for_each_entry(q, &all_q_list, all_q_node)
2429 		blk_mq_freeze_queue_start(q);
2430 	list_for_each_entry(q, &all_q_list, all_q_node)
2431 		blk_mq_freeze_queue_wait(q);
2432 
2433 	list_for_each_entry(q, &all_q_list, all_q_node)
2434 		blk_mq_queue_reinit(q, &cpuhp_online_new);
2435 
2436 	list_for_each_entry(q, &all_q_list, all_q_node)
2437 		blk_mq_unfreeze_queue(q);
2438 
2439 	mutex_unlock(&all_q_mutex);
2440 }
2441 
2442 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2443 {
2444 	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2445 	blk_mq_queue_reinit_work();
2446 	return 0;
2447 }
2448 
2449 /*
2450  * Before a hot-added cpu starts handling requests, new mappings must be
2451  * established.  Otherwise, requests in its hw queue might never be
2452  * dispatched.
2453  *
2454  * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2455  * for CPU0, and ctx1 for CPU1).
2456  *
2457  * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list,
2458  * setting bit0 in the pending bitmap, as ctx1->index_hw is still zero.
2459  *
2460  * Then, while running the hw queue, blk_mq_flush_busy_ctxs() finds bit0 set
2461  * in the pending bitmap and tries to retrieve requests from
2462  * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0, so the
2463  * request in ctx1->rq_list is ignored.
2464  */
2465 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2466 {
2467 	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2468 	cpumask_set_cpu(cpu, &cpuhp_online_new);
2469 	blk_mq_queue_reinit_work();
2470 	return 0;
2471 }
2472 
2473 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2474 {
2475 	int i;
2476 
2477 	for (i = 0; i < set->nr_hw_queues; i++)
2478 		if (!__blk_mq_alloc_rq_map(set, i))
2479 			goto out_unwind;
2480 
2481 	return 0;
2482 
2483 out_unwind:
2484 	while (--i >= 0)
2485 		blk_mq_free_rq_map(set->tags[i]);
2486 
2487 	return -ENOMEM;
2488 }
2489 
2490 /*
2491  * Allocate the request maps associated with this tag_set. Note that this
2492  * may reduce the depth asked for, if memory is tight. set->queue_depth
2493  * will be updated to reflect the allocated depth.
2494  */
2495 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2496 {
2497 	unsigned int depth;
2498 	int err;
2499 
2500 	depth = set->queue_depth;
2501 	do {
2502 		err = __blk_mq_alloc_rq_maps(set);
2503 		if (!err)
2504 			break;
2505 
2506 		set->queue_depth >>= 1;
2507 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2508 			err = -ENOMEM;
2509 			break;
2510 		}
2511 	} while (set->queue_depth);
2512 
2513 	if (!set->queue_depth || err) {
2514 		pr_err("blk-mq: failed to allocate request map\n");
2515 		return -ENOMEM;
2516 	}
2517 
2518 	if (depth != set->queue_depth)
2519 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2520 						depth, set->queue_depth);
2521 
2522 	return 0;
2523 }
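
/*
 * Worked example of the fallback above: with set->queue_depth == 256 and no
 * reserved tags, a failing allocation is retried at depth 128, 64, 32, ...
 * and we give up once the depth would drop below
 * set->reserved_tags + BLK_MQ_TAG_MIN.  If the retry succeeds at, say, 64,
 * set->queue_depth is left at 64 and the reduction is logged.
 */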
2524 
2525 /*
2526  * Alloc a tag set to be associated with one or more request queues.
2527  * May fail with EINVAL for various error conditions. May adjust the
2528  * requested depth down if it is too large. In that case, the set
2529  * value will be stored in set->queue_depth.
2530  */
2531 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2532 {
2533 	int ret;
2534 
2535 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2536 
2537 	if (!set->nr_hw_queues)
2538 		return -EINVAL;
2539 	if (!set->queue_depth)
2540 		return -EINVAL;
2541 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2542 		return -EINVAL;
2543 
2544 	if (!set->ops->queue_rq)
2545 		return -EINVAL;
2546 
2547 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2548 		pr_info("blk-mq: reduced tag depth to %u\n",
2549 			BLK_MQ_MAX_DEPTH);
2550 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2551 	}
2552 
2553 	/*
2554 	 * If a crashdump is active, then we are potentially in a very
2555 	 * memory constrained environment. Limit us to 1 queue and
2556 	 * 64 tags to prevent using too much memory.
2557 	 */
2558 	if (is_kdump_kernel()) {
2559 		set->nr_hw_queues = 1;
2560 		set->queue_depth = min(64U, set->queue_depth);
2561 	}
2562 	/*
2563 	 * There is no use for more h/w queues than cpus.
2564 	 */
2565 	if (set->nr_hw_queues > nr_cpu_ids)
2566 		set->nr_hw_queues = nr_cpu_ids;
2567 
2568 	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2569 				 GFP_KERNEL, set->numa_node);
2570 	if (!set->tags)
2571 		return -ENOMEM;
2572 
2573 	ret = -ENOMEM;
2574 	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2575 			GFP_KERNEL, set->numa_node);
2576 	if (!set->mq_map)
2577 		goto out_free_tags;
2578 
2579 	if (set->ops->map_queues)
2580 		ret = set->ops->map_queues(set);
2581 	else
2582 		ret = blk_mq_map_queues(set);
2583 	if (ret)
2584 		goto out_free_mq_map;
2585 
2586 	ret = blk_mq_alloc_rq_maps(set);
2587 	if (ret)
2588 		goto out_free_mq_map;
2589 
2590 	mutex_init(&set->tag_list_lock);
2591 	INIT_LIST_HEAD(&set->tag_list);
2592 
2593 	return 0;
2594 
2595 out_free_mq_map:
2596 	kfree(set->mq_map);
2597 	set->mq_map = NULL;
2598 out_free_tags:
2599 	kfree(set->tags);
2600 	set->tags = NULL;
2601 	return ret;
2602 }
2603 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
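
/*
 * Example (illustrative sketch, not part of blk-mq core): a minimal tag set
 * setup that satisfies the validation above.  example_dev, example_mq_ops and
 * struct example_cmd are hypothetical driver objects; example_mq_ops must at
 * least provide ->queue_rq().
 *
 *	memset(&example_dev->tag_set, 0, sizeof(example_dev->tag_set));
 *	example_dev->tag_set.ops = &example_mq_ops;
 *	example_dev->tag_set.nr_hw_queues = 1;
 *	example_dev->tag_set.queue_depth = 64;
 *	example_dev->tag_set.numa_node = NUMA_NO_NODE;
 *	example_dev->tag_set.cmd_size = sizeof(struct example_cmd);
 *	example_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	example_dev->tag_set.driver_data = example_dev;
 *
 *	ret = blk_mq_alloc_tag_set(&example_dev->tag_set);
 *	if (ret)
 *		return ret;
 */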
2604 
2605 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2606 {
2607 	int i;
2608 
2609 	for (i = 0; i < nr_cpu_ids; i++)
2610 		blk_mq_free_map_and_requests(set, i);
2611 
2612 	kfree(set->mq_map);
2613 	set->mq_map = NULL;
2614 
2615 	kfree(set->tags);
2616 	set->tags = NULL;
2617 }
2618 EXPORT_SYMBOL(blk_mq_free_tag_set);
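
/*
 * Example teardown ordering (illustrative sketch): queues created from a tag
 * set should be cleaned up before the set itself is freed, since the hw queue
 * exit path still references set->tags.
 *
 *	blk_cleanup_queue(example_dev->queue);
 *	blk_mq_free_tag_set(&example_dev->tag_set);
 */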
2619 
2620 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2621 {
2622 	struct blk_mq_tag_set *set = q->tag_set;
2623 	struct blk_mq_hw_ctx *hctx;
2624 	int i, ret;
2625 
2626 	if (!set)
2627 		return -EINVAL;
2628 
2629 	blk_mq_freeze_queue(q);
2630 	blk_mq_quiesce_queue(q);
2631 
2632 	ret = 0;
2633 	queue_for_each_hw_ctx(q, hctx, i) {
2634 		if (!hctx->tags)
2635 			continue;
2636 		/*
2637 		 * If we're using an MQ scheduler, just update the scheduler
2638 		 * queue depth. This is similar to what the old code would do.
2639 		 */
2640 		if (!hctx->sched_tags) {
2641 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2642 							min(nr, set->queue_depth),
2643 							false);
2644 		} else {
2645 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2646 							nr, true);
2647 		}
2648 		if (ret)
2649 			break;
2650 	}
2651 
2652 	if (!ret)
2653 		q->nr_requests = nr;
2654 
2655 	blk_mq_unfreeze_queue(q);
2656 	blk_mq_start_stopped_hw_queues(q, true);
2657 
2658 	return ret;
2659 }
2660 
2661 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2662 {
2663 	struct request_queue *q;
2664 
2665 	if (nr_hw_queues > nr_cpu_ids)
2666 		nr_hw_queues = nr_cpu_ids;
2667 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2668 		return;
2669 
2670 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2671 		blk_mq_freeze_queue(q);
2672 
2673 	set->nr_hw_queues = nr_hw_queues;
2674 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2675 		blk_mq_realloc_hw_ctxs(set, q);
2676 
2677 		/*
2678 		 * Manually set the make_request_fn as blk_queue_make_request
2679 		 * resets a lot of the queue settings.
2680 		 */
2681 		if (q->nr_hw_queues > 1)
2682 			q->make_request_fn = blk_mq_make_request;
2683 		else
2684 			q->make_request_fn = blk_sq_make_request;
2685 
2686 		blk_mq_queue_reinit(q, cpu_online_mask);
2687 	}
2688 
2689 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2690 		blk_mq_unfreeze_queue(q);
2691 }
2692 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
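
/*
 * Example (illustrative sketch): a driver that finds it can use more hardware
 * queues (e.g. after additional interrupt vectors become available) can
 * resize every queue sharing its tag set in one call.  The example_dev names
 * are hypothetical.
 *
 *	blk_mq_update_nr_hw_queues(&example_dev->tag_set,
 *				   example_dev->nr_hw_vectors);
 */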
2693 
2694 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2695 				       struct blk_mq_hw_ctx *hctx,
2696 				       struct request *rq)
2697 {
2698 	struct blk_rq_stat stat[2];
2699 	unsigned long ret = 0;
2700 
2701 	/*
2702 	 * If stats collection isn't on, don't sleep but turn it on for
2703 	 * future users
2704 	 */
2705 	if (!blk_stat_enable(q))
2706 		return 0;
2707 
2708 	/*
2709 	 * We don't have to do this once per IO; we should optimize this
2710 	 * to just use the current window of stats until it changes.
2711 	 */
2712 	memset(&stat, 0, sizeof(stat));
2713 	blk_hctx_stat_get(hctx, stat);
2714 
2715 	/*
2716 	 * As an optimistic guess, use half of the mean service time
2717 	 * for this type of request. We can (and should) make this smarter.
2718 	 * For instance, if the completion latencies are tight, we can
2719 	 * get closer than just half the mean. This is especially
2720 	 * important on devices where the completion latencies are longer
2721 	 * than ~10 usec.
2722 	 */
2723 	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2724 		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2725 	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2726 		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2727 
2728 	return ret;
2729 }
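
/*
 * Worked example of the half-of-mean estimate above: if the observed mean
 * read completion time is 8000ns, the hybrid polling code will aim to sleep
 * for roughly (8000 + 1) / 2 = 4000ns before switching to busy polling.
 */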
2730 
2731 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2732 				     struct blk_mq_hw_ctx *hctx,
2733 				     struct request *rq)
2734 {
2735 	struct hrtimer_sleeper hs;
2736 	enum hrtimer_mode mode;
2737 	unsigned int nsecs;
2738 	ktime_t kt;
2739 
2740 	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2741 		return false;
2742 
2743 	/*
2744 	 * poll_nsec can be:
2745 	 *
2746 	 * -1:	don't ever hybrid sleep
2747 	 *  0:	use half of prev avg
2748 	 * >0:	use this specific value
2749 	 */
2750 	if (q->poll_nsec == -1)
2751 		return false;
2752 	else if (q->poll_nsec > 0)
2753 		nsecs = q->poll_nsec;
2754 	else
2755 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2756 
2757 	if (!nsecs)
2758 		return false;
2759 
2760 	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2761 
2762 	/*
2763 	 * This will be replaced with the stats tracking code, using
2764 	 * 'avg_completion_time / 2' as the pre-sleep target.
2765 	 */
2766 	kt = nsecs;
2767 
2768 	mode = HRTIMER_MODE_REL;
2769 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2770 	hrtimer_set_expires(&hs.timer, kt);
2771 
2772 	hrtimer_init_sleeper(&hs, current);
2773 	do {
2774 		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2775 			break;
2776 		set_current_state(TASK_UNINTERRUPTIBLE);
2777 		hrtimer_start_expires(&hs.timer, mode);
2778 		if (hs.task)
2779 			io_schedule();
2780 		hrtimer_cancel(&hs.timer);
2781 		mode = HRTIMER_MODE_ABS;
2782 	} while (hs.task && !signal_pending(current));
2783 
2784 	__set_current_state(TASK_RUNNING);
2785 	destroy_hrtimer_on_stack(&hs.timer);
2786 	return true;
2787 }
2788 
2789 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2790 {
2791 	struct request_queue *q = hctx->queue;
2792 	long state;
2793 
2794 	/*
2795 	 * If we sleep, have the caller restart the poll loop to reset
2796 	 * the state. Like for the other success return cases, the
2797 	 * caller is responsible for checking if the IO completed. If
2798 	 * the IO isn't complete, we'll get called again and will go
2799 	 * straight to the busy poll loop.
2800 	 */
2801 	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2802 		return true;
2803 
2804 	hctx->poll_considered++;
2805 
2806 	state = current->state;
2807 	while (!need_resched()) {
2808 		int ret;
2809 
2810 		hctx->poll_invoked++;
2811 
2812 		ret = q->mq_ops->poll(hctx, rq->tag);
2813 		if (ret > 0) {
2814 			hctx->poll_success++;
2815 			set_current_state(TASK_RUNNING);
2816 			return true;
2817 		}
2818 
2819 		if (signal_pending_state(state, current))
2820 			set_current_state(TASK_RUNNING);
2821 
2822 		if (current->state == TASK_RUNNING)
2823 			return true;
2824 		if (ret < 0)
2825 			break;
2826 		cpu_relax();
2827 	}
2828 
2829 	return false;
2830 }
2831 
2832 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2833 {
2834 	struct blk_mq_hw_ctx *hctx;
2835 	struct blk_plug *plug;
2836 	struct request *rq;
2837 
2838 	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2839 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2840 		return false;
2841 
2842 	plug = current->plug;
2843 	if (plug)
2844 		blk_flush_plug_list(plug, false);
2845 
2846 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2847 	if (!blk_qc_t_is_internal(cookie))
2848 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2849 	else
2850 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2851 
2852 	return __blk_mq_poll(hctx, rq);
2853 }
2854 EXPORT_SYMBOL_GPL(blk_mq_poll);
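
/*
 * Example (illustrative sketch): the shape of a driver ->poll() callback as
 * invoked from __blk_mq_poll() above.  It reaps completions for the hardware
 * queue and returns the number found (a positive return means progress was
 * made).  The example_* names are hypothetical.
 *
 *	static int example_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 *	{
 *		struct example_hw_queue *ehq = hctx->driver_data;
 *
 *		return example_process_completions(ehq);
 *	}
 */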
2855 
2856 void blk_mq_disable_hotplug(void)
2857 {
2858 	mutex_lock(&all_q_mutex);
2859 }
2860 
2861 void blk_mq_enable_hotplug(void)
2862 {
2863 	mutex_unlock(&all_q_mutex);
2864 }
2865 
2866 static int __init blk_mq_init(void)
2867 {
2868 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2869 				blk_mq_hctx_notify_dead);
2870 
2871 	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2872 				  blk_mq_queue_reinit_prepare,
2873 				  blk_mq_queue_reinit_dead);
2874 	return 0;
2875 }
2876 subsys_initcall(blk_mq_init);
2877