xref: /openbmc/linux/block/blk-mq.c (revision 1f9f6a78)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 #include <linux/crash_dump.h>
24 
25 #include <trace/events/block.h>
26 
27 #include <linux/blk-mq.h>
28 #include "blk.h"
29 #include "blk-mq.h"
30 #include "blk-mq-tag.h"
31 
32 static DEFINE_MUTEX(all_q_mutex);
33 static LIST_HEAD(all_q_list);
34 
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
36 
37 /*
38  * Check if any of the ctx's have pending work in this hardware queue
39  */
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41 {
42 	unsigned int i;
43 
44 	for (i = 0; i < hctx->ctx_map.map_size; i++)
45 		if (hctx->ctx_map.map[i].word)
46 			return true;
47 
48 	return false;
49 }
50 
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
52 					      struct blk_mq_ctx *ctx)
53 {
54 	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
55 }
56 
57 #define CTX_TO_BIT(hctx, ctx)	\
58 	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
59 
60 /*
61  * Mark this ctx as having pending work in this hardware queue
62  */
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
64 				     struct blk_mq_ctx *ctx)
65 {
66 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
67 
68 	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
69 		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
70 }
71 
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
73 				      struct blk_mq_ctx *ctx)
74 {
75 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
76 
77 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
78 }
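
/*
 * Worked example (illustrative only, assuming bits_per_word == 8 as set up
 * in blk_mq_alloc_bitmap()): a ctx with index_hw == 11 lives in
 * ctx_map.map[11 / 8] == map[1], at bit CTX_TO_BIT() == (11 & 7) == 3, so
 * marking it pending amounts to
 *
 *	set_bit(3, &hctx->ctx_map.map[1].word);
 */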
79 
80 static int blk_mq_queue_enter(struct request_queue *q)
81 {
82 	while (true) {
83 		int ret;
84 
85 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
86 			return 0;
87 
88 		ret = wait_event_interruptible(q->mq_freeze_wq,
89 				!q->mq_freeze_depth || blk_queue_dying(q));
90 		if (blk_queue_dying(q))
91 			return -ENODEV;
92 		if (ret)
93 			return ret;
94 	}
95 }
96 
97 static void blk_mq_queue_exit(struct request_queue *q)
98 {
99 	percpu_ref_put(&q->mq_usage_counter);
100 }
101 
102 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
103 {
104 	struct request_queue *q =
105 		container_of(ref, struct request_queue, mq_usage_counter);
106 
107 	wake_up_all(&q->mq_freeze_wq);
108 }
109 
110 static void blk_mq_freeze_queue_start(struct request_queue *q)
111 {
112 	bool freeze;
113 
114 	spin_lock_irq(q->queue_lock);
115 	freeze = !q->mq_freeze_depth++;
116 	spin_unlock_irq(q->queue_lock);
117 
118 	if (freeze) {
119 		percpu_ref_kill(&q->mq_usage_counter);
120 		blk_mq_run_queues(q, false);
121 	}
122 }
123 
124 static void blk_mq_freeze_queue_wait(struct request_queue *q)
125 {
126 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
127 }
128 
129 /*
130  * Guarantee no request is in use, so we can change any data structure of
131  * the queue afterward.
132  */
133 void blk_mq_freeze_queue(struct request_queue *q)
134 {
135 	blk_mq_freeze_queue_start(q);
136 	blk_mq_freeze_queue_wait(q);
137 }
138 
139 static void blk_mq_unfreeze_queue(struct request_queue *q)
140 {
141 	bool wake;
142 
143 	spin_lock_irq(q->queue_lock);
144 	wake = !--q->mq_freeze_depth;
145 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
146 	spin_unlock_irq(q->queue_lock);
147 	if (wake) {
148 		percpu_ref_reinit(&q->mq_usage_counter);
149 		wake_up_all(&q->mq_freeze_wq);
150 	}
151 }
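
/*
 * Usage sketch: callers pair the freeze/unfreeze helpers around queue
 * reconfiguration, e.g. as blk_mq_update_tag_set_depth() does below:
 *
 *	blk_mq_freeze_queue(q);
 *	... modify queue data structures with no requests in flight ...
 *	blk_mq_unfreeze_queue(q);
 */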
152 
153 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
154 {
155 	return blk_mq_has_free_tags(hctx->tags);
156 }
157 EXPORT_SYMBOL(blk_mq_can_queue);
158 
159 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
160 			       struct request *rq, unsigned int rw_flags)
161 {
162 	if (blk_queue_io_stat(q))
163 		rw_flags |= REQ_IO_STAT;
164 
165 	INIT_LIST_HEAD(&rq->queuelist);
166 	/* csd/requeue_work/fifo_time is initialized before use */
167 	rq->q = q;
168 	rq->mq_ctx = ctx;
169 	rq->cmd_flags |= rw_flags;
170 	/* do not touch atomic flags, it needs atomic ops against the timer */
171 	rq->cpu = -1;
172 	INIT_HLIST_NODE(&rq->hash);
173 	RB_CLEAR_NODE(&rq->rb_node);
174 	rq->rq_disk = NULL;
175 	rq->part = NULL;
176 	rq->start_time = jiffies;
177 #ifdef CONFIG_BLK_CGROUP
178 	rq->rl = NULL;
179 	set_start_time_ns(rq);
180 	rq->io_start_time_ns = 0;
181 #endif
182 	rq->nr_phys_segments = 0;
183 #if defined(CONFIG_BLK_DEV_INTEGRITY)
184 	rq->nr_integrity_segments = 0;
185 #endif
186 	rq->special = NULL;
187 	/* tag was already set */
188 	rq->errors = 0;
189 
190 	rq->cmd = rq->__cmd;
191 
192 	rq->extra_len = 0;
193 	rq->sense_len = 0;
194 	rq->resid_len = 0;
195 	rq->sense = NULL;
196 
197 	INIT_LIST_HEAD(&rq->timeout_list);
198 	rq->timeout = 0;
199 
200 	rq->end_io = NULL;
201 	rq->end_io_data = NULL;
202 	rq->next_rq = NULL;
203 
204 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
205 }
206 
207 static struct request *
208 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
209 {
210 	struct request *rq;
211 	unsigned int tag;
212 
213 	tag = blk_mq_get_tag(data);
214 	if (tag != BLK_MQ_TAG_FAIL) {
215 		rq = data->hctx->tags->rqs[tag];
216 
217 		if (blk_mq_tag_busy(data->hctx)) {
218 			rq->cmd_flags = REQ_MQ_INFLIGHT;
219 			atomic_inc(&data->hctx->nr_active);
220 		}
221 
222 		rq->tag = tag;
223 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
224 		return rq;
225 	}
226 
227 	return NULL;
228 }
229 
230 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
231 		bool reserved)
232 {
233 	struct blk_mq_ctx *ctx;
234 	struct blk_mq_hw_ctx *hctx;
235 	struct request *rq;
236 	struct blk_mq_alloc_data alloc_data;
237 	int ret;
238 
239 	ret = blk_mq_queue_enter(q);
240 	if (ret)
241 		return ERR_PTR(ret);
242 
243 	ctx = blk_mq_get_ctx(q);
244 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
245 	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
246 			reserved, ctx, hctx);
247 
248 	rq = __blk_mq_alloc_request(&alloc_data, rw);
249 	if (!rq && (gfp & __GFP_WAIT)) {
250 		__blk_mq_run_hw_queue(hctx);
251 		blk_mq_put_ctx(ctx);
252 
253 		ctx = blk_mq_get_ctx(q);
254 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
255 		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
256 				hctx);
257 		rq =  __blk_mq_alloc_request(&alloc_data, rw);
258 		ctx = alloc_data.ctx;
259 	}
260 	blk_mq_put_ctx(ctx);
261 	if (!rq)
262 		return ERR_PTR(-EWOULDBLOCK);
263 	return rq;
264 }
265 EXPORT_SYMBOL(blk_mq_alloc_request);
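
/*
 * Usage sketch (hypothetical caller, for illustration only): allocate a
 * request for an internal command, fill in the driver payload, and free it
 * again once done:
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the command, e.g. via blk_mq_rq_to_pdu(rq) ...
 *	blk_mq_free_request(rq);
 */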
266 
267 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
268 				  struct blk_mq_ctx *ctx, struct request *rq)
269 {
270 	const int tag = rq->tag;
271 	struct request_queue *q = rq->q;
272 
273 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
274 		atomic_dec(&hctx->nr_active);
275 	rq->cmd_flags = 0;
276 
277 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
278 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
279 	blk_mq_queue_exit(q);
280 }
281 
282 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
283 {
284 	struct blk_mq_ctx *ctx = rq->mq_ctx;
285 
286 	ctx->rq_completed[rq_is_sync(rq)]++;
287 	__blk_mq_free_request(hctx, ctx, rq);
288 
289 }
290 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
291 
292 void blk_mq_free_request(struct request *rq)
293 {
294 	struct blk_mq_hw_ctx *hctx;
295 	struct request_queue *q = rq->q;
296 
297 	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
298 	blk_mq_free_hctx_request(hctx, rq);
299 }
300 EXPORT_SYMBOL_GPL(blk_mq_free_request);
301 
302 inline void __blk_mq_end_request(struct request *rq, int error)
303 {
304 	blk_account_io_done(rq);
305 
306 	if (rq->end_io) {
307 		rq->end_io(rq, error);
308 	} else {
309 		if (unlikely(blk_bidi_rq(rq)))
310 			blk_mq_free_request(rq->next_rq);
311 		blk_mq_free_request(rq);
312 	}
313 }
314 EXPORT_SYMBOL(__blk_mq_end_request);
315 
316 void blk_mq_end_request(struct request *rq, int error)
317 {
318 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
319 		BUG();
320 	__blk_mq_end_request(rq, error);
321 }
322 EXPORT_SYMBOL(blk_mq_end_request);
323 
324 static void __blk_mq_complete_request_remote(void *data)
325 {
326 	struct request *rq = data;
327 
328 	rq->q->softirq_done_fn(rq);
329 }
330 
331 static void blk_mq_ipi_complete_request(struct request *rq)
332 {
333 	struct blk_mq_ctx *ctx = rq->mq_ctx;
334 	bool shared = false;
335 	int cpu;
336 
337 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
338 		rq->q->softirq_done_fn(rq);
339 		return;
340 	}
341 
342 	cpu = get_cpu();
343 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
344 		shared = cpus_share_cache(cpu, ctx->cpu);
345 
346 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
347 		rq->csd.func = __blk_mq_complete_request_remote;
348 		rq->csd.info = rq;
349 		rq->csd.flags = 0;
350 		smp_call_function_single_async(ctx->cpu, &rq->csd);
351 	} else {
352 		rq->q->softirq_done_fn(rq);
353 	}
354 	put_cpu();
355 }
356 
357 void __blk_mq_complete_request(struct request *rq)
358 {
359 	struct request_queue *q = rq->q;
360 
361 	if (!q->softirq_done_fn)
362 		blk_mq_end_request(rq, rq->errors);
363 	else
364 		blk_mq_ipi_complete_request(rq);
365 }
366 
367 /**
368  * blk_mq_complete_request - end I/O on a request
369  * @rq:		the request being processed
370  *
371  * Description:
372  *	Ends all I/O on a request. It does not handle partial completions.
373  *	The actual completion happens out-of-order, through an IPI handler.
374  **/
375 void blk_mq_complete_request(struct request *rq)
376 {
377 	struct request_queue *q = rq->q;
378 
379 	if (unlikely(blk_should_fake_timeout(q)))
380 		return;
381 	if (!blk_mark_rq_complete(rq))
382 		__blk_mq_complete_request(rq);
383 }
384 EXPORT_SYMBOL(blk_mq_complete_request);
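
/*
 * Illustrative flow (hypothetical driver, not from this file): an interrupt
 * handler calls blk_mq_complete_request(rq); the queue's softirq_done_fn
 * (installed from set->ops->complete in blk_mq_init_queue()) then finishes
 * the request, roughly:
 *
 *	static void foo_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, foo_rq_result(rq));
 *	}
 */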
385 
386 void blk_mq_start_request(struct request *rq)
387 {
388 	struct request_queue *q = rq->q;
389 
390 	trace_block_rq_issue(q, rq);
391 
392 	rq->resid_len = blk_rq_bytes(rq);
393 	if (unlikely(blk_bidi_rq(rq)))
394 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
395 
396 	blk_add_timer(rq);
397 
398 	/*
399 	 * Ensure that ->deadline is visible before we set the started
400 	 * flag and clear the completed flag.
401 	 */
402 	smp_mb__before_atomic();
403 
404 	/*
405 	 * Mark us as started and clear complete. Complete might have been
406 	 * set if requeue raced with timeout, which then marked it as
407 	 * complete. So be sure to clear complete again when we start
408 	 * the request, otherwise we'll ignore the completion event.
409 	 */
410 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
411 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
412 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
413 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
414 
415 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
416 		/*
417 		 * Make sure space for the drain appears.  We know we can do
418 		 * this because max_hw_segments has been adjusted to be one
419 		 * fewer than the device can handle.
420 		 */
421 		rq->nr_phys_segments++;
422 	}
423 }
424 EXPORT_SYMBOL(blk_mq_start_request);
425 
426 static void __blk_mq_requeue_request(struct request *rq)
427 {
428 	struct request_queue *q = rq->q;
429 
430 	trace_block_rq_requeue(q, rq);
431 
432 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
433 		if (q->dma_drain_size && blk_rq_bytes(rq))
434 			rq->nr_phys_segments--;
435 	}
436 }
437 
438 void blk_mq_requeue_request(struct request *rq)
439 {
440 	__blk_mq_requeue_request(rq);
441 
442 	BUG_ON(blk_queued_rq(rq));
443 	blk_mq_add_to_requeue_list(rq, true);
444 }
445 EXPORT_SYMBOL(blk_mq_requeue_request);
446 
447 static void blk_mq_requeue_work(struct work_struct *work)
448 {
449 	struct request_queue *q =
450 		container_of(work, struct request_queue, requeue_work);
451 	LIST_HEAD(rq_list);
452 	struct request *rq, *next;
453 	unsigned long flags;
454 
455 	spin_lock_irqsave(&q->requeue_lock, flags);
456 	list_splice_init(&q->requeue_list, &rq_list);
457 	spin_unlock_irqrestore(&q->requeue_lock, flags);
458 
459 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
460 		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
461 			continue;
462 
463 		rq->cmd_flags &= ~REQ_SOFTBARRIER;
464 		list_del_init(&rq->queuelist);
465 		blk_mq_insert_request(rq, true, false, false);
466 	}
467 
468 	while (!list_empty(&rq_list)) {
469 		rq = list_entry(rq_list.next, struct request, queuelist);
470 		list_del_init(&rq->queuelist);
471 		blk_mq_insert_request(rq, false, false, false);
472 	}
473 
474 	/*
475 	 * Use the start variant of queue running here, so that running
476 	 * the requeue work will kick stopped queues.
477 	 */
478 	blk_mq_start_hw_queues(q);
479 }
480 
481 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
482 {
483 	struct request_queue *q = rq->q;
484 	unsigned long flags;
485 
486 	/*
487 	 * We abuse this flag that is otherwise used by the I/O scheduler to
488 	 * request head insertion from the workqueue.
489 	 */
490 	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
491 
492 	spin_lock_irqsave(&q->requeue_lock, flags);
493 	if (at_head) {
494 		rq->cmd_flags |= REQ_SOFTBARRIER;
495 		list_add(&rq->queuelist, &q->requeue_list);
496 	} else {
497 		list_add_tail(&rq->queuelist, &q->requeue_list);
498 	}
499 	spin_unlock_irqrestore(&q->requeue_lock, flags);
500 }
501 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
502 
503 void blk_mq_kick_requeue_list(struct request_queue *q)
504 {
505 	kblockd_schedule_work(&q->requeue_work);
506 }
507 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
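
/*
 * Usage sketch: a driver that cannot process a request right now can put it
 * back and kick the requeue work, e.g.:
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(rq->q);
 */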
508 
509 static inline bool is_flush_request(struct request *rq,
510 		struct blk_flush_queue *fq, unsigned int tag)
511 {
512 	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
513 			fq->flush_rq->tag == tag);
514 }
515 
516 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
517 {
518 	struct request *rq = tags->rqs[tag];
519 	/* mq_ctx of flush rq is always cloned from the corresponding req */
520 	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
521 
522 	if (!is_flush_request(rq, fq, tag))
523 		return rq;
524 
525 	return fq->flush_rq;
526 }
527 EXPORT_SYMBOL(blk_mq_tag_to_rq);
528 
529 struct blk_mq_timeout_data {
530 	unsigned long next;
531 	unsigned int next_set;
532 };
533 
534 void blk_mq_rq_timed_out(struct request *req, bool reserved)
535 {
536 	struct blk_mq_ops *ops = req->q->mq_ops;
537 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
538 
539 	/*
540 	 * We know that complete is set at this point. If STARTED isn't set
541 	 * anymore, then the request isn't active and the "timeout" should
542 	 * just be ignored. This can happen due to the bitflag ordering.
543 	 * Timeout first checks if STARTED is set, and if it is, assumes
544 	 * the request is active. But if we race with completion, then
545 	 * both flags will get cleared. So check here again, and ignore
546 	 * a timeout event with a request that isn't active.
547 	 */
548 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
549 		return;
550 
551 	if (ops->timeout)
552 		ret = ops->timeout(req, reserved);
553 
554 	switch (ret) {
555 	case BLK_EH_HANDLED:
556 		__blk_mq_complete_request(req);
557 		break;
558 	case BLK_EH_RESET_TIMER:
559 		blk_add_timer(req);
560 		blk_clear_rq_complete(req);
561 		break;
562 	case BLK_EH_NOT_HANDLED:
563 		break;
564 	default:
565 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
566 		break;
567 	}
568 }
569 
570 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
571 		struct request *rq, void *priv, bool reserved)
572 {
573 	struct blk_mq_timeout_data *data = priv;
574 
575 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
576 		return;
577 
578 	if (time_after_eq(jiffies, rq->deadline)) {
579 		if (!blk_mark_rq_complete(rq))
580 			blk_mq_rq_timed_out(rq, reserved);
581 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
582 		data->next = rq->deadline;
583 		data->next_set = 1;
584 	}
585 }
586 
587 static void blk_mq_rq_timer(unsigned long priv)
588 {
589 	struct request_queue *q = (struct request_queue *)priv;
590 	struct blk_mq_timeout_data data = {
591 		.next		= 0,
592 		.next_set	= 0,
593 	};
594 	struct blk_mq_hw_ctx *hctx;
595 	int i;
596 
597 	queue_for_each_hw_ctx(q, hctx, i) {
598 		/*
599 		 * If no software queues are currently mapped to this
600 		 * hardware queue, there's nothing to check
601 		 */
602 		if (!blk_mq_hw_queue_mapped(hctx))
603 			continue;
604 
605 		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
606 	}
607 
608 	if (data.next_set) {
609 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
610 		mod_timer(&q->timeout, data.next);
611 	} else {
612 		queue_for_each_hw_ctx(q, hctx, i)
613 			blk_mq_tag_idle(hctx);
614 	}
615 }
616 
617 /*
618  * Reverse check our software queue for entries that we could potentially
619  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
620  * too much time checking for merges.
621  */
622 static bool blk_mq_attempt_merge(struct request_queue *q,
623 				 struct blk_mq_ctx *ctx, struct bio *bio)
624 {
625 	struct request *rq;
626 	int checked = 8;
627 
628 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
629 		int el_ret;
630 
631 		if (!checked--)
632 			break;
633 
634 		if (!blk_rq_merge_ok(rq, bio))
635 			continue;
636 
637 		el_ret = blk_try_merge(rq, bio);
638 		if (el_ret == ELEVATOR_BACK_MERGE) {
639 			if (bio_attempt_back_merge(q, rq, bio)) {
640 				ctx->rq_merged++;
641 				return true;
642 			}
643 			break;
644 		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
645 			if (bio_attempt_front_merge(q, rq, bio)) {
646 				ctx->rq_merged++;
647 				return true;
648 			}
649 			break;
650 		}
651 	}
652 
653 	return false;
654 }
655 
656 /*
657  * Process software queues that have been marked busy, splicing them
658  * to the for-dispatch list.
659  */
660 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
661 {
662 	struct blk_mq_ctx *ctx;
663 	int i;
664 
665 	for (i = 0; i < hctx->ctx_map.map_size; i++) {
666 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
667 		unsigned int off, bit;
668 
669 		if (!bm->word)
670 			continue;
671 
672 		bit = 0;
673 		off = i * hctx->ctx_map.bits_per_word;
674 		do {
675 			bit = find_next_bit(&bm->word, bm->depth, bit);
676 			if (bit >= bm->depth)
677 				break;
678 
679 			ctx = hctx->ctxs[bit + off];
680 			clear_bit(bit, &bm->word);
681 			spin_lock(&ctx->lock);
682 			list_splice_tail_init(&ctx->rq_list, list);
683 			spin_unlock(&ctx->lock);
684 
685 			bit++;
686 		} while (1);
687 	}
688 }
689 
690 /*
691  * Run this hardware queue, pulling any software queues mapped to it in.
692  * Note that this function currently has various problems around ordering
693  * of IO. In particular, we'd like FIFO behaviour on handling existing
694  * items on the hctx->dispatch list. Ignore that for now.
695  */
696 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
697 {
698 	struct request_queue *q = hctx->queue;
699 	struct request *rq;
700 	LIST_HEAD(rq_list);
701 	LIST_HEAD(driver_list);
702 	struct list_head *dptr;
703 	int queued;
704 
705 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
706 
707 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
708 		return;
709 
710 	hctx->run++;
711 
712 	/*
713 	 * Touch any software queue that has pending entries.
714 	 */
715 	flush_busy_ctxs(hctx, &rq_list);
716 
717 	/*
718 	 * If we have previous entries on our dispatch list, grab them
719 	 * and stuff them at the front for more fair dispatch.
720 	 */
721 	if (!list_empty_careful(&hctx->dispatch)) {
722 		spin_lock(&hctx->lock);
723 		if (!list_empty(&hctx->dispatch))
724 			list_splice_init(&hctx->dispatch, &rq_list);
725 		spin_unlock(&hctx->lock);
726 	}
727 
728 	/*
729 	 * Start off with dptr being NULL, so we start the first request
730 	 * immediately, even if we have more pending.
731 	 */
732 	dptr = NULL;
733 
734 	/*
735 	 * Now process all the entries, sending them to the driver.
736 	 */
737 	queued = 0;
738 	while (!list_empty(&rq_list)) {
739 		struct blk_mq_queue_data bd;
740 		int ret;
741 
742 		rq = list_first_entry(&rq_list, struct request, queuelist);
743 		list_del_init(&rq->queuelist);
744 
745 		bd.rq = rq;
746 		bd.list = dptr;
747 		bd.last = list_empty(&rq_list);
748 
749 		ret = q->mq_ops->queue_rq(hctx, &bd);
750 		switch (ret) {
751 		case BLK_MQ_RQ_QUEUE_OK:
752 			queued++;
753 			continue;
754 		case BLK_MQ_RQ_QUEUE_BUSY:
755 			list_add(&rq->queuelist, &rq_list);
756 			__blk_mq_requeue_request(rq);
757 			break;
758 		default:
759 			pr_err("blk-mq: bad return on queue: %d\n", ret);
760 		case BLK_MQ_RQ_QUEUE_ERROR:
761 			rq->errors = -EIO;
762 			blk_mq_end_request(rq, rq->errors);
763 			break;
764 		}
765 
766 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
767 			break;
768 
769 		/*
770 		 * We've done the first request. If we have more than 1
771 		 * left in the list, set dptr to defer issue.
772 		 */
773 		if (!dptr && rq_list.next != rq_list.prev)
774 			dptr = &driver_list;
775 	}
776 
777 	if (!queued)
778 		hctx->dispatched[0]++;
779 	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
780 		hctx->dispatched[ilog2(queued) + 1]++;
781 
782 	/*
783 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
784 	 * which is where we will continue on the next queue run.
785 	 */
786 	if (!list_empty(&rq_list)) {
787 		spin_lock(&hctx->lock);
788 		list_splice(&rq_list, &hctx->dispatch);
789 		spin_unlock(&hctx->lock);
790 	}
791 }
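
/*
 * For reference, a minimal ->queue_rq() implementation (hypothetical driver,
 * illustration only) using the return codes interpreted above might look
 * like:
 *
 *	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (foo_device_busy(hctx->driver_data))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		if (foo_submit(hctx->driver_data, rq))
 *			return BLK_MQ_RQ_QUEUE_ERROR;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */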
792 
793 /*
794  * It'd be great if the workqueue API had a way to pass
795  * in a mask and had some smarts for more clever placement.
796  * For now we just round-robin here, switching after every
797  * BLK_MQ_CPU_WORK_BATCH queued items.
798  */
799 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
800 {
801 	if (hctx->queue->nr_hw_queues == 1)
802 		return WORK_CPU_UNBOUND;
803 
804 	if (--hctx->next_cpu_batch <= 0) {
805 		int cpu = hctx->next_cpu, next_cpu;
806 
807 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
808 		if (next_cpu >= nr_cpu_ids)
809 			next_cpu = cpumask_first(hctx->cpumask);
810 
811 		hctx->next_cpu = next_cpu;
812 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
813 
814 		return cpu;
815 	}
816 
817 	return hctx->next_cpu;
818 }
819 
820 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
821 {
822 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
823 	    !blk_mq_hw_queue_mapped(hctx)))
824 		return;
825 
826 	if (!async) {
827 		int cpu = get_cpu();
828 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
829 			__blk_mq_run_hw_queue(hctx);
830 			put_cpu();
831 			return;
832 		}
833 
834 		put_cpu();
835 	}
836 
837 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
838 			&hctx->run_work, 0);
839 }
840 
841 void blk_mq_run_queues(struct request_queue *q, bool async)
842 {
843 	struct blk_mq_hw_ctx *hctx;
844 	int i;
845 
846 	queue_for_each_hw_ctx(q, hctx, i) {
847 		if ((!blk_mq_hctx_has_pending(hctx) &&
848 		    list_empty_careful(&hctx->dispatch)) ||
849 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
850 			continue;
851 
852 		blk_mq_run_hw_queue(hctx, async);
853 	}
854 }
855 EXPORT_SYMBOL(blk_mq_run_queues);
856 
857 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
858 {
859 	cancel_delayed_work(&hctx->run_work);
860 	cancel_delayed_work(&hctx->delay_work);
861 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
862 }
863 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
864 
865 void blk_mq_stop_hw_queues(struct request_queue *q)
866 {
867 	struct blk_mq_hw_ctx *hctx;
868 	int i;
869 
870 	queue_for_each_hw_ctx(q, hctx, i)
871 		blk_mq_stop_hw_queue(hctx);
872 }
873 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
874 
875 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
876 {
877 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
878 
879 	blk_mq_run_hw_queue(hctx, false);
880 }
881 EXPORT_SYMBOL(blk_mq_start_hw_queue);
882 
883 void blk_mq_start_hw_queues(struct request_queue *q)
884 {
885 	struct blk_mq_hw_ctx *hctx;
886 	int i;
887 
888 	queue_for_each_hw_ctx(q, hctx, i)
889 		blk_mq_start_hw_queue(hctx);
890 }
891 EXPORT_SYMBOL(blk_mq_start_hw_queues);
892 
893 
894 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
895 {
896 	struct blk_mq_hw_ctx *hctx;
897 	int i;
898 
899 	queue_for_each_hw_ctx(q, hctx, i) {
900 		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
901 			continue;
902 
903 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
904 		blk_mq_run_hw_queue(hctx, async);
905 	}
906 }
907 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
908 
909 static void blk_mq_run_work_fn(struct work_struct *work)
910 {
911 	struct blk_mq_hw_ctx *hctx;
912 
913 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
914 
915 	__blk_mq_run_hw_queue(hctx);
916 }
917 
918 static void blk_mq_delay_work_fn(struct work_struct *work)
919 {
920 	struct blk_mq_hw_ctx *hctx;
921 
922 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
923 
924 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
925 		__blk_mq_run_hw_queue(hctx);
926 }
927 
928 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
929 {
930 	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
931 		return;
932 
933 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
934 			&hctx->delay_work, msecs_to_jiffies(msecs));
935 }
936 EXPORT_SYMBOL(blk_mq_delay_queue);
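
/*
 * Usage sketch (hypothetical driver, illustration only): on a temporary
 * resource shortage a driver may stop the queue before returning
 * BLK_MQ_RQ_QUEUE_BUSY, and re-run it after a delay or once resources free
 * up:
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	blk_mq_delay_queue(hctx, 10);	... re-run this hctx in ~10ms ...
 *	return BLK_MQ_RQ_QUEUE_BUSY;
 *
 * or, from a completion path:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);
 */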
937 
938 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
939 				    struct request *rq, bool at_head)
940 {
941 	struct blk_mq_ctx *ctx = rq->mq_ctx;
942 
943 	trace_block_rq_insert(hctx->queue, rq);
944 
945 	if (at_head)
946 		list_add(&rq->queuelist, &ctx->rq_list);
947 	else
948 		list_add_tail(&rq->queuelist, &ctx->rq_list);
949 
950 	blk_mq_hctx_mark_pending(hctx, ctx);
951 }
952 
953 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
954 		bool async)
955 {
956 	struct request_queue *q = rq->q;
957 	struct blk_mq_hw_ctx *hctx;
958 	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
959 
960 	current_ctx = blk_mq_get_ctx(q);
961 	if (!cpu_online(ctx->cpu))
962 		rq->mq_ctx = ctx = current_ctx;
963 
964 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
965 
966 	spin_lock(&ctx->lock);
967 	__blk_mq_insert_request(hctx, rq, at_head);
968 	spin_unlock(&ctx->lock);
969 
970 	if (run_queue)
971 		blk_mq_run_hw_queue(hctx, async);
972 
973 	blk_mq_put_ctx(current_ctx);
974 }
975 
976 static void blk_mq_insert_requests(struct request_queue *q,
977 				     struct blk_mq_ctx *ctx,
978 				     struct list_head *list,
979 				     int depth,
980 				     bool from_schedule)
981 
982 {
983 	struct blk_mq_hw_ctx *hctx;
984 	struct blk_mq_ctx *current_ctx;
985 
986 	trace_block_unplug(q, depth, !from_schedule);
987 
988 	current_ctx = blk_mq_get_ctx(q);
989 
990 	if (!cpu_online(ctx->cpu))
991 		ctx = current_ctx;
992 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
993 
994 	/*
995 	 * Preemption doesn't flush the plug list, so it's possible that
996 	 * ctx->cpu is offline by now.
997 	 */
998 	spin_lock(&ctx->lock);
999 	while (!list_empty(list)) {
1000 		struct request *rq;
1001 
1002 		rq = list_first_entry(list, struct request, queuelist);
1003 		list_del_init(&rq->queuelist);
1004 		rq->mq_ctx = ctx;
1005 		__blk_mq_insert_request(hctx, rq, false);
1006 	}
1007 	spin_unlock(&ctx->lock);
1008 
1009 	blk_mq_run_hw_queue(hctx, from_schedule);
1010 	blk_mq_put_ctx(current_ctx);
1011 }
1012 
1013 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1014 {
1015 	struct request *rqa = container_of(a, struct request, queuelist);
1016 	struct request *rqb = container_of(b, struct request, queuelist);
1017 
1018 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1019 		 (rqa->mq_ctx == rqb->mq_ctx &&
1020 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1021 }
1022 
1023 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1024 {
1025 	struct blk_mq_ctx *this_ctx;
1026 	struct request_queue *this_q;
1027 	struct request *rq;
1028 	LIST_HEAD(list);
1029 	LIST_HEAD(ctx_list);
1030 	unsigned int depth;
1031 
1032 	list_splice_init(&plug->mq_list, &list);
1033 
1034 	list_sort(NULL, &list, plug_ctx_cmp);
1035 
1036 	this_q = NULL;
1037 	this_ctx = NULL;
1038 	depth = 0;
1039 
1040 	while (!list_empty(&list)) {
1041 		rq = list_entry_rq(list.next);
1042 		list_del_init(&rq->queuelist);
1043 		BUG_ON(!rq->q);
1044 		if (rq->mq_ctx != this_ctx) {
1045 			if (this_ctx) {
1046 				blk_mq_insert_requests(this_q, this_ctx,
1047 							&ctx_list, depth,
1048 							from_schedule);
1049 			}
1050 
1051 			this_ctx = rq->mq_ctx;
1052 			this_q = rq->q;
1053 			depth = 0;
1054 		}
1055 
1056 		depth++;
1057 		list_add_tail(&rq->queuelist, &ctx_list);
1058 	}
1059 
1060 	/*
1061 	 * If 'this_ctx' is set, we know we have entries to complete
1062 	 * on 'ctx_list'. Do those.
1063 	 */
1064 	if (this_ctx) {
1065 		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1066 				       from_schedule);
1067 	}
1068 }
1069 
1070 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1071 {
1072 	init_request_from_bio(rq, bio);
1073 
1074 	if (blk_do_io_stat(rq))
1075 		blk_account_io_start(rq, 1);
1076 }
1077 
1078 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1079 {
1080 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1081 		!blk_queue_nomerges(hctx->queue);
1082 }
1083 
1084 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1085 					 struct blk_mq_ctx *ctx,
1086 					 struct request *rq, struct bio *bio)
1087 {
1088 	if (!hctx_allow_merges(hctx)) {
1089 		blk_mq_bio_to_request(rq, bio);
1090 		spin_lock(&ctx->lock);
1091 insert_rq:
1092 		__blk_mq_insert_request(hctx, rq, false);
1093 		spin_unlock(&ctx->lock);
1094 		return false;
1095 	} else {
1096 		struct request_queue *q = hctx->queue;
1097 
1098 		spin_lock(&ctx->lock);
1099 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1100 			blk_mq_bio_to_request(rq, bio);
1101 			goto insert_rq;
1102 		}
1103 
1104 		spin_unlock(&ctx->lock);
1105 		__blk_mq_free_request(hctx, ctx, rq);
1106 		return true;
1107 	}
1108 }
1109 
1110 struct blk_map_ctx {
1111 	struct blk_mq_hw_ctx *hctx;
1112 	struct blk_mq_ctx *ctx;
1113 };
1114 
1115 static struct request *blk_mq_map_request(struct request_queue *q,
1116 					  struct bio *bio,
1117 					  struct blk_map_ctx *data)
1118 {
1119 	struct blk_mq_hw_ctx *hctx;
1120 	struct blk_mq_ctx *ctx;
1121 	struct request *rq;
1122 	int rw = bio_data_dir(bio);
1123 	struct blk_mq_alloc_data alloc_data;
1124 
1125 	if (unlikely(blk_mq_queue_enter(q))) {
1126 		bio_endio(bio, -EIO);
1127 		return NULL;
1128 	}
1129 
1130 	ctx = blk_mq_get_ctx(q);
1131 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1132 
1133 	if (rw_is_sync(bio->bi_rw))
1134 		rw |= REQ_SYNC;
1135 
1136 	trace_block_getrq(q, bio, rw);
1137 	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1138 			hctx);
1139 	rq = __blk_mq_alloc_request(&alloc_data, rw);
1140 	if (unlikely(!rq)) {
1141 		__blk_mq_run_hw_queue(hctx);
1142 		blk_mq_put_ctx(ctx);
1143 		trace_block_sleeprq(q, bio, rw);
1144 
1145 		ctx = blk_mq_get_ctx(q);
1146 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
1147 		blk_mq_set_alloc_data(&alloc_data, q,
1148 				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1149 		rq = __blk_mq_alloc_request(&alloc_data, rw);
1150 		ctx = alloc_data.ctx;
1151 		hctx = alloc_data.hctx;
1152 	}
1153 
1154 	hctx->queued++;
1155 	data->hctx = hctx;
1156 	data->ctx = ctx;
1157 	return rq;
1158 }
1159 
1160 /*
1161  * Multiple hardware queue variant. This will not use per-process plugs,
1162  * but will attempt to bypass the hctx queueing if we can go straight to
1163  * hardware for SYNC IO.
1164  */
1165 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1166 {
1167 	const int is_sync = rw_is_sync(bio->bi_rw);
1168 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1169 	struct blk_map_ctx data;
1170 	struct request *rq;
1171 
1172 	blk_queue_bounce(q, &bio);
1173 
1174 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1175 		bio_endio(bio, -EIO);
1176 		return;
1177 	}
1178 
1179 	rq = blk_mq_map_request(q, bio, &data);
1180 	if (unlikely(!rq))
1181 		return;
1182 
1183 	if (unlikely(is_flush_fua)) {
1184 		blk_mq_bio_to_request(rq, bio);
1185 		blk_insert_flush(rq);
1186 		goto run_queue;
1187 	}
1188 
1189 	/*
1190 	 * If the driver supports deferred issue based on 'last', then
1191 	 * queue it up like normal since we can potentially save some
1192 	 * CPU this way.
1193 	 */
1194 	if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1195 		struct blk_mq_queue_data bd = {
1196 			.rq = rq,
1197 			.list = NULL,
1198 			.last = 1
1199 		};
1200 		int ret;
1201 
1202 		blk_mq_bio_to_request(rq, bio);
1203 
1204 		/*
1205 		 * If queueing succeeded, we are done. On error, kill the request.
1206 		 * Any other return (busy) means we add it back to our list, as
1207 		 * we previously would have done.
1208 		 */
1209 		ret = q->mq_ops->queue_rq(data.hctx, &bd);
1210 		if (ret == BLK_MQ_RQ_QUEUE_OK)
1211 			goto done;
1212 		else {
1213 			__blk_mq_requeue_request(rq);
1214 
1215 			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1216 				rq->errors = -EIO;
1217 				blk_mq_end_request(rq, rq->errors);
1218 				goto done;
1219 			}
1220 		}
1221 	}
1222 
1223 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1224 		/*
1225 		 * For a SYNC request, send it to the hardware immediately. For
1226 		 * an ASYNC request, just ensure that we run it later on. The
1227 		 * latter allows for merging opportunities and more efficient
1228 		 * dispatching.
1229 		 */
1230 run_queue:
1231 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1232 	}
1233 done:
1234 	blk_mq_put_ctx(data.ctx);
1235 }
1236 
1237 /*
1238  * Single hardware queue variant. This will attempt to use any per-process
1239  * plug for merging and IO deferral.
1240  */
1241 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1242 {
1243 	const int is_sync = rw_is_sync(bio->bi_rw);
1244 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1245 	unsigned int use_plug, request_count = 0;
1246 	struct blk_map_ctx data;
1247 	struct request *rq;
1248 
1249 	/*
1250 	 * Only use the per-process plug for async, non-flush IO; sync and
1251 	 * flush/FUA requests go straight to the hardware queue.
1252 	 */
1253 	use_plug = !is_flush_fua && !is_sync;
1254 
1255 	blk_queue_bounce(q, &bio);
1256 
1257 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1258 		bio_endio(bio, -EIO);
1259 		return;
1260 	}
1261 
1262 	if (use_plug && !blk_queue_nomerges(q) &&
1263 	    blk_attempt_plug_merge(q, bio, &request_count))
1264 		return;
1265 
1266 	rq = blk_mq_map_request(q, bio, &data);
1267 	if (unlikely(!rq))
1268 		return;
1269 
1270 	if (unlikely(is_flush_fua)) {
1271 		blk_mq_bio_to_request(rq, bio);
1272 		blk_insert_flush(rq);
1273 		goto run_queue;
1274 	}
1275 
1276 	/*
1277 	 * If a task plug exists, use it: since this is completely lockless,
1278 	 * we can utilize it to temporarily store requests until the task is
1279 	 * either done or scheduled away.
1280 	 */
1281 	if (use_plug) {
1282 		struct blk_plug *plug = current->plug;
1283 
1284 		if (plug) {
1285 			blk_mq_bio_to_request(rq, bio);
1286 			if (list_empty(&plug->mq_list))
1287 				trace_block_plug(q);
1288 			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1289 				blk_flush_plug_list(plug, false);
1290 				trace_block_plug(q);
1291 			}
1292 			list_add_tail(&rq->queuelist, &plug->mq_list);
1293 			blk_mq_put_ctx(data.ctx);
1294 			return;
1295 		}
1296 	}
1297 
1298 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1299 		/*
1300 		 * For a SYNC request, send it to the hardware immediately. For
1301 		 * an ASYNC request, just ensure that we run it later on. The
1302 		 * latter allows for merging opportunities and more efficient
1303 		 * dispatching.
1304 		 */
1305 run_queue:
1306 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1307 	}
1308 
1309 	blk_mq_put_ctx(data.ctx);
1310 }
1311 
1312 /*
1313  * Default mapping to a software queue, since we use one per CPU.
1314  */
1315 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1316 {
1317 	return q->queue_hw_ctx[q->mq_map[cpu]];
1318 }
1319 EXPORT_SYMBOL(blk_mq_map_queue);
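
/*
 * Example (illustrative, ignoring CPU topology details): with 8 possible
 * CPUs and 2 hardware queues, blk_mq_make_queue_map() typically ends up
 * with mq_map[] = { 0, 0, 0, 0, 1, 1, 1, 1 }, so blk_mq_map_queue(q, 6)
 * returns q->queue_hw_ctx[1].
 */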
1320 
1321 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1322 		struct blk_mq_tags *tags, unsigned int hctx_idx)
1323 {
1324 	struct page *page;
1325 
1326 	if (tags->rqs && set->ops->exit_request) {
1327 		int i;
1328 
1329 		for (i = 0; i < tags->nr_tags; i++) {
1330 			if (!tags->rqs[i])
1331 				continue;
1332 			set->ops->exit_request(set->driver_data, tags->rqs[i],
1333 						hctx_idx, i);
1334 			tags->rqs[i] = NULL;
1335 		}
1336 	}
1337 
1338 	while (!list_empty(&tags->page_list)) {
1339 		page = list_first_entry(&tags->page_list, struct page, lru);
1340 		list_del_init(&page->lru);
1341 		__free_pages(page, page->private);
1342 	}
1343 
1344 	kfree(tags->rqs);
1345 
1346 	blk_mq_free_tags(tags);
1347 }
1348 
1349 static size_t order_to_size(unsigned int order)
1350 {
1351 	return (size_t)PAGE_SIZE << order;
1352 }
1353 
1354 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1355 		unsigned int hctx_idx)
1356 {
1357 	struct blk_mq_tags *tags;
1358 	unsigned int i, j, entries_per_page, max_order = 4;
1359 	size_t rq_size, left;
1360 
1361 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1362 				set->numa_node);
1363 	if (!tags)
1364 		return NULL;
1365 
1366 	INIT_LIST_HEAD(&tags->page_list);
1367 
1368 	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1369 				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1370 				 set->numa_node);
1371 	if (!tags->rqs) {
1372 		blk_mq_free_tags(tags);
1373 		return NULL;
1374 	}
1375 
1376 	/*
1377 	 * rq_size is the size of the request plus driver payload, rounded
1378 	 * to the cacheline size
1379 	 */
1380 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1381 				cache_line_size());
1382 	left = rq_size * set->queue_depth;
1383 
1384 	for (i = 0; i < set->queue_depth; ) {
1385 		int this_order = max_order;
1386 		struct page *page;
1387 		int to_do;
1388 		void *p;
1389 
1390 		while (left < order_to_size(this_order - 1) && this_order)
1391 			this_order--;
1392 
1393 		do {
1394 			page = alloc_pages_node(set->numa_node,
1395 				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1396 				this_order);
1397 			if (page)
1398 				break;
1399 			if (!this_order--)
1400 				break;
1401 			if (order_to_size(this_order) < rq_size)
1402 				break;
1403 		} while (1);
1404 
1405 		if (!page)
1406 			goto fail;
1407 
1408 		page->private = this_order;
1409 		list_add_tail(&page->lru, &tags->page_list);
1410 
1411 		p = page_address(page);
1412 		entries_per_page = order_to_size(this_order) / rq_size;
1413 		to_do = min(entries_per_page, set->queue_depth - i);
1414 		left -= to_do * rq_size;
1415 		for (j = 0; j < to_do; j++) {
1416 			tags->rqs[i] = p;
1417 			tags->rqs[i]->atomic_flags = 0;
1418 			tags->rqs[i]->cmd_flags = 0;
1419 			if (set->ops->init_request) {
1420 				if (set->ops->init_request(set->driver_data,
1421 						tags->rqs[i], hctx_idx, i,
1422 						set->numa_node)) {
1423 					tags->rqs[i] = NULL;
1424 					goto fail;
1425 				}
1426 			}
1427 
1428 			p += rq_size;
1429 			i++;
1430 		}
1431 	}
1432 
1433 	return tags;
1434 
1435 fail:
1436 	blk_mq_free_rq_map(set, tags, hctx_idx);
1437 	return NULL;
1438 }
1439 
1440 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1441 {
1442 	kfree(bitmap->map);
1443 }
1444 
1445 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1446 {
1447 	unsigned int bpw = 8, total, num_maps, i;
1448 
1449 	bitmap->bits_per_word = bpw;
1450 
1451 	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1452 	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1453 					GFP_KERNEL, node);
1454 	if (!bitmap->map)
1455 		return -ENOMEM;
1456 
1457 	bitmap->map_size = num_maps;
1458 
1459 	total = nr_cpu_ids;
1460 	for (i = 0; i < num_maps; i++) {
1461 		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1462 		total -= bitmap->map[i].depth;
1463 	}
1464 
1465 	return 0;
1466 }
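
/*
 * Worked example: with nr_cpu_ids == 12 and bpw == 8 this allocates
 * ALIGN(12, 8) / 8 == 2 words, with map[0].depth == 8 and map[1].depth == 4,
 * matching the per-word bit math used by CTX_TO_BIT() above.
 */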
1467 
1468 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1469 {
1470 	struct request_queue *q = hctx->queue;
1471 	struct blk_mq_ctx *ctx;
1472 	LIST_HEAD(tmp);
1473 
1474 	/*
1475 	 * Move ctx entries to new CPU, if this one is going away.
1476 	 */
1477 	ctx = __blk_mq_get_ctx(q, cpu);
1478 
1479 	spin_lock(&ctx->lock);
1480 	if (!list_empty(&ctx->rq_list)) {
1481 		list_splice_init(&ctx->rq_list, &tmp);
1482 		blk_mq_hctx_clear_pending(hctx, ctx);
1483 	}
1484 	spin_unlock(&ctx->lock);
1485 
1486 	if (list_empty(&tmp))
1487 		return NOTIFY_OK;
1488 
1489 	ctx = blk_mq_get_ctx(q);
1490 	spin_lock(&ctx->lock);
1491 
1492 	while (!list_empty(&tmp)) {
1493 		struct request *rq;
1494 
1495 		rq = list_first_entry(&tmp, struct request, queuelist);
1496 		rq->mq_ctx = ctx;
1497 		list_move_tail(&rq->queuelist, &ctx->rq_list);
1498 	}
1499 
1500 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1501 	blk_mq_hctx_mark_pending(hctx, ctx);
1502 
1503 	spin_unlock(&ctx->lock);
1504 
1505 	blk_mq_run_hw_queue(hctx, true);
1506 	blk_mq_put_ctx(ctx);
1507 	return NOTIFY_OK;
1508 }
1509 
1510 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1511 {
1512 	struct request_queue *q = hctx->queue;
1513 	struct blk_mq_tag_set *set = q->tag_set;
1514 
1515 	if (set->tags[hctx->queue_num])
1516 		return NOTIFY_OK;
1517 
1518 	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1519 	if (!set->tags[hctx->queue_num])
1520 		return NOTIFY_STOP;
1521 
1522 	hctx->tags = set->tags[hctx->queue_num];
1523 	return NOTIFY_OK;
1524 }
1525 
1526 static int blk_mq_hctx_notify(void *data, unsigned long action,
1527 			      unsigned int cpu)
1528 {
1529 	struct blk_mq_hw_ctx *hctx = data;
1530 
1531 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1532 		return blk_mq_hctx_cpu_offline(hctx, cpu);
1533 	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1534 		return blk_mq_hctx_cpu_online(hctx, cpu);
1535 
1536 	return NOTIFY_OK;
1537 }
1538 
1539 static void blk_mq_exit_hctx(struct request_queue *q,
1540 		struct blk_mq_tag_set *set,
1541 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1542 {
1543 	unsigned flush_start_tag = set->queue_depth;
1544 
1545 	blk_mq_tag_idle(hctx);
1546 
1547 	if (set->ops->exit_request)
1548 		set->ops->exit_request(set->driver_data,
1549 				       hctx->fq->flush_rq, hctx_idx,
1550 				       flush_start_tag + hctx_idx);
1551 
1552 	if (set->ops->exit_hctx)
1553 		set->ops->exit_hctx(hctx, hctx_idx);
1554 
1555 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1556 	blk_free_flush_queue(hctx->fq);
1557 	kfree(hctx->ctxs);
1558 	blk_mq_free_bitmap(&hctx->ctx_map);
1559 }
1560 
1561 static void blk_mq_exit_hw_queues(struct request_queue *q,
1562 		struct blk_mq_tag_set *set, int nr_queue)
1563 {
1564 	struct blk_mq_hw_ctx *hctx;
1565 	unsigned int i;
1566 
1567 	queue_for_each_hw_ctx(q, hctx, i) {
1568 		if (i == nr_queue)
1569 			break;
1570 		blk_mq_exit_hctx(q, set, hctx, i);
1571 	}
1572 }
1573 
1574 static void blk_mq_free_hw_queues(struct request_queue *q,
1575 		struct blk_mq_tag_set *set)
1576 {
1577 	struct blk_mq_hw_ctx *hctx;
1578 	unsigned int i;
1579 
1580 	queue_for_each_hw_ctx(q, hctx, i) {
1581 		free_cpumask_var(hctx->cpumask);
1582 		kfree(hctx);
1583 	}
1584 }
1585 
1586 static int blk_mq_init_hctx(struct request_queue *q,
1587 		struct blk_mq_tag_set *set,
1588 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1589 {
1590 	int node;
1591 	unsigned flush_start_tag = set->queue_depth;
1592 
1593 	node = hctx->numa_node;
1594 	if (node == NUMA_NO_NODE)
1595 		node = hctx->numa_node = set->numa_node;
1596 
1597 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1598 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1599 	spin_lock_init(&hctx->lock);
1600 	INIT_LIST_HEAD(&hctx->dispatch);
1601 	hctx->queue = q;
1602 	hctx->queue_num = hctx_idx;
1603 	hctx->flags = set->flags;
1604 	hctx->cmd_size = set->cmd_size;
1605 
1606 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1607 					blk_mq_hctx_notify, hctx);
1608 	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1609 
1610 	hctx->tags = set->tags[hctx_idx];
1611 
1612 	/*
1613 	 * Allocate space for all possible cpus to avoid allocation at
1614 	 * runtime
1615 	 */
1616 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1617 					GFP_KERNEL, node);
1618 	if (!hctx->ctxs)
1619 		goto unregister_cpu_notifier;
1620 
1621 	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1622 		goto free_ctxs;
1623 
1624 	hctx->nr_ctx = 0;
1625 
1626 	if (set->ops->init_hctx &&
1627 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1628 		goto free_bitmap;
1629 
1630 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1631 	if (!hctx->fq)
1632 		goto exit_hctx;
1633 
1634 	if (set->ops->init_request &&
1635 	    set->ops->init_request(set->driver_data,
1636 				   hctx->fq->flush_rq, hctx_idx,
1637 				   flush_start_tag + hctx_idx, node))
1638 		goto free_fq;
1639 
1640 	return 0;
1641 
1642  free_fq:
1643 	kfree(hctx->fq);
1644  exit_hctx:
1645 	if (set->ops->exit_hctx)
1646 		set->ops->exit_hctx(hctx, hctx_idx);
1647  free_bitmap:
1648 	blk_mq_free_bitmap(&hctx->ctx_map);
1649  free_ctxs:
1650 	kfree(hctx->ctxs);
1651  unregister_cpu_notifier:
1652 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1653 
1654 	return -1;
1655 }
1656 
1657 static int blk_mq_init_hw_queues(struct request_queue *q,
1658 		struct blk_mq_tag_set *set)
1659 {
1660 	struct blk_mq_hw_ctx *hctx;
1661 	unsigned int i;
1662 
1663 	/*
1664 	 * Initialize hardware queues
1665 	 */
1666 	queue_for_each_hw_ctx(q, hctx, i) {
1667 		if (blk_mq_init_hctx(q, set, hctx, i))
1668 			break;
1669 	}
1670 
1671 	if (i == q->nr_hw_queues)
1672 		return 0;
1673 
1674 	/*
1675 	 * Init failed
1676 	 */
1677 	blk_mq_exit_hw_queues(q, set, i);
1678 
1679 	return 1;
1680 }
1681 
1682 static void blk_mq_init_cpu_queues(struct request_queue *q,
1683 				   unsigned int nr_hw_queues)
1684 {
1685 	unsigned int i;
1686 
1687 	for_each_possible_cpu(i) {
1688 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1689 		struct blk_mq_hw_ctx *hctx;
1690 
1691 		memset(__ctx, 0, sizeof(*__ctx));
1692 		__ctx->cpu = i;
1693 		spin_lock_init(&__ctx->lock);
1694 		INIT_LIST_HEAD(&__ctx->rq_list);
1695 		__ctx->queue = q;
1696 
1697 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1698 		if (!cpu_online(i))
1699 			continue;
1700 
1701 		hctx = q->mq_ops->map_queue(q, i);
1702 		cpumask_set_cpu(i, hctx->cpumask);
1703 		hctx->nr_ctx++;
1704 
1705 		/*
1706 		 * Set local node, IFF we have more than one hw queue. If
1707 		 * not, we remain on the home node of the device
1708 		 */
1709 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1710 			hctx->numa_node = cpu_to_node(i);
1711 	}
1712 }
1713 
1714 static void blk_mq_map_swqueue(struct request_queue *q)
1715 {
1716 	unsigned int i;
1717 	struct blk_mq_hw_ctx *hctx;
1718 	struct blk_mq_ctx *ctx;
1719 
1720 	queue_for_each_hw_ctx(q, hctx, i) {
1721 		cpumask_clear(hctx->cpumask);
1722 		hctx->nr_ctx = 0;
1723 	}
1724 
1725 	/*
1726 	 * Map software to hardware queues
1727 	 */
1728 	queue_for_each_ctx(q, ctx, i) {
1729 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1730 		if (!cpu_online(i))
1731 			continue;
1732 
1733 		hctx = q->mq_ops->map_queue(q, i);
1734 		cpumask_set_cpu(i, hctx->cpumask);
1735 		ctx->index_hw = hctx->nr_ctx;
1736 		hctx->ctxs[hctx->nr_ctx++] = ctx;
1737 	}
1738 
1739 	queue_for_each_hw_ctx(q, hctx, i) {
1740 		/*
1741 		 * If no software queues are mapped to this hardware queue,
1742 		 * disable it and free the request entries.
1743 		 */
1744 		if (!hctx->nr_ctx) {
1745 			struct blk_mq_tag_set *set = q->tag_set;
1746 
1747 			if (set->tags[i]) {
1748 				blk_mq_free_rq_map(set, set->tags[i], i);
1749 				set->tags[i] = NULL;
1750 				hctx->tags = NULL;
1751 			}
1752 			continue;
1753 		}
1754 
1755 		/*
1756 		 * Initialize batch roundrobin counts
1757 		 */
1758 		hctx->next_cpu = cpumask_first(hctx->cpumask);
1759 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1760 	}
1761 }
1762 
1763 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1764 {
1765 	struct blk_mq_hw_ctx *hctx;
1766 	struct request_queue *q;
1767 	bool shared;
1768 	int i;
1769 
1770 	if (set->tag_list.next == set->tag_list.prev)
1771 		shared = false;
1772 	else
1773 		shared = true;
1774 
1775 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
1776 		blk_mq_freeze_queue(q);
1777 
1778 		queue_for_each_hw_ctx(q, hctx, i) {
1779 			if (shared)
1780 				hctx->flags |= BLK_MQ_F_TAG_SHARED;
1781 			else
1782 				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1783 		}
1784 		blk_mq_unfreeze_queue(q);
1785 	}
1786 }
1787 
1788 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1789 {
1790 	struct blk_mq_tag_set *set = q->tag_set;
1791 
1792 	mutex_lock(&set->tag_list_lock);
1793 	list_del_init(&q->tag_set_list);
1794 	blk_mq_update_tag_set_depth(set);
1795 	mutex_unlock(&set->tag_list_lock);
1796 }
1797 
1798 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1799 				     struct request_queue *q)
1800 {
1801 	q->tag_set = set;
1802 
1803 	mutex_lock(&set->tag_list_lock);
1804 	list_add_tail(&q->tag_set_list, &set->tag_list);
1805 	blk_mq_update_tag_set_depth(set);
1806 	mutex_unlock(&set->tag_list_lock);
1807 }
1808 
1809 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1810 {
1811 	struct blk_mq_hw_ctx **hctxs;
1812 	struct blk_mq_ctx __percpu *ctx;
1813 	struct request_queue *q;
1814 	unsigned int *map;
1815 	int i;
1816 
1817 	ctx = alloc_percpu(struct blk_mq_ctx);
1818 	if (!ctx)
1819 		return ERR_PTR(-ENOMEM);
1820 
1821 	hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1822 			set->numa_node);
1823 
1824 	if (!hctxs)
1825 		goto err_percpu;
1826 
1827 	map = blk_mq_make_queue_map(set);
1828 	if (!map)
1829 		goto err_map;
1830 
1831 	for (i = 0; i < set->nr_hw_queues; i++) {
1832 		int node = blk_mq_hw_queue_to_node(map, i);
1833 
1834 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1835 					GFP_KERNEL, node);
1836 		if (!hctxs[i])
1837 			goto err_hctxs;
1838 
1839 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1840 						node))
1841 			goto err_hctxs;
1842 
1843 		atomic_set(&hctxs[i]->nr_active, 0);
1844 		hctxs[i]->numa_node = node;
1845 		hctxs[i]->queue_num = i;
1846 	}
1847 
1848 	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1849 	if (!q)
1850 		goto err_hctxs;
1851 
1852 	/*
1853 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
1854 	 * See blk_register_queue() for details.
1855 	 */
1856 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1857 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1858 		goto err_map;
1859 
1860 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1861 	blk_queue_rq_timeout(q, 30000);
1862 
1863 	q->nr_queues = nr_cpu_ids;
1864 	q->nr_hw_queues = set->nr_hw_queues;
1865 	q->mq_map = map;
1866 
1867 	q->queue_ctx = ctx;
1868 	q->queue_hw_ctx = hctxs;
1869 
1870 	q->mq_ops = set->ops;
1871 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1872 
1873 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
1874 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1875 
1876 	q->sg_reserved_size = INT_MAX;
1877 
1878 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1879 	INIT_LIST_HEAD(&q->requeue_list);
1880 	spin_lock_init(&q->requeue_lock);
1881 
1882 	if (q->nr_hw_queues > 1)
1883 		blk_queue_make_request(q, blk_mq_make_request);
1884 	else
1885 		blk_queue_make_request(q, blk_sq_make_request);
1886 
1887 	if (set->timeout)
1888 		blk_queue_rq_timeout(q, set->timeout);
1889 
1890 	/*
1891 	 * Do this after blk_queue_make_request() overrides it...
1892 	 */
1893 	q->nr_requests = set->queue_depth;
1894 
1895 	if (set->ops->complete)
1896 		blk_queue_softirq_done(q, set->ops->complete);
1897 
1898 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1899 
1900 	if (blk_mq_init_hw_queues(q, set))
1901 		goto err_hw;
1902 
1903 	mutex_lock(&all_q_mutex);
1904 	list_add_tail(&q->all_q_node, &all_q_list);
1905 	mutex_unlock(&all_q_mutex);
1906 
1907 	blk_mq_add_queue_tag_set(set, q);
1908 
1909 	blk_mq_map_swqueue(q);
1910 
1911 	return q;
1912 
1913 err_hw:
1914 	blk_cleanup_queue(q);
1915 err_hctxs:
1916 	kfree(map);
1917 	for (i = 0; i < set->nr_hw_queues; i++) {
1918 		if (!hctxs[i])
1919 			break;
1920 		free_cpumask_var(hctxs[i]->cpumask);
1921 		kfree(hctxs[i]);
1922 	}
1923 err_map:
1924 	kfree(hctxs);
1925 err_percpu:
1926 	free_percpu(ctx);
1927 	return ERR_PTR(-ENOMEM);
1928 }
1929 EXPORT_SYMBOL(blk_mq_init_queue);
1930 
1931 void blk_mq_free_queue(struct request_queue *q)
1932 {
1933 	struct blk_mq_tag_set	*set = q->tag_set;
1934 
1935 	blk_mq_del_queue_tag_set(q);
1936 
1937 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1938 	blk_mq_free_hw_queues(q, set);
1939 
1940 	percpu_ref_exit(&q->mq_usage_counter);
1941 
1942 	free_percpu(q->queue_ctx);
1943 	kfree(q->queue_hw_ctx);
1944 	kfree(q->mq_map);
1945 
1946 	q->queue_ctx = NULL;
1947 	q->queue_hw_ctx = NULL;
1948 	q->mq_map = NULL;
1949 
1950 	mutex_lock(&all_q_mutex);
1951 	list_del_init(&q->all_q_node);
1952 	mutex_unlock(&all_q_mutex);
1953 }
1954 
1955 /* Basically redo blk_mq_init_queue with queue frozen */
1956 static void blk_mq_queue_reinit(struct request_queue *q)
1957 {
1958 	WARN_ON_ONCE(!q->mq_freeze_depth);
1959 
1960 	blk_mq_sysfs_unregister(q);
1961 
1962 	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1963 
1964 	/*
1965 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1966 	 * we should change hctx numa_node according to the new topology (this
1967 	 * involves freeing and re-allocating memory; is it worth doing?)
1968 	 */
1969 
1970 	blk_mq_map_swqueue(q);
1971 
1972 	blk_mq_sysfs_register(q);
1973 }
1974 
1975 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1976 				      unsigned long action, void *hcpu)
1977 {
1978 	struct request_queue *q;
1979 
1980 	/*
1981 	 * Before new mappings are established, a hotadded CPU might already
1982 	 * start handling requests. This doesn't break anything as we map
1983 	 * offline CPUs to the first hardware queue. We will re-init the queue
1984 	 * below to get optimal settings.
1985 	 */
1986 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1987 	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1988 		return NOTIFY_OK;
1989 
1990 	mutex_lock(&all_q_mutex);
1991 
1992 	/*
1993 	 * We need to freeze and reinit all existing queues.  Freezing
1994 	 * involves a synchronous wait for an RCU grace period, and doing it
1995 	 * one by one may take a long time.  Start freezing all queues in
1996 	 * one swoop and then wait for the completions so that freezing can
1997 	 * take place in parallel.
1998 	 */
1999 	list_for_each_entry(q, &all_q_list, all_q_node)
2000 		blk_mq_freeze_queue_start(q);
2001 	list_for_each_entry(q, &all_q_list, all_q_node)
2002 		blk_mq_freeze_queue_wait(q);
2003 
2004 	list_for_each_entry(q, &all_q_list, all_q_node)
2005 		blk_mq_queue_reinit(q);
2006 
2007 	list_for_each_entry(q, &all_q_list, all_q_node)
2008 		blk_mq_unfreeze_queue(q);
2009 
2010 	mutex_unlock(&all_q_mutex);
2011 	return NOTIFY_OK;
2012 }
2013 
2014 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2015 {
2016 	int i;
2017 
2018 	for (i = 0; i < set->nr_hw_queues; i++) {
2019 		set->tags[i] = blk_mq_init_rq_map(set, i);
2020 		if (!set->tags[i])
2021 			goto out_unwind;
2022 	}
2023 
2024 	return 0;
2025 
2026 out_unwind:
2027 	while (--i >= 0)
2028 		blk_mq_free_rq_map(set, set->tags[i], i);
2029 
2030 	return -ENOMEM;
2031 }
2032 
2033 /*
2034  * Allocate the request maps associated with this tag_set. Note that this
2035  * may reduce the depth asked for, if memory is tight. set->queue_depth
2036  * will be updated to reflect the allocated depth.
2037  */
2038 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2039 {
2040 	unsigned int depth;
2041 	int err;
2042 
2043 	depth = set->queue_depth;
2044 	do {
2045 		err = __blk_mq_alloc_rq_maps(set);
2046 		if (!err)
2047 			break;
2048 
2049 		set->queue_depth >>= 1;
2050 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2051 			err = -ENOMEM;
2052 			break;
2053 		}
2054 	} while (set->queue_depth);
2055 
2056 	if (!set->queue_depth || err) {
2057 		pr_err("blk-mq: failed to allocate request map\n");
2058 		return -ENOMEM;
2059 	}
2060 
2061 	if (depth != set->queue_depth)
2062 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2063 						depth, set->queue_depth);
2064 
2065 	return 0;
2066 }
2067 
2068 /*
2069  * Alloc a tag set to be associated with one or more request queues.
2070  * May fail with EINVAL for various error conditions. May adjust the
2071  * requested depth down, if it is too large. In that case, the adjusted
2072  * value will be stored in set->queue_depth.
2073  */
2074 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2075 {
2076 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2077 
2078 	if (!set->nr_hw_queues)
2079 		return -EINVAL;
2080 	if (!set->queue_depth)
2081 		return -EINVAL;
2082 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2083 		return -EINVAL;
2084 
2085 	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
2086 		return -EINVAL;
2087 
2088 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2089 		pr_info("blk-mq: reduced tag depth to %u\n",
2090 			BLK_MQ_MAX_DEPTH);
2091 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2092 	}
2093 
2094 	/*
2095 	 * If a crashdump is active, then we are potentially in a very
2096 	 * memory constrained environment. Limit us to 1 queue and
2097 	 * 64 tags to prevent using too much memory.
2098 	 */
2099 	if (is_kdump_kernel()) {
2100 		set->nr_hw_queues = 1;
2101 		set->queue_depth = min(64U, set->queue_depth);
2102 	}
2103 
2104 	set->tags = kmalloc_node(set->nr_hw_queues *
2105 				 sizeof(struct blk_mq_tags *),
2106 				 GFP_KERNEL, set->numa_node);
2107 	if (!set->tags)
2108 		return -ENOMEM;
2109 
2110 	if (blk_mq_alloc_rq_maps(set))
2111 		goto enomem;
2112 
2113 	mutex_init(&set->tag_list_lock);
2114 	INIT_LIST_HEAD(&set->tag_list);
2115 
2116 	return 0;
2117 enomem:
2118 	kfree(set->tags);
2119 	set->tags = NULL;
2120 	return -ENOMEM;
2121 }
2122 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
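
/*
 * Setup sketch (hypothetical driver, for illustration only; foo_mq_ops and
 * struct foo_cmd are made-up names): a typical driver fills in the tag set,
 * allocates it, and then creates a queue:
 *
 *	set->ops = &foo_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct foo_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */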
2123 
2124 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2125 {
2126 	int i;
2127 
2128 	for (i = 0; i < set->nr_hw_queues; i++) {
2129 		if (set->tags[i])
2130 			blk_mq_free_rq_map(set, set->tags[i], i);
2131 	}
2132 
2133 	kfree(set->tags);
2134 	set->tags = NULL;
2135 }
2136 EXPORT_SYMBOL(blk_mq_free_tag_set);
2137 
2138 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2139 {
2140 	struct blk_mq_tag_set *set = q->tag_set;
2141 	struct blk_mq_hw_ctx *hctx;
2142 	int i, ret;
2143 
2144 	if (!set || nr > set->queue_depth)
2145 		return -EINVAL;
2146 
2147 	ret = 0;
2148 	queue_for_each_hw_ctx(q, hctx, i) {
2149 		ret = blk_mq_tag_update_depth(hctx->tags, nr);
2150 		if (ret)
2151 			break;
2152 	}
2153 
2154 	if (!ret)
2155 		q->nr_requests = nr;
2156 
2157 	return ret;
2158 }
2159 
2160 void blk_mq_disable_hotplug(void)
2161 {
2162 	mutex_lock(&all_q_mutex);
2163 }
2164 
2165 void blk_mq_enable_hotplug(void)
2166 {
2167 	mutex_unlock(&all_q_mutex);
2168 }
2169 
2170 static int __init blk_mq_init(void)
2171 {
2172 	blk_mq_cpu_init();
2173 
2174 	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2175 
2176 	return 0;
2177 }
2178 subsys_initcall(blk_mq_init);
2179