xref: /openbmc/linux/block/blk-mq.c (revision 94c7b6fc)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 
24 #include <trace/events/block.h>
25 
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30 
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33 
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35 
36 /*
37  * Check if any of the ctx's have pending work in this hardware queue
38  */
39 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
40 {
41 	unsigned int i;
42 
43 	for (i = 0; i < hctx->ctx_map.map_size; i++)
44 		if (hctx->ctx_map.map[i].word)
45 			return true;
46 
47 	return false;
48 }
49 
50 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51 					      struct blk_mq_ctx *ctx)
52 {
53 	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54 }
55 
56 #define CTX_TO_BIT(hctx, ctx)	\
57 	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
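
/*
 * Worked example (editor's note, not in the original source): ctx_map is an
 * array of bitmap words, and a software queue's index_hw selects one word and
 * one bit within it. With bits_per_word == 8, a ctx with index_hw == 19 lives
 * in ctx_map.map[19 / 8] == map[2], at bit (19 & 7) == 3. get_bm() returns
 * the word and CTX_TO_BIT() the bit offset used by the mark/clear helpers
 * below.
 */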
58 
59 /*
60  * Mark this ctx as having pending work in this hardware queue
61  */
62 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
63 				     struct blk_mq_ctx *ctx)
64 {
65 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
66 
67 	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68 		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
69 }
70 
71 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
72 				      struct blk_mq_ctx *ctx)
73 {
74 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
75 
76 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
77 }
78 
79 static int blk_mq_queue_enter(struct request_queue *q)
80 {
81 	int ret;
82 
83 	__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
84 	smp_wmb();
85 
86 	/* we have problems freezing the queue if it's initializing */
87 	if (!blk_queue_dying(q) &&
88 	    (!blk_queue_bypass(q) || !blk_queue_init_done(q)))
89 		return 0;
90 
91 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
92 
93 	spin_lock_irq(q->queue_lock);
94 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
95 		!blk_queue_bypass(q) || blk_queue_dying(q),
96 		*q->queue_lock);
97 	/* inc usage with lock held to prevent freeze_queue from running here */
98 	if (!ret && !blk_queue_dying(q))
99 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
100 	else if (blk_queue_dying(q))
101 		ret = -ENODEV;
102 	spin_unlock_irq(q->queue_lock);
103 
104 	return ret;
105 }
106 
107 static void blk_mq_queue_exit(struct request_queue *q)
108 {
109 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
110 }
111 
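/*
 * Editor's note: blk_mq_queue_enter()/blk_mq_queue_exit() bracket every
 * request allocation with mq_usage_counter. blk_mq_drain_queue() below loops
 * until that counter drops to zero, starting the hardware queues in between,
 * so a freeze (bypass) can make progress while outstanding requests complete.
 */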
112 void blk_mq_drain_queue(struct request_queue *q)
113 {
114 	while (true) {
115 		s64 count;
116 
117 		spin_lock_irq(q->queue_lock);
118 		count = percpu_counter_sum(&q->mq_usage_counter);
119 		spin_unlock_irq(q->queue_lock);
120 
121 		if (count == 0)
122 			break;
123 		blk_mq_start_hw_queues(q);
124 		msleep(10);
125 	}
126 }
127 
128 /*
129  * Guarantee no request is in use, so we can change any data structure of
130  * the queue afterward.
131  */
132 static void blk_mq_freeze_queue(struct request_queue *q)
133 {
134 	bool drain;
135 
136 	spin_lock_irq(q->queue_lock);
137 	drain = !q->bypass_depth++;
138 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
139 	spin_unlock_irq(q->queue_lock);
140 
141 	if (drain)
142 		blk_mq_drain_queue(q);
143 }
144 
145 static void blk_mq_unfreeze_queue(struct request_queue *q)
146 {
147 	bool wake = false;
148 
149 	spin_lock_irq(q->queue_lock);
150 	if (!--q->bypass_depth) {
151 		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
152 		wake = true;
153 	}
154 	WARN_ON_ONCE(q->bypass_depth < 0);
155 	spin_unlock_irq(q->queue_lock);
156 	if (wake)
157 		wake_up_all(&q->mq_freeze_wq);
158 }
159 
160 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
161 {
162 	return blk_mq_has_free_tags(hctx->tags);
163 }
164 EXPORT_SYMBOL(blk_mq_can_queue);
165 
166 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
167 			       struct request *rq, unsigned int rw_flags)
168 {
169 	if (blk_queue_io_stat(q))
170 		rw_flags |= REQ_IO_STAT;
171 
172 	INIT_LIST_HEAD(&rq->queuelist);
173 	/* csd/requeue_work/fifo_time are initialized before use */
174 	rq->q = q;
175 	rq->mq_ctx = ctx;
176 	rq->cmd_flags |= rw_flags;
177 	/* do not touch atomic flags, they need atomic ops against the timer */
178 	rq->cpu = -1;
179 	INIT_HLIST_NODE(&rq->hash);
180 	RB_CLEAR_NODE(&rq->rb_node);
181 	rq->rq_disk = NULL;
182 	rq->part = NULL;
183 	rq->start_time = jiffies;
184 #ifdef CONFIG_BLK_CGROUP
185 	rq->rl = NULL;
186 	set_start_time_ns(rq);
187 	rq->io_start_time_ns = 0;
188 #endif
189 	rq->nr_phys_segments = 0;
190 #if defined(CONFIG_BLK_DEV_INTEGRITY)
191 	rq->nr_integrity_segments = 0;
192 #endif
193 	rq->special = NULL;
194 	/* tag was already set */
195 	rq->errors = 0;
196 
197 	rq->extra_len = 0;
198 	rq->sense_len = 0;
199 	rq->resid_len = 0;
200 	rq->sense = NULL;
201 
202 	INIT_LIST_HEAD(&rq->timeout_list);
203 	rq->timeout = 0;
204 
205 	rq->end_io = NULL;
206 	rq->end_io_data = NULL;
207 	rq->next_rq = NULL;
208 
209 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
210 }
211 
212 static struct request *
213 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
214 {
215 	struct request *rq;
216 	unsigned int tag;
217 
218 	tag = blk_mq_get_tag(data);
219 	if (tag != BLK_MQ_TAG_FAIL) {
220 		rq = data->hctx->tags->rqs[tag];
221 
222 		rq->cmd_flags = 0;
223 		if (blk_mq_tag_busy(data->hctx)) {
224 			rq->cmd_flags = REQ_MQ_INFLIGHT;
225 			atomic_inc(&data->hctx->nr_active);
226 		}
227 
228 		rq->tag = tag;
229 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
230 		return rq;
231 	}
232 
233 	return NULL;
234 }
235 
236 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
237 		bool reserved)
238 {
239 	struct blk_mq_ctx *ctx;
240 	struct blk_mq_hw_ctx *hctx;
241 	struct request *rq;
242 	struct blk_mq_alloc_data alloc_data;
243 
244 	if (blk_mq_queue_enter(q))
245 		return NULL;
246 
247 	ctx = blk_mq_get_ctx(q);
248 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
249 	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
250 			reserved, ctx, hctx);
251 
252 	rq = __blk_mq_alloc_request(&alloc_data, rw);
253 	if (!rq && (gfp & __GFP_WAIT)) {
254 		__blk_mq_run_hw_queue(hctx);
255 		blk_mq_put_ctx(ctx);
256 
257 		ctx = blk_mq_get_ctx(q);
258 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
259 		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
260 				hctx);
261 		rq = __blk_mq_alloc_request(&alloc_data, rw);
262 		ctx = alloc_data.ctx;
263 	}
264 	blk_mq_put_ctx(ctx);
265 	return rq;
266 }
267 EXPORT_SYMBOL(blk_mq_alloc_request);
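
/*
 * Usage sketch (editor's illustration, not from the original file): a caller
 * can allocate a request, fill the per-request driver payload (sized by the
 * tag set's cmd_size), and feed it back into the mq path with helpers from
 * this file. mydrv_fill_cmd() is a hypothetical helper.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (!rq)
 *		return -ENOMEM;
 *	mydrv_fill_cmd(blk_mq_rq_to_pdu(rq));
 *	blk_mq_insert_request(rq, false, true, false);
 */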
268 
269 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
270 				  struct blk_mq_ctx *ctx, struct request *rq)
271 {
272 	const int tag = rq->tag;
273 	struct request_queue *q = rq->q;
274 
275 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
276 		atomic_dec(&hctx->nr_active);
277 
278 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
279 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
280 	blk_mq_queue_exit(q);
281 }
282 
283 void blk_mq_free_request(struct request *rq)
284 {
285 	struct blk_mq_ctx *ctx = rq->mq_ctx;
286 	struct blk_mq_hw_ctx *hctx;
287 	struct request_queue *q = rq->q;
288 
289 	ctx->rq_completed[rq_is_sync(rq)]++;
290 
291 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
292 	__blk_mq_free_request(hctx, ctx, rq);
293 }
294 
295 /*
296  * Clone all relevant state from a request that has been put on hold in
297  * the flush state machine into the preallocated flush request that hangs
298  * off the request queue.
299  *
300  * For a driver the flush request should be invisible; that's why we are
301  * impersonating the original request here.
302  */
303 void blk_mq_clone_flush_request(struct request *flush_rq,
304 		struct request *orig_rq)
305 {
306 	struct blk_mq_hw_ctx *hctx =
307 		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
308 
309 	flush_rq->mq_ctx = orig_rq->mq_ctx;
310 	flush_rq->tag = orig_rq->tag;
311 	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
312 		hctx->cmd_size);
313 }
314 
315 inline void __blk_mq_end_io(struct request *rq, int error)
316 {
317 	blk_account_io_done(rq);
318 
319 	if (rq->end_io) {
320 		rq->end_io(rq, error);
321 	} else {
322 		if (unlikely(blk_bidi_rq(rq)))
323 			blk_mq_free_request(rq->next_rq);
324 		blk_mq_free_request(rq);
325 	}
326 }
327 EXPORT_SYMBOL(__blk_mq_end_io);
328 
329 void blk_mq_end_io(struct request *rq, int error)
330 {
331 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
332 		BUG();
333 	__blk_mq_end_io(rq, error);
334 }
335 EXPORT_SYMBOL(blk_mq_end_io);
336 
337 static void __blk_mq_complete_request_remote(void *data)
338 {
339 	struct request *rq = data;
340 
341 	rq->q->softirq_done_fn(rq);
342 }
343 
344 static void blk_mq_ipi_complete_request(struct request *rq)
345 {
346 	struct blk_mq_ctx *ctx = rq->mq_ctx;
347 	bool shared = false;
348 	int cpu;
349 
350 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
351 		rq->q->softirq_done_fn(rq);
352 		return;
353 	}
354 
355 	cpu = get_cpu();
356 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
357 		shared = cpus_share_cache(cpu, ctx->cpu);
358 
359 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
360 		rq->csd.func = __blk_mq_complete_request_remote;
361 		rq->csd.info = rq;
362 		rq->csd.flags = 0;
363 		smp_call_function_single_async(ctx->cpu, &rq->csd);
364 	} else {
365 		rq->q->softirq_done_fn(rq);
366 	}
367 	put_cpu();
368 }
369 
370 void __blk_mq_complete_request(struct request *rq)
371 {
372 	struct request_queue *q = rq->q;
373 
374 	if (!q->softirq_done_fn)
375 		blk_mq_end_io(rq, rq->errors);
376 	else
377 		blk_mq_ipi_complete_request(rq);
378 }
379 
380 /**
381  * blk_mq_complete_request - end I/O on a request
382  * @rq:		the request being processed
383  *
384  * Description:
385  *	Ends all I/O on a request. It does not handle partial completions.
386  *	The actual completion happens out-of-order, through an IPI handler.
387  **/
388 void blk_mq_complete_request(struct request *rq)
389 {
390 	struct request_queue *q = rq->q;
391 
392 	if (unlikely(blk_should_fake_timeout(q)))
393 		return;
394 	if (!blk_mark_rq_complete(rq))
395 		__blk_mq_complete_request(rq);
396 }
397 EXPORT_SYMBOL(blk_mq_complete_request);
398 
399 static void blk_mq_start_request(struct request *rq, bool last)
400 {
401 	struct request_queue *q = rq->q;
402 
403 	trace_block_rq_issue(q, rq);
404 
405 	rq->resid_len = blk_rq_bytes(rq);
406 	if (unlikely(blk_bidi_rq(rq)))
407 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
408 
409 	blk_add_timer(rq);
410 
411 	/*
412 	 * Mark us as started and clear complete. Complete might have been
413 	 * set if requeue raced with timeout, which then marked it as
414 	 * complete. So be sure to clear complete again when we start
415 	 * the request, otherwise we'll ignore the completion event.
416 	 */
417 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
418 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
419 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
420 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
421 
422 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
423 		/*
424 		 * Make sure space for the drain appears.  We know we can do
425 		 * this because max_hw_segments has been adjusted to be one
426 		 * fewer than the device can handle.
427 		 */
428 		rq->nr_phys_segments++;
429 	}
430 
431 	/*
432 	 * Flag the last request in the series so that drivers know when IO
433 	 * should be kicked off, if they don't do it on a per-request basis.
434 	 *
435 	 * Note: the flag isn't the only condition on which drivers should kick
436 	 * off IO. If the drive is busy, the last request might not have the bit set.
437 	 */
438 	if (last)
439 		rq->cmd_flags |= REQ_END;
440 }
441 
442 static void __blk_mq_requeue_request(struct request *rq)
443 {
444 	struct request_queue *q = rq->q;
445 
446 	trace_block_rq_requeue(q, rq);
447 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
448 
449 	rq->cmd_flags &= ~REQ_END;
450 
451 	if (q->dma_drain_size && blk_rq_bytes(rq))
452 		rq->nr_phys_segments--;
453 }
454 
455 void blk_mq_requeue_request(struct request *rq)
456 {
457 	__blk_mq_requeue_request(rq);
458 	blk_clear_rq_complete(rq);
459 
460 	BUG_ON(blk_queued_rq(rq));
461 	blk_mq_add_to_requeue_list(rq, true);
462 }
463 EXPORT_SYMBOL(blk_mq_requeue_request);
464 
465 static void blk_mq_requeue_work(struct work_struct *work)
466 {
467 	struct request_queue *q =
468 		container_of(work, struct request_queue, requeue_work);
469 	LIST_HEAD(rq_list);
470 	struct request *rq, *next;
471 	unsigned long flags;
472 
473 	spin_lock_irqsave(&q->requeue_lock, flags);
474 	list_splice_init(&q->requeue_list, &rq_list);
475 	spin_unlock_irqrestore(&q->requeue_lock, flags);
476 
477 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
478 		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
479 			continue;
480 
481 		rq->cmd_flags &= ~REQ_SOFTBARRIER;
482 		list_del_init(&rq->queuelist);
483 		blk_mq_insert_request(rq, true, false, false);
484 	}
485 
486 	while (!list_empty(&rq_list)) {
487 		rq = list_entry(rq_list.next, struct request, queuelist);
488 		list_del_init(&rq->queuelist);
489 		blk_mq_insert_request(rq, false, false, false);
490 	}
491 
492 	blk_mq_run_queues(q, false);
493 }
494 
495 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
496 {
497 	struct request_queue *q = rq->q;
498 	unsigned long flags;
499 
500 	/*
501 	 * We abuse this flag that is otherwise used by the I/O scheduler to
502 	 * request head insertion from the workqueue.
503 	 */
504 	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
505 
506 	spin_lock_irqsave(&q->requeue_lock, flags);
507 	if (at_head) {
508 		rq->cmd_flags |= REQ_SOFTBARRIER;
509 		list_add(&rq->queuelist, &q->requeue_list);
510 	} else {
511 		list_add_tail(&rq->queuelist, &q->requeue_list);
512 	}
513 	spin_unlock_irqrestore(&q->requeue_lock, flags);
514 }
515 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
516 
517 void blk_mq_kick_requeue_list(struct request_queue *q)
518 {
519 	kblockd_schedule_work(&q->requeue_work);
520 }
521 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
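
/*
 * Editor's note: the expected flow is that a driver calls
 * blk_mq_requeue_request() for each request it wants retried, which parks
 * them on q->requeue_list, and then calls blk_mq_kick_requeue_list() once to
 * schedule blk_mq_requeue_work(), which re-inserts them and reruns the
 * queues. Adding to the list alone does not schedule the work.
 */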
522 
523 static inline bool is_flush_request(struct request *rq, unsigned int tag)
524 {
525 	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
526 			rq->q->flush_rq->tag == tag);
527 }
528 
529 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
530 {
531 	struct request *rq = tags->rqs[tag];
532 
533 	if (!is_flush_request(rq, tag))
534 		return rq;
535 
536 	return rq->q->flush_rq;
537 }
538 EXPORT_SYMBOL(blk_mq_tag_to_rq);
539 
540 struct blk_mq_timeout_data {
541 	struct blk_mq_hw_ctx *hctx;
542 	unsigned long *next;
543 	unsigned int *next_set;
544 };
545 
546 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
547 {
548 	struct blk_mq_timeout_data *data = __data;
549 	struct blk_mq_hw_ctx *hctx = data->hctx;
550 	unsigned int tag;
551 
552 	/* It may not be in flight yet (this is where
553 	 * the REQ_ATOM_STARTED flag comes in). The requests are
554 	 * statically allocated, so we know it's always safe to access the
555 	 * memory associated with a bit offset into ->rqs[].
556 	 */
557 	tag = 0;
558 	do {
559 		struct request *rq;
560 
561 		tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
562 		if (tag >= hctx->tags->nr_tags)
563 			break;
564 
565 		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
566 		if (rq->q != hctx->queue)
567 			continue;
568 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
569 			continue;
570 
571 		blk_rq_check_expired(rq, data->next, data->next_set);
572 	} while (1);
573 }
574 
575 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
576 					unsigned long *next,
577 					unsigned int *next_set)
578 {
579 	struct blk_mq_timeout_data data = {
580 		.hctx		= hctx,
581 		.next		= next,
582 		.next_set	= next_set,
583 	};
584 
585 	/*
586 	 * Ask the tagging code to iterate busy requests, so we can
587 	 * check them for timeout.
588 	 */
589 	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
590 }
591 
592 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
593 {
594 	struct request_queue *q = rq->q;
595 
596 	/*
597 	 * We know that complete is set at this point. If STARTED isn't set
598 	 * anymore, then the request isn't active and the "timeout" should
599 	 * just be ignored. This can happen due to the bitflag ordering.
600 	 * Timeout first checks if STARTED is set, and if it is, assumes
601 	 * the request is active. But if we race with completion, then
602 	 * both flags will get cleared. So check here again, and ignore
603 	 * a timeout event with a request that isn't active.
604 	 */
605 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
606 		return BLK_EH_NOT_HANDLED;
607 
608 	if (!q->mq_ops->timeout)
609 		return BLK_EH_RESET_TIMER;
610 
611 	return q->mq_ops->timeout(rq);
612 }
613 
614 static void blk_mq_rq_timer(unsigned long data)
615 {
616 	struct request_queue *q = (struct request_queue *) data;
617 	struct blk_mq_hw_ctx *hctx;
618 	unsigned long next = 0;
619 	int i, next_set = 0;
620 
621 	queue_for_each_hw_ctx(q, hctx, i) {
622 		/*
623 		 * If no software queues are currently mapped to this
624 		 * hardware queue, there's nothing to check
625 		 */
626 		if (!hctx->nr_ctx || !hctx->tags)
627 			continue;
628 
629 		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
630 	}
631 
632 	if (next_set) {
633 		next = blk_rq_timeout(round_jiffies_up(next));
634 		mod_timer(&q->timeout, next);
635 	} else {
636 		queue_for_each_hw_ctx(q, hctx, i)
637 			blk_mq_tag_idle(hctx);
638 	}
639 }
640 
641 /*
642  * Reverse check our software queue for entries that we could potentially
643  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
644  * too much time checking for merges.
645  */
646 static bool blk_mq_attempt_merge(struct request_queue *q,
647 				 struct blk_mq_ctx *ctx, struct bio *bio)
648 {
649 	struct request *rq;
650 	int checked = 8;
651 
652 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
653 		int el_ret;
654 
655 		if (!checked--)
656 			break;
657 
658 		if (!blk_rq_merge_ok(rq, bio))
659 			continue;
660 
661 		el_ret = blk_try_merge(rq, bio);
662 		if (el_ret == ELEVATOR_BACK_MERGE) {
663 			if (bio_attempt_back_merge(q, rq, bio)) {
664 				ctx->rq_merged++;
665 				return true;
666 			}
667 			break;
668 		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
669 			if (bio_attempt_front_merge(q, rq, bio)) {
670 				ctx->rq_merged++;
671 				return true;
672 			}
673 			break;
674 		}
675 	}
676 
677 	return false;
678 }
679 
680 /*
681  * Process software queues that have been marked busy, splicing them
682  * to the for-dispatch list.
683  */
684 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
685 {
686 	struct blk_mq_ctx *ctx;
687 	int i;
688 
689 	for (i = 0; i < hctx->ctx_map.map_size; i++) {
690 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
691 		unsigned int off, bit;
692 
693 		if (!bm->word)
694 			continue;
695 
696 		bit = 0;
697 		off = i * hctx->ctx_map.bits_per_word;
698 		do {
699 			bit = find_next_bit(&bm->word, bm->depth, bit);
700 			if (bit >= bm->depth)
701 				break;
702 
703 			ctx = hctx->ctxs[bit + off];
704 			clear_bit(bit, &bm->word);
705 			spin_lock(&ctx->lock);
706 			list_splice_tail_init(&ctx->rq_list, list);
707 			spin_unlock(&ctx->lock);
708 
709 			bit++;
710 		} while (1);
711 	}
712 }
713 
714 /*
715  * Run this hardware queue, pulling any software queues mapped to it in.
716  * Note that this function currently has various problems around ordering
717  * of IO. In particular, we'd like FIFO behaviour on handling existing
718  * items on the hctx->dispatch list. Ignore that for now.
719  */
720 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
721 {
722 	struct request_queue *q = hctx->queue;
723 	struct request *rq;
724 	LIST_HEAD(rq_list);
725 	int queued;
726 
727 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
728 
729 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
730 		return;
731 
732 	hctx->run++;
733 
734 	/*
735 	 * Touch any software queue that has pending entries.
736 	 */
737 	flush_busy_ctxs(hctx, &rq_list);
738 
739 	/*
740 	 * If we have previous entries on our dispatch list, grab them
741 	 * and stuff them at the front for more fair dispatch.
742 	 */
743 	if (!list_empty_careful(&hctx->dispatch)) {
744 		spin_lock(&hctx->lock);
745 		if (!list_empty(&hctx->dispatch))
746 			list_splice_init(&hctx->dispatch, &rq_list);
747 		spin_unlock(&hctx->lock);
748 	}
749 
750 	/*
751 	 * Now process all the entries, sending them to the driver.
752 	 */
753 	queued = 0;
754 	while (!list_empty(&rq_list)) {
755 		int ret;
756 
757 		rq = list_first_entry(&rq_list, struct request, queuelist);
758 		list_del_init(&rq->queuelist);
759 
760 		blk_mq_start_request(rq, list_empty(&rq_list));
761 
762 		ret = q->mq_ops->queue_rq(hctx, rq);
763 		switch (ret) {
764 		case BLK_MQ_RQ_QUEUE_OK:
765 			queued++;
766 			continue;
767 		case BLK_MQ_RQ_QUEUE_BUSY:
768 			list_add(&rq->queuelist, &rq_list);
769 			__blk_mq_requeue_request(rq);
770 			break;
771 		default:
772 			pr_err("blk-mq: bad return on queue: %d\n", ret);
773 		case BLK_MQ_RQ_QUEUE_ERROR:
774 			rq->errors = -EIO;
775 			blk_mq_end_io(rq, rq->errors);
776 			break;
777 		}
778 
779 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
780 			break;
781 	}
782 
783 	if (!queued)
784 		hctx->dispatched[0]++;
785 	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
786 		hctx->dispatched[ilog2(queued) + 1]++;
787 
788 	/*
789 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
790 	 * that is where we will continue on next queue run.
791 	 */
792 	if (!list_empty(&rq_list)) {
793 		spin_lock(&hctx->lock);
794 		list_splice(&rq_list, &hctx->dispatch);
795 		spin_unlock(&hctx->lock);
796 	}
797 }
798 
799 /*
800  * It'd be great if the workqueue API had a way to pass
801  * in a mask and had some smarts for more clever placement.
802  * For now we just round-robin here, switching for every
803  * BLK_MQ_CPU_WORK_BATCH queued items.
804  */
805 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
806 {
807 	int cpu = hctx->next_cpu;
808 
809 	if (--hctx->next_cpu_batch <= 0) {
810 		int next_cpu;
811 
812 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
813 		if (next_cpu >= nr_cpu_ids)
814 			next_cpu = cpumask_first(hctx->cpumask);
815 
816 		hctx->next_cpu = next_cpu;
817 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
818 	}
819 
820 	return cpu;
821 }
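
/*
 * Example (editor's note): with hctx->cpumask containing CPUs {1, 4, 6} and
 * next_cpu == 4, the current batch of work runs on CPU 4; once
 * BLK_MQ_CPU_WORK_BATCH items have been handed out, next_cpu advances to 6,
 * and after 6 it wraps back to 1 via cpumask_first().
 */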
822 
823 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
824 {
825 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
826 		return;
827 
828 	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
829 		__blk_mq_run_hw_queue(hctx);
830 	else if (hctx->queue->nr_hw_queues == 1)
831 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
832 	else {
833 		unsigned int cpu;
834 
835 		cpu = blk_mq_hctx_next_cpu(hctx);
836 		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
837 	}
838 }
839 
840 void blk_mq_run_queues(struct request_queue *q, bool async)
841 {
842 	struct blk_mq_hw_ctx *hctx;
843 	int i;
844 
845 	queue_for_each_hw_ctx(q, hctx, i) {
846 		if ((!blk_mq_hctx_has_pending(hctx) &&
847 		    list_empty_careful(&hctx->dispatch)) ||
848 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
849 			continue;
850 
851 		preempt_disable();
852 		blk_mq_run_hw_queue(hctx, async);
853 		preempt_enable();
854 	}
855 }
856 EXPORT_SYMBOL(blk_mq_run_queues);
857 
858 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
859 {
860 	cancel_delayed_work(&hctx->run_work);
861 	cancel_delayed_work(&hctx->delay_work);
862 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
863 }
864 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
865 
866 void blk_mq_stop_hw_queues(struct request_queue *q)
867 {
868 	struct blk_mq_hw_ctx *hctx;
869 	int i;
870 
871 	queue_for_each_hw_ctx(q, hctx, i)
872 		blk_mq_stop_hw_queue(hctx);
873 }
874 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
875 
876 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
877 {
878 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
879 
880 	preempt_disable();
881 	blk_mq_run_hw_queue(hctx, false);
882 	preempt_enable();
883 }
884 EXPORT_SYMBOL(blk_mq_start_hw_queue);
885 
886 void blk_mq_start_hw_queues(struct request_queue *q)
887 {
888 	struct blk_mq_hw_ctx *hctx;
889 	int i;
890 
891 	queue_for_each_hw_ctx(q, hctx, i)
892 		blk_mq_start_hw_queue(hctx);
893 }
894 EXPORT_SYMBOL(blk_mq_start_hw_queues);
895 
896 
897 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
898 {
899 	struct blk_mq_hw_ctx *hctx;
900 	int i;
901 
902 	queue_for_each_hw_ctx(q, hctx, i) {
903 		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
904 			continue;
905 
906 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
907 		preempt_disable();
908 		blk_mq_run_hw_queue(hctx, async);
909 		preempt_enable();
910 	}
911 }
912 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
913 
914 static void blk_mq_run_work_fn(struct work_struct *work)
915 {
916 	struct blk_mq_hw_ctx *hctx;
917 
918 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
919 
920 	__blk_mq_run_hw_queue(hctx);
921 }
922 
923 static void blk_mq_delay_work_fn(struct work_struct *work)
924 {
925 	struct blk_mq_hw_ctx *hctx;
926 
927 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
928 
929 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
930 		__blk_mq_run_hw_queue(hctx);
931 }
932 
933 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
934 {
935 	unsigned long tmo = msecs_to_jiffies(msecs);
936 
937 	if (hctx->queue->nr_hw_queues == 1)
938 		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
939 	else {
940 		unsigned int cpu;
941 
942 		cpu = blk_mq_hctx_next_cpu(hctx);
943 		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
944 	}
945 }
946 EXPORT_SYMBOL(blk_mq_delay_queue);
947 
948 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
949 				    struct request *rq, bool at_head)
950 {
951 	struct blk_mq_ctx *ctx = rq->mq_ctx;
952 
953 	trace_block_rq_insert(hctx->queue, rq);
954 
955 	if (at_head)
956 		list_add(&rq->queuelist, &ctx->rq_list);
957 	else
958 		list_add_tail(&rq->queuelist, &ctx->rq_list);
959 
960 	blk_mq_hctx_mark_pending(hctx, ctx);
961 }
962 
963 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
964 		bool async)
965 {
966 	struct request_queue *q = rq->q;
967 	struct blk_mq_hw_ctx *hctx;
968 	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
969 
970 	current_ctx = blk_mq_get_ctx(q);
971 	if (!cpu_online(ctx->cpu))
972 		rq->mq_ctx = ctx = current_ctx;
973 
974 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
975 
976 	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
977 	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
978 		blk_insert_flush(rq);
979 	} else {
980 		spin_lock(&ctx->lock);
981 		__blk_mq_insert_request(hctx, rq, at_head);
982 		spin_unlock(&ctx->lock);
983 	}
984 
985 	if (run_queue)
986 		blk_mq_run_hw_queue(hctx, async);
987 
988 	blk_mq_put_ctx(current_ctx);
989 }
990 
991 static void blk_mq_insert_requests(struct request_queue *q,
992 				     struct blk_mq_ctx *ctx,
993 				     struct list_head *list,
994 				     int depth,
995 				     bool from_schedule)
996 
997 {
998 	struct blk_mq_hw_ctx *hctx;
999 	struct blk_mq_ctx *current_ctx;
1000 
1001 	trace_block_unplug(q, depth, !from_schedule);
1002 
1003 	current_ctx = blk_mq_get_ctx(q);
1004 
1005 	if (!cpu_online(ctx->cpu))
1006 		ctx = current_ctx;
1007 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1008 
1009 	/*
1010 	 * preemption doesn't flush the plug list, so it's possible ctx->cpu is
1011 	 * offline now
1012 	 */
1013 	spin_lock(&ctx->lock);
1014 	while (!list_empty(list)) {
1015 		struct request *rq;
1016 
1017 		rq = list_first_entry(list, struct request, queuelist);
1018 		list_del_init(&rq->queuelist);
1019 		rq->mq_ctx = ctx;
1020 		__blk_mq_insert_request(hctx, rq, false);
1021 	}
1022 	spin_unlock(&ctx->lock);
1023 
1024 	blk_mq_run_hw_queue(hctx, from_schedule);
1025 	blk_mq_put_ctx(current_ctx);
1026 }
1027 
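/*
 * Editor's note: sort key for flushing a plug list. Requests are grouped by
 * software queue (mq_ctx) first and ordered by start sector within a queue,
 * so that blk_mq_flush_plug_list() below can hand each ctx's requests to
 * blk_mq_insert_requests() in a single batch.
 */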
1028 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1029 {
1030 	struct request *rqa = container_of(a, struct request, queuelist);
1031 	struct request *rqb = container_of(b, struct request, queuelist);
1032 
1033 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1034 		 (rqa->mq_ctx == rqb->mq_ctx &&
1035 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1036 }
1037 
1038 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1039 {
1040 	struct blk_mq_ctx *this_ctx;
1041 	struct request_queue *this_q;
1042 	struct request *rq;
1043 	LIST_HEAD(list);
1044 	LIST_HEAD(ctx_list);
1045 	unsigned int depth;
1046 
1047 	list_splice_init(&plug->mq_list, &list);
1048 
1049 	list_sort(NULL, &list, plug_ctx_cmp);
1050 
1051 	this_q = NULL;
1052 	this_ctx = NULL;
1053 	depth = 0;
1054 
1055 	while (!list_empty(&list)) {
1056 		rq = list_entry_rq(list.next);
1057 		list_del_init(&rq->queuelist);
1058 		BUG_ON(!rq->q);
1059 		if (rq->mq_ctx != this_ctx) {
1060 			if (this_ctx) {
1061 				blk_mq_insert_requests(this_q, this_ctx,
1062 							&ctx_list, depth,
1063 							from_schedule);
1064 			}
1065 
1066 			this_ctx = rq->mq_ctx;
1067 			this_q = rq->q;
1068 			depth = 0;
1069 		}
1070 
1071 		depth++;
1072 		list_add_tail(&rq->queuelist, &ctx_list);
1073 	}
1074 
1075 	/*
1076 	 * If 'this_ctx' is set, we know we have entries to complete
1077 	 * on 'ctx_list'. Do those.
1078 	 */
1079 	if (this_ctx) {
1080 		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1081 				       from_schedule);
1082 	}
1083 }
1084 
1085 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1086 {
1087 	init_request_from_bio(rq, bio);
1088 
1089 	if (blk_do_io_stat(rq))
1090 		blk_account_io_start(rq, 1);
1091 }
1092 
1093 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1094 					 struct blk_mq_ctx *ctx,
1095 					 struct request *rq, struct bio *bio)
1096 {
1097 	struct request_queue *q = hctx->queue;
1098 
1099 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1100 		blk_mq_bio_to_request(rq, bio);
1101 		spin_lock(&ctx->lock);
1102 insert_rq:
1103 		__blk_mq_insert_request(hctx, rq, false);
1104 		spin_unlock(&ctx->lock);
1105 		return false;
1106 	} else {
1107 		spin_lock(&ctx->lock);
1108 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1109 			blk_mq_bio_to_request(rq, bio);
1110 			goto insert_rq;
1111 		}
1112 
1113 		spin_unlock(&ctx->lock);
1114 		__blk_mq_free_request(hctx, ctx, rq);
1115 		return true;
1116 	}
1117 }
1118 
1119 struct blk_map_ctx {
1120 	struct blk_mq_hw_ctx *hctx;
1121 	struct blk_mq_ctx *ctx;
1122 };
1123 
1124 static struct request *blk_mq_map_request(struct request_queue *q,
1125 					  struct bio *bio,
1126 					  struct blk_map_ctx *data)
1127 {
1128 	struct blk_mq_hw_ctx *hctx;
1129 	struct blk_mq_ctx *ctx;
1130 	struct request *rq;
1131 	int rw = bio_data_dir(bio);
1132 	struct blk_mq_alloc_data alloc_data;
1133 
1134 	if (unlikely(blk_mq_queue_enter(q))) {
1135 		bio_endio(bio, -EIO);
1136 		return NULL;
1137 	}
1138 
1139 	ctx = blk_mq_get_ctx(q);
1140 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1141 
1142 	if (rw_is_sync(bio->bi_rw))
1143 		rw |= REQ_SYNC;
1144 
1145 	trace_block_getrq(q, bio, rw);
1146 	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1147 			hctx);
1148 	rq = __blk_mq_alloc_request(&alloc_data, rw);
1149 	if (unlikely(!rq)) {
1150 		__blk_mq_run_hw_queue(hctx);
1151 		blk_mq_put_ctx(ctx);
1152 		trace_block_sleeprq(q, bio, rw);
1153 
1154 		ctx = blk_mq_get_ctx(q);
1155 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
1156 		blk_mq_set_alloc_data(&alloc_data, q,
1157 				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1158 		rq = __blk_mq_alloc_request(&alloc_data, rw);
1159 		ctx = alloc_data.ctx;
1160 		hctx = alloc_data.hctx;
1161 	}
1162 
1163 	hctx->queued++;
1164 	data->hctx = hctx;
1165 	data->ctx = ctx;
1166 	return rq;
1167 }
1168 
1169 /*
1170  * Multiple hardware queue variant. This will not use per-process plugs,
1171  * but will attempt to bypass the hctx queueing if we can go straight to
1172  * hardware for SYNC IO.
1173  */
1174 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1175 {
1176 	const int is_sync = rw_is_sync(bio->bi_rw);
1177 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1178 	struct blk_map_ctx data;
1179 	struct request *rq;
1180 
1181 	blk_queue_bounce(q, &bio);
1182 
1183 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1184 		bio_endio(bio, -EIO);
1185 		return;
1186 	}
1187 
1188 	rq = blk_mq_map_request(q, bio, &data);
1189 	if (unlikely(!rq))
1190 		return;
1191 
1192 	if (unlikely(is_flush_fua)) {
1193 		blk_mq_bio_to_request(rq, bio);
1194 		blk_insert_flush(rq);
1195 		goto run_queue;
1196 	}
1197 
1198 	if (is_sync) {
1199 		int ret;
1200 
1201 		blk_mq_bio_to_request(rq, bio);
1202 		blk_mq_start_request(rq, true);
1203 
1204 		/*
1205 		 * If the queue accepted the request, we are done. On error, kill
1206 		 * it. For any other result (busy), just add it to our list as we
1207 		 * previously would have done.
1208 		 */
1209 		ret = q->mq_ops->queue_rq(data.hctx, rq);
1210 		if (ret == BLK_MQ_RQ_QUEUE_OK)
1211 			goto done;
1212 		else {
1213 			__blk_mq_requeue_request(rq);
1214 
1215 			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1216 				rq->errors = -EIO;
1217 				blk_mq_end_io(rq, rq->errors);
1218 				goto done;
1219 			}
1220 		}
1221 	}
1222 
1223 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1224 		/*
1225 		 * For a SYNC request, send it to the hardware immediately. For
1226 		 * an ASYNC request, just ensure that we run it later on. The
1227 		 * latter allows for merging opportunities and more efficient
1228 		 * dispatching.
1229 		 */
1230 run_queue:
1231 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1232 	}
1233 done:
1234 	blk_mq_put_ctx(data.ctx);
1235 }
1236 
1237 /*
1238  * Single hardware queue variant. This will attempt to use any per-process
1239  * plug for merging and IO deferral.
1240  */
1241 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1242 {
1243 	const int is_sync = rw_is_sync(bio->bi_rw);
1244 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1245 	unsigned int use_plug, request_count = 0;
1246 	struct blk_map_ctx data;
1247 	struct request *rq;
1248 
1249 	/*
1250 	 * Use the per-task plug only for async, non-flush IO; sync and
1251 	 * flush/FUA requests bypass the plug.
1252 	 */
1253 	use_plug = !is_flush_fua && !is_sync;
1254 
1255 	blk_queue_bounce(q, &bio);
1256 
1257 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1258 		bio_endio(bio, -EIO);
1259 		return;
1260 	}
1261 
1262 	if (use_plug && !blk_queue_nomerges(q) &&
1263 	    blk_attempt_plug_merge(q, bio, &request_count))
1264 		return;
1265 
1266 	rq = blk_mq_map_request(q, bio, &data);
1267 	if (unlikely(!rq))
1268 		return;
1269 
1270 	if (unlikely(is_flush_fua)) {
1271 		blk_mq_bio_to_request(rq, bio);
1272 		blk_insert_flush(rq);
1273 		goto run_queue;
1274 	}
1275 
1276 	/*
1277 	 * If a task plug currently exists, take advantage of it: since this is
1278 	 * completely lockless, we can use it to temporarily store requests
1279 	 * until the task is either done or scheduled away.
1280 	 */
1281 	if (use_plug) {
1282 		struct blk_plug *plug = current->plug;
1283 
1284 		if (plug) {
1285 			blk_mq_bio_to_request(rq, bio);
1286 			if (list_empty(&plug->mq_list))
1287 				trace_block_plug(q);
1288 			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1289 				blk_flush_plug_list(plug, false);
1290 				trace_block_plug(q);
1291 			}
1292 			list_add_tail(&rq->queuelist, &plug->mq_list);
1293 			blk_mq_put_ctx(data.ctx);
1294 			return;
1295 		}
1296 	}
1297 
1298 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1299 		/*
1300 		 * For a SYNC request, send it to the hardware immediately. For
1301 		 * an ASYNC request, just ensure that we run it later on. The
1302 		 * latter allows for merging opportunities and more efficient
1303 		 * dispatching.
1304 		 */
1305 run_queue:
1306 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1307 	}
1308 
1309 	blk_mq_put_ctx(data.ctx);
1310 }
1311 
1312 /*
1313  * Default mapping to a software queue, since we use one per CPU.
1314  */
1315 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1316 {
1317 	return q->queue_hw_ctx[q->mq_map[cpu]];
1318 }
1319 EXPORT_SYMBOL(blk_mq_map_queue);
1320 
1321 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1322 		struct blk_mq_tags *tags, unsigned int hctx_idx)
1323 {
1324 	struct page *page;
1325 
1326 	if (tags->rqs && set->ops->exit_request) {
1327 		int i;
1328 
1329 		for (i = 0; i < tags->nr_tags; i++) {
1330 			if (!tags->rqs[i])
1331 				continue;
1332 			set->ops->exit_request(set->driver_data, tags->rqs[i],
1333 						hctx_idx, i);
1334 		}
1335 	}
1336 
1337 	while (!list_empty(&tags->page_list)) {
1338 		page = list_first_entry(&tags->page_list, struct page, lru);
1339 		list_del_init(&page->lru);
1340 		__free_pages(page, page->private);
1341 	}
1342 
1343 	kfree(tags->rqs);
1344 
1345 	blk_mq_free_tags(tags);
1346 }
1347 
1348 static size_t order_to_size(unsigned int order)
1349 {
1350 	return (size_t)PAGE_SIZE << order;
1351 }
1352 
1353 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1354 		unsigned int hctx_idx)
1355 {
1356 	struct blk_mq_tags *tags;
1357 	unsigned int i, j, entries_per_page, max_order = 4;
1358 	size_t rq_size, left;
1359 
1360 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1361 				set->numa_node);
1362 	if (!tags)
1363 		return NULL;
1364 
1365 	INIT_LIST_HEAD(&tags->page_list);
1366 
1367 	tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1368 					GFP_KERNEL, set->numa_node);
1369 	if (!tags->rqs) {
1370 		blk_mq_free_tags(tags);
1371 		return NULL;
1372 	}
1373 
1374 	/*
1375 	 * rq_size is the size of the request plus driver payload, rounded
1376 	 * to the cacheline size
1377 	 */
1378 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1379 				cache_line_size());
1380 	left = rq_size * set->queue_depth;
1381 
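	/*
	 * Worked example (editor's note, sizes are illustrative only): if
	 * sizeof(struct request) + set->cmd_size rounds up to 512 bytes with a
	 * 64-byte cache line, then an order-2 allocation (16KB with 4KB pages)
	 * yields entries_per_page == 32 requests. The loop below keeps
	 * lowering the order when a large allocation fails, as long as one
	 * request still fits.
	 */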
1382 	for (i = 0; i < set->queue_depth; ) {
1383 		int this_order = max_order;
1384 		struct page *page;
1385 		int to_do;
1386 		void *p;
1387 
1388 		while (left < order_to_size(this_order - 1) && this_order)
1389 			this_order--;
1390 
1391 		do {
1392 			page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1393 						this_order);
1394 			if (page)
1395 				break;
1396 			if (!this_order--)
1397 				break;
1398 			if (order_to_size(this_order) < rq_size)
1399 				break;
1400 		} while (1);
1401 
1402 		if (!page)
1403 			goto fail;
1404 
1405 		page->private = this_order;
1406 		list_add_tail(&page->lru, &tags->page_list);
1407 
1408 		p = page_address(page);
1409 		entries_per_page = order_to_size(this_order) / rq_size;
1410 		to_do = min(entries_per_page, set->queue_depth - i);
1411 		left -= to_do * rq_size;
1412 		for (j = 0; j < to_do; j++) {
1413 			tags->rqs[i] = p;
1414 			if (set->ops->init_request) {
1415 				if (set->ops->init_request(set->driver_data,
1416 						tags->rqs[i], hctx_idx, i,
1417 						set->numa_node))
1418 					goto fail;
1419 			}
1420 
1421 			p += rq_size;
1422 			i++;
1423 		}
1424 	}
1425 
1426 	return tags;
1427 
1428 fail:
1429 	pr_warn("%s: failed to allocate requests\n", __func__);
1430 	blk_mq_free_rq_map(set, tags, hctx_idx);
1431 	return NULL;
1432 }
1433 
1434 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1435 {
1436 	kfree(bitmap->map);
1437 }
1438 
1439 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1440 {
1441 	unsigned int bpw = 8, total, num_maps, i;
1442 
1443 	bitmap->bits_per_word = bpw;
1444 
1445 	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1446 	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1447 					GFP_KERNEL, node);
1448 	if (!bitmap->map)
1449 		return -ENOMEM;
1450 
1451 	bitmap->map_size = num_maps;
1452 
1453 	total = nr_cpu_ids;
1454 	for (i = 0; i < num_maps; i++) {
1455 		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1456 		total -= bitmap->map[i].depth;
1457 	}
1458 
1459 	return 0;
1460 }
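
/*
 * Example (editor's note): with nr_cpu_ids == 20 and bits_per_word == 8,
 * ALIGN(20, 8) / 8 == 3 words are allocated, with depths 8, 8 and 4, so
 * every possible CPU's software queue gets exactly one bit.
 */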
1461 
1462 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1463 {
1464 	struct request_queue *q = hctx->queue;
1465 	struct blk_mq_ctx *ctx;
1466 	LIST_HEAD(tmp);
1467 
1468 	/*
1469 	 * Move ctx entries to new CPU, if this one is going away.
1470 	 */
1471 	ctx = __blk_mq_get_ctx(q, cpu);
1472 
1473 	spin_lock(&ctx->lock);
1474 	if (!list_empty(&ctx->rq_list)) {
1475 		list_splice_init(&ctx->rq_list, &tmp);
1476 		blk_mq_hctx_clear_pending(hctx, ctx);
1477 	}
1478 	spin_unlock(&ctx->lock);
1479 
1480 	if (list_empty(&tmp))
1481 		return NOTIFY_OK;
1482 
1483 	ctx = blk_mq_get_ctx(q);
1484 	spin_lock(&ctx->lock);
1485 
1486 	while (!list_empty(&tmp)) {
1487 		struct request *rq;
1488 
1489 		rq = list_first_entry(&tmp, struct request, queuelist);
1490 		rq->mq_ctx = ctx;
1491 		list_move_tail(&rq->queuelist, &ctx->rq_list);
1492 	}
1493 
1494 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1495 	blk_mq_hctx_mark_pending(hctx, ctx);
1496 
1497 	spin_unlock(&ctx->lock);
1498 
1499 	blk_mq_run_hw_queue(hctx, true);
1500 	blk_mq_put_ctx(ctx);
1501 	return NOTIFY_OK;
1502 }
1503 
1504 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1505 {
1506 	struct request_queue *q = hctx->queue;
1507 	struct blk_mq_tag_set *set = q->tag_set;
1508 
1509 	if (set->tags[hctx->queue_num])
1510 		return NOTIFY_OK;
1511 
1512 	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1513 	if (!set->tags[hctx->queue_num])
1514 		return NOTIFY_STOP;
1515 
1516 	hctx->tags = set->tags[hctx->queue_num];
1517 	return NOTIFY_OK;
1518 }
1519 
1520 static int blk_mq_hctx_notify(void *data, unsigned long action,
1521 			      unsigned int cpu)
1522 {
1523 	struct blk_mq_hw_ctx *hctx = data;
1524 
1525 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1526 		return blk_mq_hctx_cpu_offline(hctx, cpu);
1527 	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1528 		return blk_mq_hctx_cpu_online(hctx, cpu);
1529 
1530 	return NOTIFY_OK;
1531 }
1532 
1533 static void blk_mq_exit_hw_queues(struct request_queue *q,
1534 		struct blk_mq_tag_set *set, int nr_queue)
1535 {
1536 	struct blk_mq_hw_ctx *hctx;
1537 	unsigned int i;
1538 
1539 	queue_for_each_hw_ctx(q, hctx, i) {
1540 		if (i == nr_queue)
1541 			break;
1542 
1543 		blk_mq_tag_idle(hctx);
1544 
1545 		if (set->ops->exit_hctx)
1546 			set->ops->exit_hctx(hctx, i);
1547 
1548 		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1549 		kfree(hctx->ctxs);
1550 		blk_mq_free_bitmap(&hctx->ctx_map);
1551 	}
1552 
1553 }
1554 
1555 static void blk_mq_free_hw_queues(struct request_queue *q,
1556 		struct blk_mq_tag_set *set)
1557 {
1558 	struct blk_mq_hw_ctx *hctx;
1559 	unsigned int i;
1560 
1561 	queue_for_each_hw_ctx(q, hctx, i) {
1562 		free_cpumask_var(hctx->cpumask);
1563 		kfree(hctx);
1564 	}
1565 }
1566 
1567 static int blk_mq_init_hw_queues(struct request_queue *q,
1568 		struct blk_mq_tag_set *set)
1569 {
1570 	struct blk_mq_hw_ctx *hctx;
1571 	unsigned int i;
1572 
1573 	/*
1574 	 * Initialize hardware queues
1575 	 */
1576 	queue_for_each_hw_ctx(q, hctx, i) {
1577 		int node;
1578 
1579 		node = hctx->numa_node;
1580 		if (node == NUMA_NO_NODE)
1581 			node = hctx->numa_node = set->numa_node;
1582 
1583 		INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1584 		INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1585 		spin_lock_init(&hctx->lock);
1586 		INIT_LIST_HEAD(&hctx->dispatch);
1587 		hctx->queue = q;
1588 		hctx->queue_num = i;
1589 		hctx->flags = set->flags;
1590 		hctx->cmd_size = set->cmd_size;
1591 
1592 		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1593 						blk_mq_hctx_notify, hctx);
1594 		blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1595 
1596 		hctx->tags = set->tags[i];
1597 
1598 		/*
1599 		 * Allocate space for all possible CPUs to avoid allocation at
1600 		 * runtime
1601 		 */
1602 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1603 						GFP_KERNEL, node);
1604 		if (!hctx->ctxs)
1605 			break;
1606 
1607 		if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1608 			break;
1609 
1610 		hctx->nr_ctx = 0;
1611 
1612 		if (set->ops->init_hctx &&
1613 		    set->ops->init_hctx(hctx, set->driver_data, i))
1614 			break;
1615 	}
1616 
1617 	if (i == q->nr_hw_queues)
1618 		return 0;
1619 
1620 	/*
1621 	 * Init failed
1622 	 */
1623 	blk_mq_exit_hw_queues(q, set, i);
1624 
1625 	return 1;
1626 }
1627 
1628 static void blk_mq_init_cpu_queues(struct request_queue *q,
1629 				   unsigned int nr_hw_queues)
1630 {
1631 	unsigned int i;
1632 
1633 	for_each_possible_cpu(i) {
1634 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1635 		struct blk_mq_hw_ctx *hctx;
1636 
1637 		memset(__ctx, 0, sizeof(*__ctx));
1638 		__ctx->cpu = i;
1639 		spin_lock_init(&__ctx->lock);
1640 		INIT_LIST_HEAD(&__ctx->rq_list);
1641 		__ctx->queue = q;
1642 
1643 		/* If the CPU isn't online, the CPU is mapped to the first hctx */
1644 		if (!cpu_online(i))
1645 			continue;
1646 
1647 		hctx = q->mq_ops->map_queue(q, i);
1648 		cpumask_set_cpu(i, hctx->cpumask);
1649 		hctx->nr_ctx++;
1650 
1651 		/*
1652 		 * Set local node, IFF we have more than one hw queue. If
1653 		 * not, we remain on the home node of the device
1654 		 */
1655 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1656 			hctx->numa_node = cpu_to_node(i);
1657 	}
1658 }
1659 
1660 static void blk_mq_map_swqueue(struct request_queue *q)
1661 {
1662 	unsigned int i;
1663 	struct blk_mq_hw_ctx *hctx;
1664 	struct blk_mq_ctx *ctx;
1665 
1666 	queue_for_each_hw_ctx(q, hctx, i) {
1667 		cpumask_clear(hctx->cpumask);
1668 		hctx->nr_ctx = 0;
1669 	}
1670 
1671 	/*
1672 	 * Map software to hardware queues
1673 	 */
1674 	queue_for_each_ctx(q, ctx, i) {
1675 		/* If the cpu isn't online, the cpu is mapped to first hctx */
1676 		/* If the CPU isn't online, the CPU is mapped to the first hctx */
1677 			continue;
1678 
1679 		hctx = q->mq_ops->map_queue(q, i);
1680 		cpumask_set_cpu(i, hctx->cpumask);
1681 		ctx->index_hw = hctx->nr_ctx;
1682 		hctx->ctxs[hctx->nr_ctx++] = ctx;
1683 	}
1684 
1685 	queue_for_each_hw_ctx(q, hctx, i) {
1686 		/*
1687 		 * If no software queues are mapped to this hardware queue,
1688 		 * disable it and free the request entries
1689 		 */
1690 		if (!hctx->nr_ctx) {
1691 			struct blk_mq_tag_set *set = q->tag_set;
1692 
1693 			if (set->tags[i]) {
1694 				blk_mq_free_rq_map(set, set->tags[i], i);
1695 				set->tags[i] = NULL;
1696 				hctx->tags = NULL;
1697 			}
1698 			continue;
1699 		}
1700 
1701 		/*
1702 		 * Initialize batch roundrobin counts
1703 		 */
1704 		hctx->next_cpu = cpumask_first(hctx->cpumask);
1705 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1706 	}
1707 }
1708 
1709 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1710 {
1711 	struct blk_mq_hw_ctx *hctx;
1712 	struct request_queue *q;
1713 	bool shared;
1714 	int i;
1715 
1716 	if (set->tag_list.next == set->tag_list.prev)
1717 		shared = false;
1718 	else
1719 		shared = true;
1720 
1721 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
1722 		blk_mq_freeze_queue(q);
1723 
1724 		queue_for_each_hw_ctx(q, hctx, i) {
1725 			if (shared)
1726 				hctx->flags |= BLK_MQ_F_TAG_SHARED;
1727 			else
1728 				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1729 		}
1730 		blk_mq_unfreeze_queue(q);
1731 	}
1732 }
1733 
1734 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1735 {
1736 	struct blk_mq_tag_set *set = q->tag_set;
1737 
1738 	blk_mq_freeze_queue(q);
1739 
1740 	mutex_lock(&set->tag_list_lock);
1741 	list_del_init(&q->tag_set_list);
1742 	blk_mq_update_tag_set_depth(set);
1743 	mutex_unlock(&set->tag_list_lock);
1744 
1745 	blk_mq_unfreeze_queue(q);
1746 }
1747 
1748 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1749 				     struct request_queue *q)
1750 {
1751 	q->tag_set = set;
1752 
1753 	mutex_lock(&set->tag_list_lock);
1754 	list_add_tail(&q->tag_set_list, &set->tag_list);
1755 	blk_mq_update_tag_set_depth(set);
1756 	mutex_unlock(&set->tag_list_lock);
1757 }
1758 
1759 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1760 {
1761 	struct blk_mq_hw_ctx **hctxs;
1762 	struct blk_mq_ctx __percpu *ctx;
1763 	struct request_queue *q;
1764 	unsigned int *map;
1765 	int i;
1766 
1767 	ctx = alloc_percpu(struct blk_mq_ctx);
1768 	if (!ctx)
1769 		return ERR_PTR(-ENOMEM);
1770 
1771 	hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1772 			set->numa_node);
1773 
1774 	if (!hctxs)
1775 		goto err_percpu;
1776 
1777 	map = blk_mq_make_queue_map(set);
1778 	if (!map)
1779 		goto err_map;
1780 
1781 	for (i = 0; i < set->nr_hw_queues; i++) {
1782 		int node = blk_mq_hw_queue_to_node(map, i);
1783 
1784 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1785 					GFP_KERNEL, node);
1786 		if (!hctxs[i])
1787 			goto err_hctxs;
1788 
1789 		if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1790 			goto err_hctxs;
1791 
1792 		atomic_set(&hctxs[i]->nr_active, 0);
1793 		hctxs[i]->numa_node = node;
1794 		hctxs[i]->queue_num = i;
1795 	}
1796 
1797 	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1798 	if (!q)
1799 		goto err_hctxs;
1800 
1801 	if (percpu_counter_init(&q->mq_usage_counter, 0))
1802 		goto err_map;
1803 
1804 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1805 	blk_queue_rq_timeout(q, 30000);
1806 
1807 	q->nr_queues = nr_cpu_ids;
1808 	q->nr_hw_queues = set->nr_hw_queues;
1809 	q->mq_map = map;
1810 
1811 	q->queue_ctx = ctx;
1812 	q->queue_hw_ctx = hctxs;
1813 
1814 	q->mq_ops = set->ops;
1815 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1816 
1817 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
1818 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1819 
1820 	q->sg_reserved_size = INT_MAX;
1821 
1822 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1823 	INIT_LIST_HEAD(&q->requeue_list);
1824 	spin_lock_init(&q->requeue_lock);
1825 
1826 	if (q->nr_hw_queues > 1)
1827 		blk_queue_make_request(q, blk_mq_make_request);
1828 	else
1829 		blk_queue_make_request(q, blk_sq_make_request);
1830 
1831 	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1832 	if (set->timeout)
1833 		blk_queue_rq_timeout(q, set->timeout);
1834 
1835 	/*
1836 	 * Do this after blk_queue_make_request() overrides it...
1837 	 */
1838 	q->nr_requests = set->queue_depth;
1839 
1840 	if (set->ops->complete)
1841 		blk_queue_softirq_done(q, set->ops->complete);
1842 
1843 	blk_mq_init_flush(q);
1844 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1845 
1846 	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1847 				set->cmd_size, cache_line_size()),
1848 				GFP_KERNEL);
1849 	if (!q->flush_rq)
1850 		goto err_hw;
1851 
1852 	if (blk_mq_init_hw_queues(q, set))
1853 		goto err_flush_rq;
1854 
1855 	mutex_lock(&all_q_mutex);
1856 	list_add_tail(&q->all_q_node, &all_q_list);
1857 	mutex_unlock(&all_q_mutex);
1858 
1859 	blk_mq_add_queue_tag_set(set, q);
1860 
1861 	blk_mq_map_swqueue(q);
1862 
1863 	return q;
1864 
1865 err_flush_rq:
1866 	kfree(q->flush_rq);
1867 err_hw:
1868 	blk_cleanup_queue(q);
1869 err_hctxs:
1870 	kfree(map);
1871 	for (i = 0; i < set->nr_hw_queues; i++) {
1872 		if (!hctxs[i])
1873 			break;
1874 		free_cpumask_var(hctxs[i]->cpumask);
1875 		kfree(hctxs[i]);
1876 	}
1877 err_map:
1878 	kfree(hctxs);
1879 err_percpu:
1880 	free_percpu(ctx);
1881 	return ERR_PTR(-ENOMEM);
1882 }
1883 EXPORT_SYMBOL(blk_mq_init_queue);
1884 
1885 void blk_mq_free_queue(struct request_queue *q)
1886 {
1887 	struct blk_mq_tag_set	*set = q->tag_set;
1888 
1889 	blk_mq_del_queue_tag_set(q);
1890 
1891 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1892 	blk_mq_free_hw_queues(q, set);
1893 
1894 	percpu_counter_destroy(&q->mq_usage_counter);
1895 
1896 	free_percpu(q->queue_ctx);
1897 	kfree(q->queue_hw_ctx);
1898 	kfree(q->mq_map);
1899 
1900 	q->queue_ctx = NULL;
1901 	q->queue_hw_ctx = NULL;
1902 	q->mq_map = NULL;
1903 
1904 	mutex_lock(&all_q_mutex);
1905 	list_del_init(&q->all_q_node);
1906 	mutex_unlock(&all_q_mutex);
1907 }
1908 
1909 /* Basically redo blk_mq_init_queue with the queue frozen */
1910 static void blk_mq_queue_reinit(struct request_queue *q)
1911 {
1912 	blk_mq_freeze_queue(q);
1913 
1914 	blk_mq_sysfs_unregister(q);
1915 
1916 	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1917 
1918 	/*
1919 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1920 	 * we should change hctx numa_node according to the new topology (this
1921 	 * involves freeing and re-allocating memory, is it worth doing?)
1922 	 */
1923 
1924 	blk_mq_map_swqueue(q);
1925 
1926 	blk_mq_sysfs_register(q);
1927 
1928 	blk_mq_unfreeze_queue(q);
1929 }
1930 
1931 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1932 				      unsigned long action, void *hcpu)
1933 {
1934 	struct request_queue *q;
1935 
1936 	/*
1937 	 * Before new mappings are established, a hotadded CPU might already
1938 	 * start handling requests. This doesn't break anything, as we map
1939 	 * offline CPUs to the first hardware queue. We will re-init the queue
1940 	 * below to get optimal settings.
1941 	 */
1942 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1943 	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1944 		return NOTIFY_OK;
1945 
1946 	mutex_lock(&all_q_mutex);
1947 	list_for_each_entry(q, &all_q_list, all_q_node)
1948 		blk_mq_queue_reinit(q);
1949 	mutex_unlock(&all_q_mutex);
1950 	return NOTIFY_OK;
1951 }
1952 
1953 /*
1954  * Alloc a tag set to be associated with one or more request queues.
1955  * May fail with EINVAL for various error conditions. May adjust the
1956  * requested depth down, if it is too large. In that case, the set
1957  * value will be stored in set->queue_depth.
1958  */
1959 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1960 {
1961 	int i;
1962 
1963 	if (!set->nr_hw_queues)
1964 		return -EINVAL;
1965 	if (!set->queue_depth)
1966 		return -EINVAL;
1967 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1968 		return -EINVAL;
1969 
1970 	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1971 		return -EINVAL;
1972 
1973 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
1974 		pr_info("blk-mq: reduced tag depth to %u\n",
1975 			BLK_MQ_MAX_DEPTH);
1976 		set->queue_depth = BLK_MQ_MAX_DEPTH;
1977 	}
1978 
1979 	set->tags = kmalloc_node(set->nr_hw_queues *
1980 				 sizeof(struct blk_mq_tags *),
1981 				 GFP_KERNEL, set->numa_node);
1982 	if (!set->tags)
1983 		goto out;
1984 
1985 	for (i = 0; i < set->nr_hw_queues; i++) {
1986 		set->tags[i] = blk_mq_init_rq_map(set, i);
1987 		if (!set->tags[i])
1988 			goto out_unwind;
1989 	}
1990 
1991 	mutex_init(&set->tag_list_lock);
1992 	INIT_LIST_HEAD(&set->tag_list);
1993 
1994 	return 0;
1995 
1996 out_unwind:
1997 	while (--i >= 0)
1998 		blk_mq_free_rq_map(set, set->tags[i], i);
1999 out:
2000 	return -ENOMEM;
2001 }
2002 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
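
/*
 * Usage sketch (editor's illustration, not from the original file): a driver
 * typically embeds a blk_mq_tag_set, fills it in, allocates the per-queue
 * tag maps with blk_mq_alloc_tag_set(), and then creates a request queue
 * from it. mydrv_mq_ops and struct mydrv_cmd are hypothetical.
 *
 *	set->ops		= &mydrv_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct mydrv_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(set))
 *		return -ENOMEM;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */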
2003 
2004 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2005 {
2006 	int i;
2007 
2008 	for (i = 0; i < set->nr_hw_queues; i++) {
2009 		if (set->tags[i])
2010 			blk_mq_free_rq_map(set, set->tags[i], i);
2011 	}
2012 
2013 	kfree(set->tags);
2014 }
2015 EXPORT_SYMBOL(blk_mq_free_tag_set);
2016 
2017 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2018 {
2019 	struct blk_mq_tag_set *set = q->tag_set;
2020 	struct blk_mq_hw_ctx *hctx;
2021 	int i, ret;
2022 
2023 	if (!set || nr > set->queue_depth)
2024 		return -EINVAL;
2025 
2026 	ret = 0;
2027 	queue_for_each_hw_ctx(q, hctx, i) {
2028 		ret = blk_mq_tag_update_depth(hctx->tags, nr);
2029 		if (ret)
2030 			break;
2031 	}
2032 
2033 	if (!ret)
2034 		q->nr_requests = nr;
2035 
2036 	return ret;
2037 }
2038 
2039 void blk_mq_disable_hotplug(void)
2040 {
2041 	mutex_lock(&all_q_mutex);
2042 }
2043 
2044 void blk_mq_enable_hotplug(void)
2045 {
2046 	mutex_unlock(&all_q_mutex);
2047 }
2048 
2049 static int __init blk_mq_init(void)
2050 {
2051 	blk_mq_cpu_init();
2052 
2053 	/* Must be called after percpu_counter_hotcpu_callback() */
2054 	hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2055 
2056 	return 0;
2057 }
2058 subsys_initcall(blk_mq_init);
2059