xref: /openbmc/linux/block/blk-mq.c (revision 275876e2)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 
24 #include <trace/events/block.h>
25 
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30 
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33 
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35 
36 /*
37  * Check if any of the ctx's have pending work in this hardware queue
38  */
39 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
40 {
41 	unsigned int i;
42 
43 	for (i = 0; i < hctx->ctx_map.map_size; i++)
44 		if (hctx->ctx_map.map[i].word)
45 			return true;
46 
47 	return false;
48 }
49 
50 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51 					      struct blk_mq_ctx *ctx)
52 {
53 	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54 }
55 
56 #define CTX_TO_BIT(hctx, ctx)	\
57 	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
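/*
 * Illustrative example: with the bits_per_word of 8 chosen in
 * blk_mq_alloc_bitmap(), a software queue whose index_hw is 10 lives in
 * ctx_map.map[10 / 8], i.e. map[1], and CTX_TO_BIT() selects bit
 * (10 & 7) == 2 within that word.
 */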
58 
59 /*
60  * Mark this ctx as having pending work in this hardware queue
61  */
62 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
63 				     struct blk_mq_ctx *ctx)
64 {
65 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
66 
67 	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68 		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
69 }
70 
71 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
72 				      struct blk_mq_ctx *ctx)
73 {
74 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
75 
76 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
77 }
78 
79 static int blk_mq_queue_enter(struct request_queue *q)
80 {
81 	while (true) {
82 		int ret;
83 
84 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
85 			return 0;
86 
87 		ret = wait_event_interruptible(q->mq_freeze_wq,
88 				!q->mq_freeze_depth || blk_queue_dying(q));
89 		if (blk_queue_dying(q))
90 			return -ENODEV;
91 		if (ret)
92 			return ret;
93 	}
94 }
95 
96 static void blk_mq_queue_exit(struct request_queue *q)
97 {
98 	percpu_ref_put(&q->mq_usage_counter);
99 }
100 
101 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
102 {
103 	struct request_queue *q =
104 		container_of(ref, struct request_queue, mq_usage_counter);
105 
106 	wake_up_all(&q->mq_freeze_wq);
107 }
108 
109 /*
110  * Guarantee no request is in use, so we can change any data structure of
111  * the queue afterward.
112  */
113 void blk_mq_freeze_queue(struct request_queue *q)
114 {
115 	spin_lock_irq(q->queue_lock);
116 	q->mq_freeze_depth++;
117 	spin_unlock_irq(q->queue_lock);
118 
119 	percpu_ref_kill(&q->mq_usage_counter);
120 	blk_mq_run_queues(q, false);
121 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
122 }
123 
124 static void blk_mq_unfreeze_queue(struct request_queue *q)
125 {
126 	bool wake = false;
127 
128 	spin_lock_irq(q->queue_lock);
129 	wake = !--q->mq_freeze_depth;
130 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
131 	spin_unlock_irq(q->queue_lock);
132 	if (wake) {
133 		percpu_ref_reinit(&q->mq_usage_counter);
134 		wake_up_all(&q->mq_freeze_wq);
135 	}
136 }
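/*
 * Usage sketch: callers that need to change queue-wide state wrap the
 * update in a freeze/unfreeze pair, as blk_mq_update_tag_set_depth()
 * does further down:
 *
 *	blk_mq_freeze_queue(q);
 *	... modify per-queue or per-hctx data ...
 *	blk_mq_unfreeze_queue(q);
 */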
137 
138 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
139 {
140 	return blk_mq_has_free_tags(hctx->tags);
141 }
142 EXPORT_SYMBOL(blk_mq_can_queue);
143 
144 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
145 			       struct request *rq, unsigned int rw_flags)
146 {
147 	if (blk_queue_io_stat(q))
148 		rw_flags |= REQ_IO_STAT;
149 
150 	INIT_LIST_HEAD(&rq->queuelist);
151 	/* csd/requeue_work/fifo_time is initialized before use */
152 	rq->q = q;
153 	rq->mq_ctx = ctx;
154 	rq->cmd_flags |= rw_flags;
155 	/* do not touch atomic flags, it needs atomic ops against the timer */
156 	rq->cpu = -1;
157 	INIT_HLIST_NODE(&rq->hash);
158 	RB_CLEAR_NODE(&rq->rb_node);
159 	rq->rq_disk = NULL;
160 	rq->part = NULL;
161 	rq->start_time = jiffies;
162 #ifdef CONFIG_BLK_CGROUP
163 	rq->rl = NULL;
164 	set_start_time_ns(rq);
165 	rq->io_start_time_ns = 0;
166 #endif
167 	rq->nr_phys_segments = 0;
168 #if defined(CONFIG_BLK_DEV_INTEGRITY)
169 	rq->nr_integrity_segments = 0;
170 #endif
171 	rq->special = NULL;
172 	/* tag was already set */
173 	rq->errors = 0;
174 
175 	rq->extra_len = 0;
176 	rq->sense_len = 0;
177 	rq->resid_len = 0;
178 	rq->sense = NULL;
179 
180 	INIT_LIST_HEAD(&rq->timeout_list);
181 	rq->timeout = 0;
182 
183 	rq->end_io = NULL;
184 	rq->end_io_data = NULL;
185 	rq->next_rq = NULL;
186 
187 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
188 }
189 
190 static struct request *
191 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
192 {
193 	struct request *rq;
194 	unsigned int tag;
195 
196 	tag = blk_mq_get_tag(data);
197 	if (tag != BLK_MQ_TAG_FAIL) {
198 		rq = data->hctx->tags->rqs[tag];
199 
200 		rq->cmd_flags = 0;
201 		if (blk_mq_tag_busy(data->hctx)) {
202 			rq->cmd_flags = REQ_MQ_INFLIGHT;
203 			atomic_inc(&data->hctx->nr_active);
204 		}
205 
206 		rq->tag = tag;
207 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
208 		return rq;
209 	}
210 
211 	return NULL;
212 }
213 
214 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
215 		bool reserved)
216 {
217 	struct blk_mq_ctx *ctx;
218 	struct blk_mq_hw_ctx *hctx;
219 	struct request *rq;
220 	struct blk_mq_alloc_data alloc_data;
221 
222 	if (blk_mq_queue_enter(q))
223 		return NULL;
224 
225 	ctx = blk_mq_get_ctx(q);
226 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
227 	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
228 			reserved, ctx, hctx);
229 
230 	rq = __blk_mq_alloc_request(&alloc_data, rw);
231 	if (!rq && (gfp & __GFP_WAIT)) {
232 		__blk_mq_run_hw_queue(hctx);
233 		blk_mq_put_ctx(ctx);
234 
235 		ctx = blk_mq_get_ctx(q);
236 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
237 		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
238 				hctx);
239 		rq = __blk_mq_alloc_request(&alloc_data, rw);
240 		ctx = alloc_data.ctx;
241 	}
242 	blk_mq_put_ctx(ctx);
243 	return rq;
244 }
245 EXPORT_SYMBOL(blk_mq_alloc_request);
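/*
 * Minimal usage sketch (illustrative only): a driver can allocate a
 * request outside the normal bio path, e.g. for a private command, and
 * hand it back with blk_mq_free_request() when done:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (!rq)
 *		return -ENOMEM;
 *	... fill in and issue the request ...
 *	blk_mq_free_request(rq);
 */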
246 
247 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
248 				  struct blk_mq_ctx *ctx, struct request *rq)
249 {
250 	const int tag = rq->tag;
251 	struct request_queue *q = rq->q;
252 
253 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
254 		atomic_dec(&hctx->nr_active);
255 
256 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
257 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
258 	blk_mq_queue_exit(q);
259 }
260 
261 void blk_mq_free_request(struct request *rq)
262 {
263 	struct blk_mq_ctx *ctx = rq->mq_ctx;
264 	struct blk_mq_hw_ctx *hctx;
265 	struct request_queue *q = rq->q;
266 
267 	ctx->rq_completed[rq_is_sync(rq)]++;
268 
269 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
270 	__blk_mq_free_request(hctx, ctx, rq);
271 }
272 
273 /*
274  * Clone all relevant state from a request that has been put on hold in
275  * the flush state machine into the preallocated flush request that hangs
276  * off the request queue.
277  *
278  * For a driver the flush request should be invisible; that's why we are
279  * impersonating the original request here.
280  */
281 void blk_mq_clone_flush_request(struct request *flush_rq,
282 		struct request *orig_rq)
283 {
284 	struct blk_mq_hw_ctx *hctx =
285 		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
286 
287 	flush_rq->mq_ctx = orig_rq->mq_ctx;
288 	flush_rq->tag = orig_rq->tag;
289 	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
290 		hctx->cmd_size);
291 }
292 
293 inline void __blk_mq_end_io(struct request *rq, int error)
294 {
295 	blk_account_io_done(rq);
296 
297 	if (rq->end_io) {
298 		rq->end_io(rq, error);
299 	} else {
300 		if (unlikely(blk_bidi_rq(rq)))
301 			blk_mq_free_request(rq->next_rq);
302 		blk_mq_free_request(rq);
303 	}
304 }
305 EXPORT_SYMBOL(__blk_mq_end_io);
306 
307 void blk_mq_end_io(struct request *rq, int error)
308 {
309 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
310 		BUG();
311 	__blk_mq_end_io(rq, error);
312 }
313 EXPORT_SYMBOL(blk_mq_end_io);
314 
315 static void __blk_mq_complete_request_remote(void *data)
316 {
317 	struct request *rq = data;
318 
319 	rq->q->softirq_done_fn(rq);
320 }
321 
322 static void blk_mq_ipi_complete_request(struct request *rq)
323 {
324 	struct blk_mq_ctx *ctx = rq->mq_ctx;
325 	bool shared = false;
326 	int cpu;
327 
328 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
329 		rq->q->softirq_done_fn(rq);
330 		return;
331 	}
332 
333 	cpu = get_cpu();
334 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
335 		shared = cpus_share_cache(cpu, ctx->cpu);
336 
337 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
338 		rq->csd.func = __blk_mq_complete_request_remote;
339 		rq->csd.info = rq;
340 		rq->csd.flags = 0;
341 		smp_call_function_single_async(ctx->cpu, &rq->csd);
342 	} else {
343 		rq->q->softirq_done_fn(rq);
344 	}
345 	put_cpu();
346 }
347 
348 void __blk_mq_complete_request(struct request *rq)
349 {
350 	struct request_queue *q = rq->q;
351 
352 	if (!q->softirq_done_fn)
353 		blk_mq_end_io(rq, rq->errors);
354 	else
355 		blk_mq_ipi_complete_request(rq);
356 }
357 
358 /**
359  * blk_mq_complete_request - end I/O on a request
360  * @rq:		the request being processed
361  *
362  * Description:
363  *	Ends all I/O on a request. It does not handle partial completions.
364  *	The actual completion happens out-of-order, through an IPI handler.
365  **/
366 void blk_mq_complete_request(struct request *rq)
367 {
368 	struct request_queue *q = rq->q;
369 
370 	if (unlikely(blk_should_fake_timeout(q)))
371 		return;
372 	if (!blk_mark_rq_complete(rq))
373 		__blk_mq_complete_request(rq);
374 }
375 EXPORT_SYMBOL(blk_mq_complete_request);
376 
377 static void blk_mq_start_request(struct request *rq, bool last)
378 {
379 	struct request_queue *q = rq->q;
380 
381 	trace_block_rq_issue(q, rq);
382 
383 	rq->resid_len = blk_rq_bytes(rq);
384 	if (unlikely(blk_bidi_rq(rq)))
385 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
386 
387 	blk_add_timer(rq);
388 
389 	/*
390 	 * Mark us as started and clear complete. Complete might have been
391 	 * set if requeue raced with timeout, which then marked it as
392 	 * complete. So be sure to clear complete again when we start
393 	 * the request, otherwise we'll ignore the completion event.
394 	 */
395 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
396 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
397 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
398 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
399 
400 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
401 		/*
402 		 * Make sure space for the drain appears.  We know we can do
403 		 * this because max_hw_segments has been adjusted to be one
404 		 * fewer than the device can handle.
405 		 */
406 		rq->nr_phys_segments++;
407 	}
408 
409 	/*
410 	 * Flag the last request in the series so that drivers know when IO
411 	 * should be kicked off, if they don't do it on a per-request basis.
412 	 *
413 	 * Note: the flag isn't the only condition on which drivers should kick off IO.
414 	 * If the drive is busy, the last request might not have the bit set.
415 	 */
416 	if (last)
417 		rq->cmd_flags |= REQ_END;
418 }
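/*
 * Driver-side sketch (the mydrv helpers are hypothetical): a ->queue_rq()
 * implementation may use REQ_END to batch doorbell writes instead of
 * kicking the hardware for every request:
 *
 *	mydrv_queue_cmd(hctx->driver_data, rq);
 *	if (rq->cmd_flags & REQ_END)
 *		mydrv_kick_hw(hctx->driver_data);
 */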
419 
420 static void __blk_mq_requeue_request(struct request *rq)
421 {
422 	struct request_queue *q = rq->q;
423 
424 	trace_block_rq_requeue(q, rq);
425 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
426 
427 	rq->cmd_flags &= ~REQ_END;
428 
429 	if (q->dma_drain_size && blk_rq_bytes(rq))
430 		rq->nr_phys_segments--;
431 }
432 
433 void blk_mq_requeue_request(struct request *rq)
434 {
435 	__blk_mq_requeue_request(rq);
436 	blk_clear_rq_complete(rq);
437 
438 	BUG_ON(blk_queued_rq(rq));
439 	blk_mq_add_to_requeue_list(rq, true);
440 }
441 EXPORT_SYMBOL(blk_mq_requeue_request);
442 
443 static void blk_mq_requeue_work(struct work_struct *work)
444 {
445 	struct request_queue *q =
446 		container_of(work, struct request_queue, requeue_work);
447 	LIST_HEAD(rq_list);
448 	struct request *rq, *next;
449 	unsigned long flags;
450 
451 	spin_lock_irqsave(&q->requeue_lock, flags);
452 	list_splice_init(&q->requeue_list, &rq_list);
453 	spin_unlock_irqrestore(&q->requeue_lock, flags);
454 
455 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
456 		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
457 			continue;
458 
459 		rq->cmd_flags &= ~REQ_SOFTBARRIER;
460 		list_del_init(&rq->queuelist);
461 		blk_mq_insert_request(rq, true, false, false);
462 	}
463 
464 	while (!list_empty(&rq_list)) {
465 		rq = list_entry(rq_list.next, struct request, queuelist);
466 		list_del_init(&rq->queuelist);
467 		blk_mq_insert_request(rq, false, false, false);
468 	}
469 
470 	blk_mq_run_queues(q, false);
471 }
472 
473 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
474 {
475 	struct request_queue *q = rq->q;
476 	unsigned long flags;
477 
478 	/*
479 	 * We abuse this flag that is otherwise used by the I/O scheduler to
480 	 * request head insertion from the workqueue.
481 	 */
482 	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
483 
484 	spin_lock_irqsave(&q->requeue_lock, flags);
485 	if (at_head) {
486 		rq->cmd_flags |= REQ_SOFTBARRIER;
487 		list_add(&rq->queuelist, &q->requeue_list);
488 	} else {
489 		list_add_tail(&rq->queuelist, &q->requeue_list);
490 	}
491 	spin_unlock_irqrestore(&q->requeue_lock, flags);
492 }
493 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
494 
495 void blk_mq_kick_requeue_list(struct request_queue *q)
496 {
497 	kblockd_schedule_work(&q->requeue_work);
498 }
499 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
500 
501 static inline bool is_flush_request(struct request *rq, unsigned int tag)
502 {
503 	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
504 			rq->q->flush_rq->tag == tag);
505 }
506 
507 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
508 {
509 	struct request *rq = tags->rqs[tag];
510 
511 	if (!is_flush_request(rq, tag))
512 		return rq;
513 
514 	return rq->q->flush_rq;
515 }
516 EXPORT_SYMBOL(blk_mq_tag_to_rq);
517 
518 struct blk_mq_timeout_data {
519 	struct blk_mq_hw_ctx *hctx;
520 	unsigned long *next;
521 	unsigned int *next_set;
522 };
523 
524 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
525 {
526 	struct blk_mq_timeout_data *data = __data;
527 	struct blk_mq_hw_ctx *hctx = data->hctx;
528 	unsigned int tag;
529 
530 	/* It may not be in flight yet (this is where
531 	 * the REQ_ATOM_STARTED flag comes in). The requests are
532 	 * statically allocated, so we know it's always safe to access the
533 	 * memory associated with a bit offset into ->rqs[].
534 	 */
535 	tag = 0;
536 	do {
537 		struct request *rq;
538 
539 		tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
540 		if (tag >= hctx->tags->nr_tags)
541 			break;
542 
543 		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
544 		if (rq->q != hctx->queue)
545 			continue;
546 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
547 			continue;
548 
549 		blk_rq_check_expired(rq, data->next, data->next_set);
550 	} while (1);
551 }
552 
553 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
554 					unsigned long *next,
555 					unsigned int *next_set)
556 {
557 	struct blk_mq_timeout_data data = {
558 		.hctx		= hctx,
559 		.next		= next,
560 		.next_set	= next_set,
561 	};
562 
563 	/*
564 	 * Ask the tagging code to iterate busy requests, so we can
565 	 * check them for timeout.
566 	 */
567 	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
568 }
569 
570 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
571 {
572 	struct request_queue *q = rq->q;
573 
574 	/*
575 	 * We know that complete is set at this point. If STARTED isn't set
576 	 * anymore, then the request isn't active and the "timeout" should
577 	 * just be ignored. This can happen due to the bitflag ordering.
578 	 * Timeout first checks if STARTED is set, and if it is, assumes
579 	 * the request is active. But if we race with completion, then
580 	 * both flags will get cleared. So check here again, and ignore
581 	 * a timeout event with a request that isn't active.
582 	 */
583 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
584 		return BLK_EH_NOT_HANDLED;
585 
586 	if (!q->mq_ops->timeout)
587 		return BLK_EH_RESET_TIMER;
588 
589 	return q->mq_ops->timeout(rq);
590 }
591 
592 static void blk_mq_rq_timer(unsigned long data)
593 {
594 	struct request_queue *q = (struct request_queue *) data;
595 	struct blk_mq_hw_ctx *hctx;
596 	unsigned long next = 0;
597 	int i, next_set = 0;
598 
599 	queue_for_each_hw_ctx(q, hctx, i) {
600 		/*
601 		 * If no software queues are currently mapped to this
602 		 * hardware queue, there's nothing to check
603 		 */
604 		if (!hctx->nr_ctx || !hctx->tags)
605 			continue;
606 
607 		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
608 	}
609 
610 	if (next_set) {
611 		next = blk_rq_timeout(round_jiffies_up(next));
612 		mod_timer(&q->timeout, next);
613 	} else {
614 		queue_for_each_hw_ctx(q, hctx, i)
615 			blk_mq_tag_idle(hctx);
616 	}
617 }
618 
619 /*
620  * Reverse check our software queue for entries that we could potentially
621  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
622  * too much time checking for merges.
623  */
624 static bool blk_mq_attempt_merge(struct request_queue *q,
625 				 struct blk_mq_ctx *ctx, struct bio *bio)
626 {
627 	struct request *rq;
628 	int checked = 8;
629 
630 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
631 		int el_ret;
632 
633 		if (!checked--)
634 			break;
635 
636 		if (!blk_rq_merge_ok(rq, bio))
637 			continue;
638 
639 		el_ret = blk_try_merge(rq, bio);
640 		if (el_ret == ELEVATOR_BACK_MERGE) {
641 			if (bio_attempt_back_merge(q, rq, bio)) {
642 				ctx->rq_merged++;
643 				return true;
644 			}
645 			break;
646 		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
647 			if (bio_attempt_front_merge(q, rq, bio)) {
648 				ctx->rq_merged++;
649 				return true;
650 			}
651 			break;
652 		}
653 	}
654 
655 	return false;
656 }
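/*
 * Worked example: if a queued request covers sectors [100, 108) and the
 * new bio starts at sector 108, blk_try_merge() reports
 * ELEVATOR_BACK_MERGE and the bio is appended to that request; a bio
 * ending exactly at sector 100 would be a front merge instead.
 */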
657 
658 /*
659  * Process software queues that have been marked busy, splicing them
660  * to the for-dispatch list.
661  */
662 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
663 {
664 	struct blk_mq_ctx *ctx;
665 	int i;
666 
667 	for (i = 0; i < hctx->ctx_map.map_size; i++) {
668 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
669 		unsigned int off, bit;
670 
671 		if (!bm->word)
672 			continue;
673 
674 		bit = 0;
675 		off = i * hctx->ctx_map.bits_per_word;
676 		do {
677 			bit = find_next_bit(&bm->word, bm->depth, bit);
678 			if (bit >= bm->depth)
679 				break;
680 
681 			ctx = hctx->ctxs[bit + off];
682 			clear_bit(bit, &bm->word);
683 			spin_lock(&ctx->lock);
684 			list_splice_tail_init(&ctx->rq_list, list);
685 			spin_unlock(&ctx->lock);
686 
687 			bit++;
688 		} while (1);
689 	}
690 }
691 
692 /*
693  * Run this hardware queue, pulling any software queues mapped to it in.
694  * Note that this function currently has various problems around ordering
695  * of IO. In particular, we'd like FIFO behaviour on handling existing
696  * items on the hctx->dispatch list. Ignore that for now.
697  */
698 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
699 {
700 	struct request_queue *q = hctx->queue;
701 	struct request *rq;
702 	LIST_HEAD(rq_list);
703 	int queued;
704 
705 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
706 
707 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
708 		return;
709 
710 	hctx->run++;
711 
712 	/*
713 	 * Touch any software queue that has pending entries.
714 	 */
715 	flush_busy_ctxs(hctx, &rq_list);
716 
717 	/*
718 	 * If we have previous entries on our dispatch list, grab them
719 	 * and stuff them at the front for more fair dispatch.
720 	 */
721 	if (!list_empty_careful(&hctx->dispatch)) {
722 		spin_lock(&hctx->lock);
723 		if (!list_empty(&hctx->dispatch))
724 			list_splice_init(&hctx->dispatch, &rq_list);
725 		spin_unlock(&hctx->lock);
726 	}
727 
728 	/*
729 	 * Now process all the entries, sending them to the driver.
730 	 */
731 	queued = 0;
732 	while (!list_empty(&rq_list)) {
733 		int ret;
734 
735 		rq = list_first_entry(&rq_list, struct request, queuelist);
736 		list_del_init(&rq->queuelist);
737 
738 		blk_mq_start_request(rq, list_empty(&rq_list));
739 
740 		ret = q->mq_ops->queue_rq(hctx, rq);
741 		switch (ret) {
742 		case BLK_MQ_RQ_QUEUE_OK:
743 			queued++;
744 			continue;
745 		case BLK_MQ_RQ_QUEUE_BUSY:
746 			list_add(&rq->queuelist, &rq_list);
747 			__blk_mq_requeue_request(rq);
748 			break;
749 		default:
750 			pr_err("blk-mq: bad return on queue: %d\n", ret);
751 		case BLK_MQ_RQ_QUEUE_ERROR:
752 			rq->errors = -EIO;
753 			blk_mq_end_io(rq, rq->errors);
754 			break;
755 		}
756 
757 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
758 			break;
759 	}
760 
761 	if (!queued)
762 		hctx->dispatched[0]++;
763 	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
764 		hctx->dispatched[ilog2(queued) + 1]++;
765 
766 	/*
767 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
768 	 * that is where we will continue on the next queue run.
769 	 */
770 	if (!list_empty(&rq_list)) {
771 		spin_lock(&hctx->lock);
772 		list_splice(&rq_list, &hctx->dispatch);
773 		spin_unlock(&hctx->lock);
774 	}
775 }
776 
777 /*
778  * It'd be great if the workqueue API had a way to pass
779  * in a mask and had some smarts for more clever placement.
780  * For now we just round-robin here, switching for every
781  * BLK_MQ_CPU_WORK_BATCH queued items.
782  */
783 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
784 {
785 	int cpu = hctx->next_cpu;
786 
787 	if (--hctx->next_cpu_batch <= 0) {
788 		int next_cpu;
789 
790 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
791 		if (next_cpu >= nr_cpu_ids)
792 			next_cpu = cpumask_first(hctx->cpumask);
793 
794 		hctx->next_cpu = next_cpu;
795 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
796 	}
797 
798 	return cpu;
799 }
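/*
 * Illustrative example (assuming BLK_MQ_CPU_WORK_BATCH is 8): with
 * hctx->cpumask covering CPUs {0, 2, 4}, eight consecutive calls return
 * CPU 0, the next eight return CPU 2, then CPU 4, wrapping back to 0.
 */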
800 
801 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
802 {
803 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
804 		return;
805 
806 	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
807 		__blk_mq_run_hw_queue(hctx);
808 	else if (hctx->queue->nr_hw_queues == 1)
809 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
810 	else {
811 		unsigned int cpu;
812 
813 		cpu = blk_mq_hctx_next_cpu(hctx);
814 		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
815 	}
816 }
817 
818 void blk_mq_run_queues(struct request_queue *q, bool async)
819 {
820 	struct blk_mq_hw_ctx *hctx;
821 	int i;
822 
823 	queue_for_each_hw_ctx(q, hctx, i) {
824 		if ((!blk_mq_hctx_has_pending(hctx) &&
825 		    list_empty_careful(&hctx->dispatch)) ||
826 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
827 			continue;
828 
829 		preempt_disable();
830 		blk_mq_run_hw_queue(hctx, async);
831 		preempt_enable();
832 	}
833 }
834 EXPORT_SYMBOL(blk_mq_run_queues);
835 
836 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
837 {
838 	cancel_delayed_work(&hctx->run_work);
839 	cancel_delayed_work(&hctx->delay_work);
840 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
841 }
842 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
843 
844 void blk_mq_stop_hw_queues(struct request_queue *q)
845 {
846 	struct blk_mq_hw_ctx *hctx;
847 	int i;
848 
849 	queue_for_each_hw_ctx(q, hctx, i)
850 		blk_mq_stop_hw_queue(hctx);
851 }
852 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
853 
854 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
855 {
856 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
857 
858 	preempt_disable();
859 	blk_mq_run_hw_queue(hctx, false);
860 	preempt_enable();
861 }
862 EXPORT_SYMBOL(blk_mq_start_hw_queue);
863 
864 void blk_mq_start_hw_queues(struct request_queue *q)
865 {
866 	struct blk_mq_hw_ctx *hctx;
867 	int i;
868 
869 	queue_for_each_hw_ctx(q, hctx, i)
870 		blk_mq_start_hw_queue(hctx);
871 }
872 EXPORT_SYMBOL(blk_mq_start_hw_queues);
873 
874 
875 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
876 {
877 	struct blk_mq_hw_ctx *hctx;
878 	int i;
879 
880 	queue_for_each_hw_ctx(q, hctx, i) {
881 		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
882 			continue;
883 
884 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
885 		preempt_disable();
886 		blk_mq_run_hw_queue(hctx, async);
887 		preempt_enable();
888 	}
889 }
890 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
891 
892 static void blk_mq_run_work_fn(struct work_struct *work)
893 {
894 	struct blk_mq_hw_ctx *hctx;
895 
896 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
897 
898 	__blk_mq_run_hw_queue(hctx);
899 }
900 
901 static void blk_mq_delay_work_fn(struct work_struct *work)
902 {
903 	struct blk_mq_hw_ctx *hctx;
904 
905 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
906 
907 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
908 		__blk_mq_run_hw_queue(hctx);
909 }
910 
911 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
912 {
913 	unsigned long tmo = msecs_to_jiffies(msecs);
914 
915 	if (hctx->queue->nr_hw_queues == 1)
916 		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
917 	else {
918 		unsigned int cpu;
919 
920 		cpu = blk_mq_hctx_next_cpu(hctx);
921 		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
922 	}
923 }
924 EXPORT_SYMBOL(blk_mq_delay_queue);
925 
926 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
927 				    struct request *rq, bool at_head)
928 {
929 	struct blk_mq_ctx *ctx = rq->mq_ctx;
930 
931 	trace_block_rq_insert(hctx->queue, rq);
932 
933 	if (at_head)
934 		list_add(&rq->queuelist, &ctx->rq_list);
935 	else
936 		list_add_tail(&rq->queuelist, &ctx->rq_list);
937 
938 	blk_mq_hctx_mark_pending(hctx, ctx);
939 }
940 
941 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
942 		bool async)
943 {
944 	struct request_queue *q = rq->q;
945 	struct blk_mq_hw_ctx *hctx;
946 	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
947 
948 	current_ctx = blk_mq_get_ctx(q);
949 	if (!cpu_online(ctx->cpu))
950 		rq->mq_ctx = ctx = current_ctx;
951 
952 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
953 
954 	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
955 	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
956 		blk_insert_flush(rq);
957 	} else {
958 		spin_lock(&ctx->lock);
959 		__blk_mq_insert_request(hctx, rq, at_head);
960 		spin_unlock(&ctx->lock);
961 	}
962 
963 	if (run_queue)
964 		blk_mq_run_hw_queue(hctx, async);
965 
966 	blk_mq_put_ctx(current_ctx);
967 }
968 
969 static void blk_mq_insert_requests(struct request_queue *q,
970 				     struct blk_mq_ctx *ctx,
971 				     struct list_head *list,
972 				     int depth,
973 				     bool from_schedule)
974 
975 {
976 	struct blk_mq_hw_ctx *hctx;
977 	struct blk_mq_ctx *current_ctx;
978 
979 	trace_block_unplug(q, depth, !from_schedule);
980 
981 	current_ctx = blk_mq_get_ctx(q);
982 
983 	if (!cpu_online(ctx->cpu))
984 		ctx = current_ctx;
985 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
986 
987 	/*
988 	 * preemption doesn't flush the plug list, so it's possible ctx->cpu is
989 	 * offline now
990 	 */
991 	spin_lock(&ctx->lock);
992 	while (!list_empty(list)) {
993 		struct request *rq;
994 
995 		rq = list_first_entry(list, struct request, queuelist);
996 		list_del_init(&rq->queuelist);
997 		rq->mq_ctx = ctx;
998 		__blk_mq_insert_request(hctx, rq, false);
999 	}
1000 	spin_unlock(&ctx->lock);
1001 
1002 	blk_mq_run_hw_queue(hctx, from_schedule);
1003 	blk_mq_put_ctx(current_ctx);
1004 }
1005 
1006 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1007 {
1008 	struct request *rqa = container_of(a, struct request, queuelist);
1009 	struct request *rqb = container_of(b, struct request, queuelist);
1010 
1011 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1012 		 (rqa->mq_ctx == rqb->mq_ctx &&
1013 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1014 }
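/*
 * Illustrative example: list_sort() with this comparator groups plugged
 * requests by software queue and orders each group by sector, e.g.
 * (ctxB, 10), (ctxA, 300), (ctxA, 100) becomes (ctxA, 100), (ctxA, 300),
 * (ctxB, 10) when ctxA has the lower address. blk_mq_flush_plug_list()
 * below relies on this to batch each run of equal mq_ctx values.
 */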
1015 
1016 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1017 {
1018 	struct blk_mq_ctx *this_ctx;
1019 	struct request_queue *this_q;
1020 	struct request *rq;
1021 	LIST_HEAD(list);
1022 	LIST_HEAD(ctx_list);
1023 	unsigned int depth;
1024 
1025 	list_splice_init(&plug->mq_list, &list);
1026 
1027 	list_sort(NULL, &list, plug_ctx_cmp);
1028 
1029 	this_q = NULL;
1030 	this_ctx = NULL;
1031 	depth = 0;
1032 
1033 	while (!list_empty(&list)) {
1034 		rq = list_entry_rq(list.next);
1035 		list_del_init(&rq->queuelist);
1036 		BUG_ON(!rq->q);
1037 		if (rq->mq_ctx != this_ctx) {
1038 			if (this_ctx) {
1039 				blk_mq_insert_requests(this_q, this_ctx,
1040 							&ctx_list, depth,
1041 							from_schedule);
1042 			}
1043 
1044 			this_ctx = rq->mq_ctx;
1045 			this_q = rq->q;
1046 			depth = 0;
1047 		}
1048 
1049 		depth++;
1050 		list_add_tail(&rq->queuelist, &ctx_list);
1051 	}
1052 
1053 	/*
1054 	 * If 'this_ctx' is set, we know we have entries to complete
1055 	 * on 'ctx_list'. Do those.
1056 	 */
1057 	if (this_ctx) {
1058 		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1059 				       from_schedule);
1060 	}
1061 }
1062 
1063 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1064 {
1065 	init_request_from_bio(rq, bio);
1066 
1067 	if (blk_do_io_stat(rq))
1068 		blk_account_io_start(rq, 1);
1069 }
1070 
1071 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1072 					 struct blk_mq_ctx *ctx,
1073 					 struct request *rq, struct bio *bio)
1074 {
1075 	struct request_queue *q = hctx->queue;
1076 
1077 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1078 		blk_mq_bio_to_request(rq, bio);
1079 		spin_lock(&ctx->lock);
1080 insert_rq:
1081 		__blk_mq_insert_request(hctx, rq, false);
1082 		spin_unlock(&ctx->lock);
1083 		return false;
1084 	} else {
1085 		spin_lock(&ctx->lock);
1086 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1087 			blk_mq_bio_to_request(rq, bio);
1088 			goto insert_rq;
1089 		}
1090 
1091 		spin_unlock(&ctx->lock);
1092 		__blk_mq_free_request(hctx, ctx, rq);
1093 		return true;
1094 	}
1095 }
1096 
1097 struct blk_map_ctx {
1098 	struct blk_mq_hw_ctx *hctx;
1099 	struct blk_mq_ctx *ctx;
1100 };
1101 
1102 static struct request *blk_mq_map_request(struct request_queue *q,
1103 					  struct bio *bio,
1104 					  struct blk_map_ctx *data)
1105 {
1106 	struct blk_mq_hw_ctx *hctx;
1107 	struct blk_mq_ctx *ctx;
1108 	struct request *rq;
1109 	int rw = bio_data_dir(bio);
1110 	struct blk_mq_alloc_data alloc_data;
1111 
1112 	if (unlikely(blk_mq_queue_enter(q))) {
1113 		bio_endio(bio, -EIO);
1114 		return NULL;
1115 	}
1116 
1117 	ctx = blk_mq_get_ctx(q);
1118 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1119 
1120 	if (rw_is_sync(bio->bi_rw))
1121 		rw |= REQ_SYNC;
1122 
1123 	trace_block_getrq(q, bio, rw);
1124 	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1125 			hctx);
1126 	rq = __blk_mq_alloc_request(&alloc_data, rw);
1127 	if (unlikely(!rq)) {
1128 		__blk_mq_run_hw_queue(hctx);
1129 		blk_mq_put_ctx(ctx);
1130 		trace_block_sleeprq(q, bio, rw);
1131 
1132 		ctx = blk_mq_get_ctx(q);
1133 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
1134 		blk_mq_set_alloc_data(&alloc_data, q,
1135 				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1136 		rq = __blk_mq_alloc_request(&alloc_data, rw);
1137 		ctx = alloc_data.ctx;
1138 		hctx = alloc_data.hctx;
1139 	}
1140 
1141 	hctx->queued++;
1142 	data->hctx = hctx;
1143 	data->ctx = ctx;
1144 	return rq;
1145 }
1146 
1147 /*
1148  * Multiple hardware queue variant. This will not use per-process plugs,
1149  * but will attempt to bypass the hctx queueing if we can go straight to
1150  * hardware for SYNC IO.
1151  */
1152 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1153 {
1154 	const int is_sync = rw_is_sync(bio->bi_rw);
1155 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1156 	struct blk_map_ctx data;
1157 	struct request *rq;
1158 
1159 	blk_queue_bounce(q, &bio);
1160 
1161 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1162 		bio_endio(bio, -EIO);
1163 		return;
1164 	}
1165 
1166 	rq = blk_mq_map_request(q, bio, &data);
1167 	if (unlikely(!rq))
1168 		return;
1169 
1170 	if (unlikely(is_flush_fua)) {
1171 		blk_mq_bio_to_request(rq, bio);
1172 		blk_insert_flush(rq);
1173 		goto run_queue;
1174 	}
1175 
1176 	if (is_sync) {
1177 		int ret;
1178 
1179 		blk_mq_bio_to_request(rq, bio);
1180 		blk_mq_start_request(rq, true);
1181 
1182 		/*
1183 		 * For an OK return, we are done. For an error, kill the request.
1184 		 * Anything else (busy), just add it to our list as we previously
1185 		 * would have done.
1186 		 */
1187 		ret = q->mq_ops->queue_rq(data.hctx, rq);
1188 		if (ret == BLK_MQ_RQ_QUEUE_OK)
1189 			goto done;
1190 		else {
1191 			__blk_mq_requeue_request(rq);
1192 
1193 			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1194 				rq->errors = -EIO;
1195 				blk_mq_end_io(rq, rq->errors);
1196 				goto done;
1197 			}
1198 		}
1199 	}
1200 
1201 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1202 		/*
1203 		 * For a SYNC request, send it to the hardware immediately. For
1204 		 * an ASYNC request, just ensure that we run it later on. The
1205 		 * latter allows for merging opportunities and more efficient
1206 		 * dispatching.
1207 		 */
1208 run_queue:
1209 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1210 	}
1211 done:
1212 	blk_mq_put_ctx(data.ctx);
1213 }
1214 
1215 /*
1216  * Single hardware queue variant. This will attempt to use any per-process
1217  * plug for merging and IO deferral.
1218  */
1219 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1220 {
1221 	const int is_sync = rw_is_sync(bio->bi_rw);
1222 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1223 	unsigned int use_plug, request_count = 0;
1224 	struct blk_map_ctx data;
1225 	struct request *rq;
1226 
1227 	/*
1228 	 * Only use the per-task plug for async, non-flush IO; sync and
1229 	 * flush/FUA requests go straight to the hardware queue.
1230 	 */
1231 	use_plug = !is_flush_fua && !is_sync;
1232 
1233 	blk_queue_bounce(q, &bio);
1234 
1235 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1236 		bio_endio(bio, -EIO);
1237 		return;
1238 	}
1239 
1240 	if (use_plug && !blk_queue_nomerges(q) &&
1241 	    blk_attempt_plug_merge(q, bio, &request_count))
1242 		return;
1243 
1244 	rq = blk_mq_map_request(q, bio, &data);
1245 	if (unlikely(!rq))
1246 		return;
1247 
1248 	if (unlikely(is_flush_fua)) {
1249 		blk_mq_bio_to_request(rq, bio);
1250 		blk_insert_flush(rq);
1251 		goto run_queue;
1252 	}
1253 
1254 	/*
1255 	 * If a task plug currently exists, utilize it to temporarily store
1256 	 * requests until the task is either done or scheduled away. This is
1257 	 * completely lockless.
1258 	 */
1259 	if (use_plug) {
1260 		struct blk_plug *plug = current->plug;
1261 
1262 		if (plug) {
1263 			blk_mq_bio_to_request(rq, bio);
1264 			if (list_empty(&plug->mq_list))
1265 				trace_block_plug(q);
1266 			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1267 				blk_flush_plug_list(plug, false);
1268 				trace_block_plug(q);
1269 			}
1270 			list_add_tail(&rq->queuelist, &plug->mq_list);
1271 			blk_mq_put_ctx(data.ctx);
1272 			return;
1273 		}
1274 	}
1275 
1276 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1277 		/*
1278 		 * For a SYNC request, send it to the hardware immediately. For
1279 		 * an ASYNC request, just ensure that we run it later on. The
1280 		 * latter allows for merging opportunities and more efficient
1281 		 * dispatching.
1282 		 */
1283 run_queue:
1284 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1285 	}
1286 
1287 	blk_mq_put_ctx(data.ctx);
1288 }
1289 
1290 /*
1291  * Default mapping to a software queue, since we use one per CPU.
1292  */
1293 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1294 {
1295 	return q->queue_hw_ctx[q->mq_map[cpu]];
1296 }
1297 EXPORT_SYMBOL(blk_mq_map_queue);
1298 
1299 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1300 		struct blk_mq_tags *tags, unsigned int hctx_idx)
1301 {
1302 	struct page *page;
1303 
1304 	if (tags->rqs && set->ops->exit_request) {
1305 		int i;
1306 
1307 		for (i = 0; i < tags->nr_tags; i++) {
1308 			if (!tags->rqs[i])
1309 				continue;
1310 			set->ops->exit_request(set->driver_data, tags->rqs[i],
1311 						hctx_idx, i);
1312 		}
1313 	}
1314 
1315 	while (!list_empty(&tags->page_list)) {
1316 		page = list_first_entry(&tags->page_list, struct page, lru);
1317 		list_del_init(&page->lru);
1318 		__free_pages(page, page->private);
1319 	}
1320 
1321 	kfree(tags->rqs);
1322 
1323 	blk_mq_free_tags(tags);
1324 }
1325 
1326 static size_t order_to_size(unsigned int order)
1327 {
1328 	return (size_t)PAGE_SIZE << order;
1329 }
1330 
1331 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1332 		unsigned int hctx_idx)
1333 {
1334 	struct blk_mq_tags *tags;
1335 	unsigned int i, j, entries_per_page, max_order = 4;
1336 	size_t rq_size, left;
1337 
1338 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1339 				set->numa_node);
1340 	if (!tags)
1341 		return NULL;
1342 
1343 	INIT_LIST_HEAD(&tags->page_list);
1344 
1345 	tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1346 					GFP_KERNEL, set->numa_node);
1347 	if (!tags->rqs) {
1348 		blk_mq_free_tags(tags);
1349 		return NULL;
1350 	}
1351 
1352 	/*
1353 	 * rq_size is the size of the request plus driver payload, rounded
1354 	 * to the cacheline size
1355 	 */
1356 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1357 				cache_line_size());
1358 	left = rq_size * set->queue_depth;
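	/*
	 * Worked example with illustrative figures: assuming a 64-byte
	 * cache line, a 32-byte driver cmd_size and a struct request of,
	 * say, 384 bytes, rq_size is round_up(384 + 32, 64) == 448, so a
	 * queue depth of 128 needs 128 * 448 == 57344 bytes in total.
	 */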
1359 
1360 	for (i = 0; i < set->queue_depth; ) {
1361 		int this_order = max_order;
1362 		struct page *page;
1363 		int to_do;
1364 		void *p;
1365 
1366 		while (left < order_to_size(this_order - 1) && this_order)
1367 			this_order--;
1368 
1369 		do {
1370 			page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1371 						this_order);
1372 			if (page)
1373 				break;
1374 			if (!this_order--)
1375 				break;
1376 			if (order_to_size(this_order) < rq_size)
1377 				break;
1378 		} while (1);
1379 
1380 		if (!page)
1381 			goto fail;
1382 
1383 		page->private = this_order;
1384 		list_add_tail(&page->lru, &tags->page_list);
1385 
1386 		p = page_address(page);
1387 		entries_per_page = order_to_size(this_order) / rq_size;
1388 		to_do = min(entries_per_page, set->queue_depth - i);
1389 		left -= to_do * rq_size;
1390 		for (j = 0; j < to_do; j++) {
1391 			tags->rqs[i] = p;
1392 			if (set->ops->init_request) {
1393 				if (set->ops->init_request(set->driver_data,
1394 						tags->rqs[i], hctx_idx, i,
1395 						set->numa_node))
1396 					goto fail;
1397 			}
1398 
1399 			p += rq_size;
1400 			i++;
1401 		}
1402 	}
1403 
1404 	return tags;
1405 
1406 fail:
1407 	pr_warn("%s: failed to allocate requests\n", __func__);
1408 	blk_mq_free_rq_map(set, tags, hctx_idx);
1409 	return NULL;
1410 }
1411 
1412 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1413 {
1414 	kfree(bitmap->map);
1415 }
1416 
1417 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1418 {
1419 	unsigned int bpw = 8, total, num_maps, i;
1420 
1421 	bitmap->bits_per_word = bpw;
1422 
1423 	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1424 	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1425 					GFP_KERNEL, node);
1426 	if (!bitmap->map)
1427 		return -ENOMEM;
1428 
1429 	bitmap->map_size = num_maps;
1430 
1431 	total = nr_cpu_ids;
1432 	for (i = 0; i < num_maps; i++) {
1433 		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1434 		total -= bitmap->map[i].depth;
1435 	}
1436 
1437 	return 0;
1438 }
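/*
 * Illustrative example: with nr_cpu_ids == 12 and the bits_per_word of 8
 * above, this allocates ALIGN(12, 8) / 8 == 2 words; map[0] gets a depth
 * of 8 and map[1] covers the remaining 4 software queues.
 */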
1439 
1440 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1441 {
1442 	struct request_queue *q = hctx->queue;
1443 	struct blk_mq_ctx *ctx;
1444 	LIST_HEAD(tmp);
1445 
1446 	/*
1447 	 * Move ctx entries to new CPU, if this one is going away.
1448 	 */
1449 	ctx = __blk_mq_get_ctx(q, cpu);
1450 
1451 	spin_lock(&ctx->lock);
1452 	if (!list_empty(&ctx->rq_list)) {
1453 		list_splice_init(&ctx->rq_list, &tmp);
1454 		blk_mq_hctx_clear_pending(hctx, ctx);
1455 	}
1456 	spin_unlock(&ctx->lock);
1457 
1458 	if (list_empty(&tmp))
1459 		return NOTIFY_OK;
1460 
1461 	ctx = blk_mq_get_ctx(q);
1462 	spin_lock(&ctx->lock);
1463 
1464 	while (!list_empty(&tmp)) {
1465 		struct request *rq;
1466 
1467 		rq = list_first_entry(&tmp, struct request, queuelist);
1468 		rq->mq_ctx = ctx;
1469 		list_move_tail(&rq->queuelist, &ctx->rq_list);
1470 	}
1471 
1472 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1473 	blk_mq_hctx_mark_pending(hctx, ctx);
1474 
1475 	spin_unlock(&ctx->lock);
1476 
1477 	blk_mq_run_hw_queue(hctx, true);
1478 	blk_mq_put_ctx(ctx);
1479 	return NOTIFY_OK;
1480 }
1481 
1482 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1483 {
1484 	struct request_queue *q = hctx->queue;
1485 	struct blk_mq_tag_set *set = q->tag_set;
1486 
1487 	if (set->tags[hctx->queue_num])
1488 		return NOTIFY_OK;
1489 
1490 	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1491 	if (!set->tags[hctx->queue_num])
1492 		return NOTIFY_STOP;
1493 
1494 	hctx->tags = set->tags[hctx->queue_num];
1495 	return NOTIFY_OK;
1496 }
1497 
1498 static int blk_mq_hctx_notify(void *data, unsigned long action,
1499 			      unsigned int cpu)
1500 {
1501 	struct blk_mq_hw_ctx *hctx = data;
1502 
1503 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1504 		return blk_mq_hctx_cpu_offline(hctx, cpu);
1505 	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1506 		return blk_mq_hctx_cpu_online(hctx, cpu);
1507 
1508 	return NOTIFY_OK;
1509 }
1510 
1511 static void blk_mq_exit_hw_queues(struct request_queue *q,
1512 		struct blk_mq_tag_set *set, int nr_queue)
1513 {
1514 	struct blk_mq_hw_ctx *hctx;
1515 	unsigned int i;
1516 
1517 	queue_for_each_hw_ctx(q, hctx, i) {
1518 		if (i == nr_queue)
1519 			break;
1520 
1521 		blk_mq_tag_idle(hctx);
1522 
1523 		if (set->ops->exit_hctx)
1524 			set->ops->exit_hctx(hctx, i);
1525 
1526 		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1527 		kfree(hctx->ctxs);
1528 		blk_mq_free_bitmap(&hctx->ctx_map);
1529 	}
1530 
1531 }
1532 
1533 static void blk_mq_free_hw_queues(struct request_queue *q,
1534 		struct blk_mq_tag_set *set)
1535 {
1536 	struct blk_mq_hw_ctx *hctx;
1537 	unsigned int i;
1538 
1539 	queue_for_each_hw_ctx(q, hctx, i) {
1540 		free_cpumask_var(hctx->cpumask);
1541 		kfree(hctx);
1542 	}
1543 }
1544 
1545 static int blk_mq_init_hw_queues(struct request_queue *q,
1546 		struct blk_mq_tag_set *set)
1547 {
1548 	struct blk_mq_hw_ctx *hctx;
1549 	unsigned int i;
1550 
1551 	/*
1552 	 * Initialize hardware queues
1553 	 */
1554 	queue_for_each_hw_ctx(q, hctx, i) {
1555 		int node;
1556 
1557 		node = hctx->numa_node;
1558 		if (node == NUMA_NO_NODE)
1559 			node = hctx->numa_node = set->numa_node;
1560 
1561 		INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1562 		INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1563 		spin_lock_init(&hctx->lock);
1564 		INIT_LIST_HEAD(&hctx->dispatch);
1565 		hctx->queue = q;
1566 		hctx->queue_num = i;
1567 		hctx->flags = set->flags;
1568 		hctx->cmd_size = set->cmd_size;
1569 
1570 		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1571 						blk_mq_hctx_notify, hctx);
1572 		blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1573 
1574 		hctx->tags = set->tags[i];
1575 
1576 		/*
1577 		 * Allocate space for all possible CPUs to avoid allocation at
1578 		 * runtime.
1579 		 */
1580 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1581 						GFP_KERNEL, node);
1582 		if (!hctx->ctxs)
1583 			break;
1584 
1585 		if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1586 			break;
1587 
1588 		hctx->nr_ctx = 0;
1589 
1590 		if (set->ops->init_hctx &&
1591 		    set->ops->init_hctx(hctx, set->driver_data, i))
1592 			break;
1593 	}
1594 
1595 	if (i == q->nr_hw_queues)
1596 		return 0;
1597 
1598 	/*
1599 	 * Init failed
1600 	 */
1601 	blk_mq_exit_hw_queues(q, set, i);
1602 
1603 	return 1;
1604 }
1605 
1606 static void blk_mq_init_cpu_queues(struct request_queue *q,
1607 				   unsigned int nr_hw_queues)
1608 {
1609 	unsigned int i;
1610 
1611 	for_each_possible_cpu(i) {
1612 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1613 		struct blk_mq_hw_ctx *hctx;
1614 
1615 		memset(__ctx, 0, sizeof(*__ctx));
1616 		__ctx->cpu = i;
1617 		spin_lock_init(&__ctx->lock);
1618 		INIT_LIST_HEAD(&__ctx->rq_list);
1619 		__ctx->queue = q;
1620 
1621 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1622 		if (!cpu_online(i))
1623 			continue;
1624 
1625 		hctx = q->mq_ops->map_queue(q, i);
1626 		cpumask_set_cpu(i, hctx->cpumask);
1627 		hctx->nr_ctx++;
1628 
1629 		/*
1630 		 * Set local node, IFF we have more than one hw queue. If
1631 		 * not, we remain on the home node of the device
1632 		 */
1633 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1634 			hctx->numa_node = cpu_to_node(i);
1635 	}
1636 }
1637 
1638 static void blk_mq_map_swqueue(struct request_queue *q)
1639 {
1640 	unsigned int i;
1641 	struct blk_mq_hw_ctx *hctx;
1642 	struct blk_mq_ctx *ctx;
1643 
1644 	queue_for_each_hw_ctx(q, hctx, i) {
1645 		cpumask_clear(hctx->cpumask);
1646 		hctx->nr_ctx = 0;
1647 	}
1648 
1649 	/*
1650 	 * Map software to hardware queues
1651 	 */
1652 	queue_for_each_ctx(q, ctx, i) {
1653 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1654 		if (!cpu_online(i))
1655 			continue;
1656 
1657 		hctx = q->mq_ops->map_queue(q, i);
1658 		cpumask_set_cpu(i, hctx->cpumask);
1659 		ctx->index_hw = hctx->nr_ctx;
1660 		hctx->ctxs[hctx->nr_ctx++] = ctx;
1661 	}
1662 
1663 	queue_for_each_hw_ctx(q, hctx, i) {
1664 		/*
1665 		 * If no software queues are mapped to this hardware queue,
1666 		 * disable it and free the request entries
1667 		 */
1668 		if (!hctx->nr_ctx) {
1669 			struct blk_mq_tag_set *set = q->tag_set;
1670 
1671 			if (set->tags[i]) {
1672 				blk_mq_free_rq_map(set, set->tags[i], i);
1673 				set->tags[i] = NULL;
1674 				hctx->tags = NULL;
1675 			}
1676 			continue;
1677 		}
1678 
1679 		/*
1680 		 * Initialize batch roundrobin counts
1681 		 */
1682 		hctx->next_cpu = cpumask_first(hctx->cpumask);
1683 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1684 	}
1685 }
1686 
1687 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1688 {
1689 	struct blk_mq_hw_ctx *hctx;
1690 	struct request_queue *q;
1691 	bool shared;
1692 	int i;
1693 
1694 	if (set->tag_list.next == set->tag_list.prev)
1695 		shared = false;
1696 	else
1697 		shared = true;
1698 
1699 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
1700 		blk_mq_freeze_queue(q);
1701 
1702 		queue_for_each_hw_ctx(q, hctx, i) {
1703 			if (shared)
1704 				hctx->flags |= BLK_MQ_F_TAG_SHARED;
1705 			else
1706 				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1707 		}
1708 		blk_mq_unfreeze_queue(q);
1709 	}
1710 }
1711 
1712 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1713 {
1714 	struct blk_mq_tag_set *set = q->tag_set;
1715 
1716 	blk_mq_freeze_queue(q);
1717 
1718 	mutex_lock(&set->tag_list_lock);
1719 	list_del_init(&q->tag_set_list);
1720 	blk_mq_update_tag_set_depth(set);
1721 	mutex_unlock(&set->tag_list_lock);
1722 
1723 	blk_mq_unfreeze_queue(q);
1724 }
1725 
1726 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1727 				     struct request_queue *q)
1728 {
1729 	q->tag_set = set;
1730 
1731 	mutex_lock(&set->tag_list_lock);
1732 	list_add_tail(&q->tag_set_list, &set->tag_list);
1733 	blk_mq_update_tag_set_depth(set);
1734 	mutex_unlock(&set->tag_list_lock);
1735 }
1736 
1737 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1738 {
1739 	struct blk_mq_hw_ctx **hctxs;
1740 	struct blk_mq_ctx __percpu *ctx;
1741 	struct request_queue *q;
1742 	unsigned int *map;
1743 	int i;
1744 
1745 	ctx = alloc_percpu(struct blk_mq_ctx);
1746 	if (!ctx)
1747 		return ERR_PTR(-ENOMEM);
1748 
1749 	hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1750 			set->numa_node);
1751 
1752 	if (!hctxs)
1753 		goto err_percpu;
1754 
1755 	map = blk_mq_make_queue_map(set);
1756 	if (!map)
1757 		goto err_map;
1758 
1759 	for (i = 0; i < set->nr_hw_queues; i++) {
1760 		int node = blk_mq_hw_queue_to_node(map, i);
1761 
1762 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1763 					GFP_KERNEL, node);
1764 		if (!hctxs[i])
1765 			goto err_hctxs;
1766 
1767 		if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1768 			goto err_hctxs;
1769 
1770 		atomic_set(&hctxs[i]->nr_active, 0);
1771 		hctxs[i]->numa_node = node;
1772 		hctxs[i]->queue_num = i;
1773 	}
1774 
1775 	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1776 	if (!q)
1777 		goto err_hctxs;
1778 
1779 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
1780 		goto err_map;
1781 
1782 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1783 	blk_queue_rq_timeout(q, 30000);
1784 
1785 	q->nr_queues = nr_cpu_ids;
1786 	q->nr_hw_queues = set->nr_hw_queues;
1787 	q->mq_map = map;
1788 
1789 	q->queue_ctx = ctx;
1790 	q->queue_hw_ctx = hctxs;
1791 
1792 	q->mq_ops = set->ops;
1793 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1794 
1795 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
1796 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1797 
1798 	q->sg_reserved_size = INT_MAX;
1799 
1800 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1801 	INIT_LIST_HEAD(&q->requeue_list);
1802 	spin_lock_init(&q->requeue_lock);
1803 
1804 	if (q->nr_hw_queues > 1)
1805 		blk_queue_make_request(q, blk_mq_make_request);
1806 	else
1807 		blk_queue_make_request(q, blk_sq_make_request);
1808 
1809 	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1810 	if (set->timeout)
1811 		blk_queue_rq_timeout(q, set->timeout);
1812 
1813 	/*
1814 	 * Do this after blk_queue_make_request() overrides it...
1815 	 */
1816 	q->nr_requests = set->queue_depth;
1817 
1818 	if (set->ops->complete)
1819 		blk_queue_softirq_done(q, set->ops->complete);
1820 
1821 	blk_mq_init_flush(q);
1822 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1823 
1824 	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1825 				set->cmd_size, cache_line_size()),
1826 				GFP_KERNEL);
1827 	if (!q->flush_rq)
1828 		goto err_hw;
1829 
1830 	if (blk_mq_init_hw_queues(q, set))
1831 		goto err_flush_rq;
1832 
1833 	mutex_lock(&all_q_mutex);
1834 	list_add_tail(&q->all_q_node, &all_q_list);
1835 	mutex_unlock(&all_q_mutex);
1836 
1837 	blk_mq_add_queue_tag_set(set, q);
1838 
1839 	blk_mq_map_swqueue(q);
1840 
1841 	return q;
1842 
1843 err_flush_rq:
1844 	kfree(q->flush_rq);
1845 err_hw:
1846 	blk_cleanup_queue(q);
1847 err_hctxs:
1848 	kfree(map);
1849 	for (i = 0; i < set->nr_hw_queues; i++) {
1850 		if (!hctxs[i])
1851 			break;
1852 		free_cpumask_var(hctxs[i]->cpumask);
1853 		kfree(hctxs[i]);
1854 	}
1855 err_map:
1856 	kfree(hctxs);
1857 err_percpu:
1858 	free_percpu(ctx);
1859 	return ERR_PTR(-ENOMEM);
1860 }
1861 EXPORT_SYMBOL(blk_mq_init_queue);
1862 
1863 void blk_mq_free_queue(struct request_queue *q)
1864 {
1865 	struct blk_mq_tag_set	*set = q->tag_set;
1866 
1867 	blk_mq_del_queue_tag_set(q);
1868 
1869 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1870 	blk_mq_free_hw_queues(q, set);
1871 
1872 	percpu_ref_exit(&q->mq_usage_counter);
1873 
1874 	free_percpu(q->queue_ctx);
1875 	kfree(q->queue_hw_ctx);
1876 	kfree(q->mq_map);
1877 
1878 	q->queue_ctx = NULL;
1879 	q->queue_hw_ctx = NULL;
1880 	q->mq_map = NULL;
1881 
1882 	mutex_lock(&all_q_mutex);
1883 	list_del_init(&q->all_q_node);
1884 	mutex_unlock(&all_q_mutex);
1885 }
1886 
1887 /* Basically redo blk_mq_init_queue with queue frozen */
1888 static void blk_mq_queue_reinit(struct request_queue *q)
1889 {
1890 	blk_mq_freeze_queue(q);
1891 
1892 	blk_mq_sysfs_unregister(q);
1893 
1894 	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1895 
1896 	/*
1897 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1898 	 * we should change hctx numa_node according to the new topology (this
1899 	 * involves freeing and re-allocating memory; is it worth doing?)
1900 	 */
1901 
1902 	blk_mq_map_swqueue(q);
1903 
1904 	blk_mq_sysfs_register(q);
1905 
1906 	blk_mq_unfreeze_queue(q);
1907 }
1908 
1909 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1910 				      unsigned long action, void *hcpu)
1911 {
1912 	struct request_queue *q;
1913 
1914 	/*
1915 	 * Before new mappings are established, a hot-added CPU might already
1916 	 * start handling requests. This doesn't break anything as we map
1917 	 * offline CPUs to first hardware queue. We will re-init the queue
1918 	 * below to get optimal settings.
1919 	 */
1920 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1921 	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1922 		return NOTIFY_OK;
1923 
1924 	mutex_lock(&all_q_mutex);
1925 	list_for_each_entry(q, &all_q_list, all_q_node)
1926 		blk_mq_queue_reinit(q);
1927 	mutex_unlock(&all_q_mutex);
1928 	return NOTIFY_OK;
1929 }
1930 
1931 /*
1932  * Alloc a tag set to be associated with one or more request queues.
1933  * May fail with EINVAL for various error conditions. May adjust the
1934  * requested depth down if it is too large. In that case, the set
1935  * value will be stored in set->queue_depth.
1936  */
1937 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1938 {
1939 	int i;
1940 
1941 	if (!set->nr_hw_queues)
1942 		return -EINVAL;
1943 	if (!set->queue_depth)
1944 		return -EINVAL;
1945 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1946 		return -EINVAL;
1947 
1948 	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1949 		return -EINVAL;
1950 
1951 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
1952 		pr_info("blk-mq: reduced tag depth to %u\n",
1953 			BLK_MQ_MAX_DEPTH);
1954 		set->queue_depth = BLK_MQ_MAX_DEPTH;
1955 	}
1956 
1957 	set->tags = kmalloc_node(set->nr_hw_queues *
1958 				 sizeof(struct blk_mq_tags *),
1959 				 GFP_KERNEL, set->numa_node);
1960 	if (!set->tags)
1961 		goto out;
1962 
1963 	for (i = 0; i < set->nr_hw_queues; i++) {
1964 		set->tags[i] = blk_mq_init_rq_map(set, i);
1965 		if (!set->tags[i])
1966 			goto out_unwind;
1967 	}
1968 
1969 	mutex_init(&set->tag_list_lock);
1970 	INIT_LIST_HEAD(&set->tag_list);
1971 
1972 	return 0;
1973 
1974 out_unwind:
1975 	while (--i >= 0)
1976 		blk_mq_free_rq_map(set, set->tags[i], i);
1977 out:
1978 	return -ENOMEM;
1979 }
1980 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
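/*
 * Setup sketch (illustrative; the "mydrv" names are hypothetical): a
 * driver typically fills in one tag set and creates a queue from it. The
 * ops table must provide at least queue_rq and map_queue, and
 * blk_mq_map_queue() can serve as the latter:
 *
 *	set->ops = &mydrv_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct mydrv_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */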
1981 
1982 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
1983 {
1984 	int i;
1985 
1986 	for (i = 0; i < set->nr_hw_queues; i++) {
1987 		if (set->tags[i])
1988 			blk_mq_free_rq_map(set, set->tags[i], i);
1989 	}
1990 
1991 	kfree(set->tags);
1992 }
1993 EXPORT_SYMBOL(blk_mq_free_tag_set);
1994 
1995 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
1996 {
1997 	struct blk_mq_tag_set *set = q->tag_set;
1998 	struct blk_mq_hw_ctx *hctx;
1999 	int i, ret;
2000 
2001 	if (!set || nr > set->queue_depth)
2002 		return -EINVAL;
2003 
2004 	ret = 0;
2005 	queue_for_each_hw_ctx(q, hctx, i) {
2006 		ret = blk_mq_tag_update_depth(hctx->tags, nr);
2007 		if (ret)
2008 			break;
2009 	}
2010 
2011 	if (!ret)
2012 		q->nr_requests = nr;
2013 
2014 	return ret;
2015 }
2016 
2017 void blk_mq_disable_hotplug(void)
2018 {
2019 	mutex_lock(&all_q_mutex);
2020 }
2021 
2022 void blk_mq_enable_hotplug(void)
2023 {
2024 	mutex_unlock(&all_q_mutex);
2025 }
2026 
2027 static int __init blk_mq_init(void)
2028 {
2029 	blk_mq_cpu_init();
2030 
2031 	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2032 
2033 	return 0;
2034 }
2035 subsys_initcall(blk_mq_init);
2036