xref: /openbmc/linux/block/blk-mq.c (revision 7051924f771722c6dd235e693742cda6488ac700)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 
24 #include <trace/events/block.h>
25 
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30 
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33 
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35 
36 /*
37  * Check if any of the ctxs have pending work in this hardware queue
38  */
39 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
40 {
41 	unsigned int i;
42 
43 	for (i = 0; i < hctx->ctx_map.map_size; i++)
44 		if (hctx->ctx_map.map[i].word)
45 			return true;
46 
47 	return false;
48 }
49 
50 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51 					      struct blk_mq_ctx *ctx)
52 {
53 	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54 }
55 
56 #define CTX_TO_BIT(hctx, ctx)	\
57 	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
58 
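/*
 * Editorial note (worked example, not from the original source): the pending
 * bitmap is split into per-word structures of ctx_map.bits_per_word bits
 * each (8 by default, see blk_mq_alloc_bitmap() below). A ctx whose
 * index_hw is 10 therefore lives in ctx_map.map[10 / 8] == map[1], at bit
 * CTX_TO_BIT() == 10 & (8 - 1) == 2.
 */
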
59 /*
60  * Mark this ctx as having pending work in this hardware queue
61  */
62 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
63 				     struct blk_mq_ctx *ctx)
64 {
65 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
66 
67 	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68 		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
69 }
70 
71 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
72 				      struct blk_mq_ctx *ctx)
73 {
74 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
75 
76 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
77 }
78 
79 static int blk_mq_queue_enter(struct request_queue *q)
80 {
81 	while (true) {
82 		int ret;
83 
84 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
85 			return 0;
86 
87 		ret = wait_event_interruptible(q->mq_freeze_wq,
88 				!q->mq_freeze_depth || blk_queue_dying(q));
89 		if (blk_queue_dying(q))
90 			return -ENODEV;
91 		if (ret)
92 			return ret;
93 	}
94 }
95 
96 static void blk_mq_queue_exit(struct request_queue *q)
97 {
98 	percpu_ref_put(&q->mq_usage_counter);
99 }
100 
101 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
102 {
103 	struct request_queue *q =
104 		container_of(ref, struct request_queue, mq_usage_counter);
105 
106 	wake_up_all(&q->mq_freeze_wq);
107 }
108 
109 /*
110  * Guarantee no request is in use, so we can change any data structure of
111  * the queue afterward.
112  */
113 void blk_mq_freeze_queue(struct request_queue *q)
114 {
115 	bool freeze;
116 
117 	spin_lock_irq(q->queue_lock);
118 	freeze = !q->mq_freeze_depth++;
119 	spin_unlock_irq(q->queue_lock);
120 
121 	if (freeze) {
122 		percpu_ref_kill(&q->mq_usage_counter);
123 		blk_mq_run_queues(q, false);
124 	}
125 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
126 }
127 
128 static void blk_mq_unfreeze_queue(struct request_queue *q)
129 {
130 	bool wake;
131 
132 	spin_lock_irq(q->queue_lock);
133 	wake = !--q->mq_freeze_depth;
134 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
135 	spin_unlock_irq(q->queue_lock);
136 	if (wake) {
137 		percpu_ref_reinit(&q->mq_usage_counter);
138 		wake_up_all(&q->mq_freeze_wq);
139 	}
140 }
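
/*
 * Editorial sketch of the intended pairing (blk_mq_update_tag_set_depth()
 * below is an in-tree user of this pattern):
 *
 *	blk_mq_freeze_queue(q);
 *	... safely modify queue/hctx data structures ...
 *	blk_mq_unfreeze_queue(q);
 */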
141 
142 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
143 {
144 	return blk_mq_has_free_tags(hctx->tags);
145 }
146 EXPORT_SYMBOL(blk_mq_can_queue);
147 
148 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
149 			       struct request *rq, unsigned int rw_flags)
150 {
151 	if (blk_queue_io_stat(q))
152 		rw_flags |= REQ_IO_STAT;
153 
154 	INIT_LIST_HEAD(&rq->queuelist);
155 	/* csd/requeue_work/fifo_time is initialized before use */
156 	rq->q = q;
157 	rq->mq_ctx = ctx;
158 	rq->cmd_flags |= rw_flags;
159 	/* do not touch atomic flags, it needs atomic ops against the timer */
160 	rq->cpu = -1;
161 	INIT_HLIST_NODE(&rq->hash);
162 	RB_CLEAR_NODE(&rq->rb_node);
163 	rq->rq_disk = NULL;
164 	rq->part = NULL;
165 	rq->start_time = jiffies;
166 #ifdef CONFIG_BLK_CGROUP
167 	rq->rl = NULL;
168 	set_start_time_ns(rq);
169 	rq->io_start_time_ns = 0;
170 #endif
171 	rq->nr_phys_segments = 0;
172 #if defined(CONFIG_BLK_DEV_INTEGRITY)
173 	rq->nr_integrity_segments = 0;
174 #endif
175 	rq->special = NULL;
176 	/* tag was already set */
177 	rq->errors = 0;
178 
179 	rq->cmd = rq->__cmd;
180 
181 	rq->extra_len = 0;
182 	rq->sense_len = 0;
183 	rq->resid_len = 0;
184 	rq->sense = NULL;
185 
186 	INIT_LIST_HEAD(&rq->timeout_list);
187 	rq->timeout = 0;
188 
189 	rq->end_io = NULL;
190 	rq->end_io_data = NULL;
191 	rq->next_rq = NULL;
192 
193 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
194 }
195 
196 static struct request *
197 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
198 {
199 	struct request *rq;
200 	unsigned int tag;
201 
202 	tag = blk_mq_get_tag(data);
203 	if (tag != BLK_MQ_TAG_FAIL) {
204 		rq = data->hctx->tags->rqs[tag];
205 
206 		rq->cmd_flags = 0;
207 		if (blk_mq_tag_busy(data->hctx)) {
208 			rq->cmd_flags = REQ_MQ_INFLIGHT;
209 			atomic_inc(&data->hctx->nr_active);
210 		}
211 
212 		rq->tag = tag;
213 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
214 		return rq;
215 	}
216 
217 	return NULL;
218 }
219 
220 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
221 		bool reserved)
222 {
223 	struct blk_mq_ctx *ctx;
224 	struct blk_mq_hw_ctx *hctx;
225 	struct request *rq;
226 	struct blk_mq_alloc_data alloc_data;
227 
228 	if (blk_mq_queue_enter(q))
229 		return NULL;
230 
231 	ctx = blk_mq_get_ctx(q);
232 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
233 	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
234 			reserved, ctx, hctx);
235 
236 	rq = __blk_mq_alloc_request(&alloc_data, rw);
237 	if (!rq && (gfp & __GFP_WAIT)) {
238 		__blk_mq_run_hw_queue(hctx);
239 		blk_mq_put_ctx(ctx);
240 
241 		ctx = blk_mq_get_ctx(q);
242 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
243 		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
244 				hctx);
245 		rq =  __blk_mq_alloc_request(&alloc_data, rw);
246 		ctx = alloc_data.ctx;
247 	}
248 	blk_mq_put_ctx(ctx);
249 	return rq;
250 }
251 EXPORT_SYMBOL(blk_mq_alloc_request);
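
/*
 * Editorial example (a minimal sketch, not from the original source): a
 * driver allocating a request outside the normal bio path, e.g. for a
 * private command, and releasing it again:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (!rq)
 *		return -ENOMEM;
 *	... fill in rq->cmd / the driver-private payload ...
 *	blk_mq_free_request(rq);
 */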
252 
253 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
254 				  struct blk_mq_ctx *ctx, struct request *rq)
255 {
256 	const int tag = rq->tag;
257 	struct request_queue *q = rq->q;
258 
259 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
260 		atomic_dec(&hctx->nr_active);
261 
262 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
263 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
264 	blk_mq_queue_exit(q);
265 }
266 
267 void blk_mq_free_request(struct request *rq)
268 {
269 	struct blk_mq_ctx *ctx = rq->mq_ctx;
270 	struct blk_mq_hw_ctx *hctx;
271 	struct request_queue *q = rq->q;
272 
273 	ctx->rq_completed[rq_is_sync(rq)]++;
274 
275 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
276 	__blk_mq_free_request(hctx, ctx, rq);
277 }
278 
279 /*
280  * Clone all relevant state from a request that has been put on hold in
281  * the flush state machine into the preallocated flush request that hangs
282  * off the request queue.
283  *
284  * For a driver the flush request should be invisible; that's why we are
285  * impersonating the original request here.
286  */
287 void blk_mq_clone_flush_request(struct request *flush_rq,
288 		struct request *orig_rq)
289 {
290 	struct blk_mq_hw_ctx *hctx =
291 		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
292 
293 	flush_rq->mq_ctx = orig_rq->mq_ctx;
294 	flush_rq->tag = orig_rq->tag;
295 	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
296 		hctx->cmd_size);
297 }
298 
299 inline void __blk_mq_end_io(struct request *rq, int error)
300 {
301 	blk_account_io_done(rq);
302 
303 	if (rq->end_io) {
304 		rq->end_io(rq, error);
305 	} else {
306 		if (unlikely(blk_bidi_rq(rq)))
307 			blk_mq_free_request(rq->next_rq);
308 		blk_mq_free_request(rq);
309 	}
310 }
311 EXPORT_SYMBOL(__blk_mq_end_io);
312 
313 void blk_mq_end_io(struct request *rq, int error)
314 {
315 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
316 		BUG();
317 	__blk_mq_end_io(rq, error);
318 }
319 EXPORT_SYMBOL(blk_mq_end_io);
320 
321 static void __blk_mq_complete_request_remote(void *data)
322 {
323 	struct request *rq = data;
324 
325 	rq->q->softirq_done_fn(rq);
326 }
327 
328 static void blk_mq_ipi_complete_request(struct request *rq)
329 {
330 	struct blk_mq_ctx *ctx = rq->mq_ctx;
331 	bool shared = false;
332 	int cpu;
333 
334 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
335 		rq->q->softirq_done_fn(rq);
336 		return;
337 	}
338 
339 	cpu = get_cpu();
340 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
341 		shared = cpus_share_cache(cpu, ctx->cpu);
342 
343 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
344 		rq->csd.func = __blk_mq_complete_request_remote;
345 		rq->csd.info = rq;
346 		rq->csd.flags = 0;
347 		smp_call_function_single_async(ctx->cpu, &rq->csd);
348 	} else {
349 		rq->q->softirq_done_fn(rq);
350 	}
351 	put_cpu();
352 }
353 
354 void __blk_mq_complete_request(struct request *rq)
355 {
356 	struct request_queue *q = rq->q;
357 
358 	if (!q->softirq_done_fn)
359 		blk_mq_end_io(rq, rq->errors);
360 	else
361 		blk_mq_ipi_complete_request(rq);
362 }
363 
364 /**
365  * blk_mq_complete_request - end I/O on a request
366  * @rq:		the request being processed
367  *
368  * Description:
369  *	Ends all I/O on a request. It does not handle partial completions.
370  *	The actual completion happens out-of-order, through an IPI handler.
371  **/
372 void blk_mq_complete_request(struct request *rq)
373 {
374 	struct request_queue *q = rq->q;
375 
376 	if (unlikely(blk_should_fake_timeout(q)))
377 		return;
378 	if (!blk_mark_rq_complete(rq))
379 		__blk_mq_complete_request(rq);
380 }
381 EXPORT_SYMBOL(blk_mq_complete_request);
382 
383 static void blk_mq_start_request(struct request *rq, bool last)
384 {
385 	struct request_queue *q = rq->q;
386 
387 	trace_block_rq_issue(q, rq);
388 
389 	rq->resid_len = blk_rq_bytes(rq);
390 	if (unlikely(blk_bidi_rq(rq)))
391 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
392 
393 	blk_add_timer(rq);
394 
395 	/*
396 	 * Mark us as started and clear complete. Complete might have been
397 	 * set if requeue raced with timeout, which then marked it as
398 	 * complete. So be sure to clear complete again when we start
399 	 * the request, otherwise we'll ignore the completion event.
400 	 */
401 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
402 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
403 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
404 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
405 
406 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
407 		/*
408 		 * Make sure space for the drain appears.  We know we can do
409 		 * this because max_hw_segments has been adjusted to be one
410 		 * fewer than the device can handle.
411 		 */
412 		rq->nr_phys_segments++;
413 	}
414 
415 	/*
416 	 * Flag the last request in the series so that drivers know when IO
417 	 * should be kicked off, if they don't do it on a per-request basis.
418 	 *
419 	 * Note: the flag isn't the only condition on which drivers should kick off IO.
420 	 * If the drive is busy, the last request might not have the bit set.
421 	 */
422 	if (last)
423 		rq->cmd_flags |= REQ_END;
424 }
425 
426 static void __blk_mq_requeue_request(struct request *rq)
427 {
428 	struct request_queue *q = rq->q;
429 
430 	trace_block_rq_requeue(q, rq);
431 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
432 
433 	rq->cmd_flags &= ~REQ_END;
434 
435 	if (q->dma_drain_size && blk_rq_bytes(rq))
436 		rq->nr_phys_segments--;
437 }
438 
439 void blk_mq_requeue_request(struct request *rq)
440 {
441 	__blk_mq_requeue_request(rq);
442 	blk_clear_rq_complete(rq);
443 
444 	BUG_ON(blk_queued_rq(rq));
445 	blk_mq_add_to_requeue_list(rq, true);
446 }
447 EXPORT_SYMBOL(blk_mq_requeue_request);
448 
449 static void blk_mq_requeue_work(struct work_struct *work)
450 {
451 	struct request_queue *q =
452 		container_of(work, struct request_queue, requeue_work);
453 	LIST_HEAD(rq_list);
454 	struct request *rq, *next;
455 	unsigned long flags;
456 
457 	spin_lock_irqsave(&q->requeue_lock, flags);
458 	list_splice_init(&q->requeue_list, &rq_list);
459 	spin_unlock_irqrestore(&q->requeue_lock, flags);
460 
461 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
462 		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
463 			continue;
464 
465 		rq->cmd_flags &= ~REQ_SOFTBARRIER;
466 		list_del_init(&rq->queuelist);
467 		blk_mq_insert_request(rq, true, false, false);
468 	}
469 
470 	while (!list_empty(&rq_list)) {
471 		rq = list_entry(rq_list.next, struct request, queuelist);
472 		list_del_init(&rq->queuelist);
473 		blk_mq_insert_request(rq, false, false, false);
474 	}
475 
476 	blk_mq_run_queues(q, false);
477 }
478 
479 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
480 {
481 	struct request_queue *q = rq->q;
482 	unsigned long flags;
483 
484 	/*
485 	 * We abuse this flag that is otherwise used by the I/O scheduler to
486 	 * request head insertion from the workqueue.
487 	 */
488 	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
489 
490 	spin_lock_irqsave(&q->requeue_lock, flags);
491 	if (at_head) {
492 		rq->cmd_flags |= REQ_SOFTBARRIER;
493 		list_add(&rq->queuelist, &q->requeue_list);
494 	} else {
495 		list_add_tail(&rq->queuelist, &q->requeue_list);
496 	}
497 	spin_unlock_irqrestore(&q->requeue_lock, flags);
498 }
499 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
500 
501 void blk_mq_kick_requeue_list(struct request_queue *q)
502 {
503 	kblockd_schedule_work(&q->requeue_work);
504 }
505 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
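
/*
 * Editorial note: blk_mq_requeue_request() and blk_mq_add_to_requeue_list()
 * only park requests on q->requeue_list; nothing is reinserted until the
 * requeue work runs. A driver-side sketch (assumed usage, not from the
 * original source):
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(rq->q);
 */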
506 
507 static inline bool is_flush_request(struct request *rq, unsigned int tag)
508 {
509 	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
510 			rq->q->flush_rq->tag == tag);
511 }
512 
513 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
514 {
515 	struct request *rq = tags->rqs[tag];
516 
517 	if (!is_flush_request(rq, tag))
518 		return rq;
519 
520 	return rq->q->flush_rq;
521 }
522 EXPORT_SYMBOL(blk_mq_tag_to_rq);
523 
524 struct blk_mq_timeout_data {
525 	struct blk_mq_hw_ctx *hctx;
526 	unsigned long *next;
527 	unsigned int *next_set;
528 };
529 
530 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
531 {
532 	struct blk_mq_timeout_data *data = __data;
533 	struct blk_mq_hw_ctx *hctx = data->hctx;
534 	unsigned int tag;
535 
536 	/* It may not be in flight yet (this is where
537 	 * the REQ_ATOM_STARTED flag comes in). The requests are
538 	 * statically allocated, so we know it's always safe to access the
539 	 * memory associated with a bit offset into ->rqs[].
540 	 */
541 	tag = 0;
542 	do {
543 		struct request *rq;
544 
545 		tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
546 		if (tag >= hctx->tags->nr_tags)
547 			break;
548 
549 		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
550 		if (rq->q != hctx->queue)
551 			continue;
552 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
553 			continue;
554 
555 		blk_rq_check_expired(rq, data->next, data->next_set);
556 	} while (1);
557 }
558 
559 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
560 					unsigned long *next,
561 					unsigned int *next_set)
562 {
563 	struct blk_mq_timeout_data data = {
564 		.hctx		= hctx,
565 		.next		= next,
566 		.next_set	= next_set,
567 	};
568 
569 	/*
570 	 * Ask the tagging code to iterate busy requests, so we can
571 	 * check them for timeout.
572 	 */
573 	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
574 }
575 
576 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
577 {
578 	struct request_queue *q = rq->q;
579 
580 	/*
581 	 * We know that complete is set at this point. If STARTED isn't set
582 	 * anymore, then the request isn't active and the "timeout" should
583 	 * just be ignored. This can happen due to the bitflag ordering.
584 	 * Timeout first checks if STARTED is set, and if it is, assumes
585 	 * the request is active. But if we race with completion, then
586 	 * both flags will get cleared. So check here again, and ignore
587 	 * a timeout event with a request that isn't active.
588 	 */
589 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
590 		return BLK_EH_NOT_HANDLED;
591 
592 	if (!q->mq_ops->timeout)
593 		return BLK_EH_RESET_TIMER;
594 
595 	return q->mq_ops->timeout(rq);
596 }
597 
598 static void blk_mq_rq_timer(unsigned long data)
599 {
600 	struct request_queue *q = (struct request_queue *) data;
601 	struct blk_mq_hw_ctx *hctx;
602 	unsigned long next = 0;
603 	int i, next_set = 0;
604 
605 	queue_for_each_hw_ctx(q, hctx, i) {
606 		/*
607 		 * If no software queues are currently mapped to this
608 		 * hardware queue, there's nothing to check
609 		 */
610 		if (!hctx->nr_ctx || !hctx->tags)
611 			continue;
612 
613 		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
614 	}
615 
616 	if (next_set) {
617 		next = blk_rq_timeout(round_jiffies_up(next));
618 		mod_timer(&q->timeout, next);
619 	} else {
620 		queue_for_each_hw_ctx(q, hctx, i)
621 			blk_mq_tag_idle(hctx);
622 	}
623 }
624 
625 /*
626  * Reverse check our software queue for entries that we could potentially
627  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
628  * too much time checking for merges.
629  */
630 static bool blk_mq_attempt_merge(struct request_queue *q,
631 				 struct blk_mq_ctx *ctx, struct bio *bio)
632 {
633 	struct request *rq;
634 	int checked = 8;
635 
636 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
637 		int el_ret;
638 
639 		if (!checked--)
640 			break;
641 
642 		if (!blk_rq_merge_ok(rq, bio))
643 			continue;
644 
645 		el_ret = blk_try_merge(rq, bio);
646 		if (el_ret == ELEVATOR_BACK_MERGE) {
647 			if (bio_attempt_back_merge(q, rq, bio)) {
648 				ctx->rq_merged++;
649 				return true;
650 			}
651 			break;
652 		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
653 			if (bio_attempt_front_merge(q, rq, bio)) {
654 				ctx->rq_merged++;
655 				return true;
656 			}
657 			break;
658 		}
659 	}
660 
661 	return false;
662 }
663 
664 /*
665  * Process software queues that have been marked busy, splicing them
666  * to the for-dispatch list.
667  */
668 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
669 {
670 	struct blk_mq_ctx *ctx;
671 	int i;
672 
673 	for (i = 0; i < hctx->ctx_map.map_size; i++) {
674 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
675 		unsigned int off, bit;
676 
677 		if (!bm->word)
678 			continue;
679 
680 		bit = 0;
681 		off = i * hctx->ctx_map.bits_per_word;
682 		do {
683 			bit = find_next_bit(&bm->word, bm->depth, bit);
684 			if (bit >= bm->depth)
685 				break;
686 
687 			ctx = hctx->ctxs[bit + off];
688 			clear_bit(bit, &bm->word);
689 			spin_lock(&ctx->lock);
690 			list_splice_tail_init(&ctx->rq_list, list);
691 			spin_unlock(&ctx->lock);
692 
693 			bit++;
694 		} while (1);
695 	}
696 }
697 
698 /*
699  * Run this hardware queue, pulling any software queues mapped to it in.
700  * Note that this function currently has various problems around ordering
701  * of IO. In particular, we'd like FIFO behaviour on handling existing
702  * items on the hctx->dispatch list. Ignore that for now.
703  */
704 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
705 {
706 	struct request_queue *q = hctx->queue;
707 	struct request *rq;
708 	LIST_HEAD(rq_list);
709 	int queued;
710 
711 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
712 
713 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
714 		return;
715 
716 	hctx->run++;
717 
718 	/*
719 	 * Touch any software queue that has pending entries.
720 	 */
721 	flush_busy_ctxs(hctx, &rq_list);
722 
723 	/*
724 	 * If we have previous entries on our dispatch list, grab them
725 	 * and stuff them at the front for more fair dispatch.
726 	 */
727 	if (!list_empty_careful(&hctx->dispatch)) {
728 		spin_lock(&hctx->lock);
729 		if (!list_empty(&hctx->dispatch))
730 			list_splice_init(&hctx->dispatch, &rq_list);
731 		spin_unlock(&hctx->lock);
732 	}
733 
734 	/*
735 	 * Now process all the entries, sending them to the driver.
736 	 */
737 	queued = 0;
738 	while (!list_empty(&rq_list)) {
739 		int ret;
740 
741 		rq = list_first_entry(&rq_list, struct request, queuelist);
742 		list_del_init(&rq->queuelist);
743 
744 		blk_mq_start_request(rq, list_empty(&rq_list));
745 
746 		ret = q->mq_ops->queue_rq(hctx, rq);
747 		switch (ret) {
748 		case BLK_MQ_RQ_QUEUE_OK:
749 			queued++;
750 			continue;
751 		case BLK_MQ_RQ_QUEUE_BUSY:
752 			list_add(&rq->queuelist, &rq_list);
753 			__blk_mq_requeue_request(rq);
754 			break;
755 		default:
756 			pr_err("blk-mq: bad return on queue: %d\n", ret);
757 		case BLK_MQ_RQ_QUEUE_ERROR:
758 			rq->errors = -EIO;
759 			blk_mq_end_io(rq, rq->errors);
760 			break;
761 		}
762 
763 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
764 			break;
765 	}
766 
767 	if (!queued)
768 		hctx->dispatched[0]++;
769 	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
770 		hctx->dispatched[ilog2(queued) + 1]++;
771 
772 	/*
773 	 * Any items that need requeuing? Stuff them into hctx->dispatch;
774 	 * that is where we will continue on the next queue run.
775 	 */
776 	if (!list_empty(&rq_list)) {
777 		spin_lock(&hctx->lock);
778 		list_splice(&rq_list, &hctx->dispatch);
779 		spin_unlock(&hctx->lock);
780 	}
781 }
782 
783 /*
784  * It'd be great if the workqueue API had a way to pass
785  * in a mask and had some smarts for more clever placement.
786  * For now we just round-robin here, switching for every
787  * BLK_MQ_CPU_WORK_BATCH queued items.
788  */
789 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
790 {
791 	int cpu = hctx->next_cpu;
792 
793 	if (--hctx->next_cpu_batch <= 0) {
794 		int next_cpu;
795 
796 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
797 		if (next_cpu >= nr_cpu_ids)
798 			next_cpu = cpumask_first(hctx->cpumask);
799 
800 		hctx->next_cpu = next_cpu;
801 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
802 	}
803 
804 	return cpu;
805 }
806 
807 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
808 {
809 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
810 		return;
811 
812 	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
813 		__blk_mq_run_hw_queue(hctx);
814 	else if (hctx->queue->nr_hw_queues == 1)
815 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
816 	else {
817 		unsigned int cpu;
818 
819 		cpu = blk_mq_hctx_next_cpu(hctx);
820 		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
821 	}
822 }
823 
824 void blk_mq_run_queues(struct request_queue *q, bool async)
825 {
826 	struct blk_mq_hw_ctx *hctx;
827 	int i;
828 
829 	queue_for_each_hw_ctx(q, hctx, i) {
830 		if ((!blk_mq_hctx_has_pending(hctx) &&
831 		    list_empty_careful(&hctx->dispatch)) ||
832 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
833 			continue;
834 
835 		preempt_disable();
836 		blk_mq_run_hw_queue(hctx, async);
837 		preempt_enable();
838 	}
839 }
840 EXPORT_SYMBOL(blk_mq_run_queues);
841 
842 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
843 {
844 	cancel_delayed_work(&hctx->run_work);
845 	cancel_delayed_work(&hctx->delay_work);
846 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
847 }
848 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
849 
850 void blk_mq_stop_hw_queues(struct request_queue *q)
851 {
852 	struct blk_mq_hw_ctx *hctx;
853 	int i;
854 
855 	queue_for_each_hw_ctx(q, hctx, i)
856 		blk_mq_stop_hw_queue(hctx);
857 }
858 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
859 
860 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
861 {
862 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
863 
864 	preempt_disable();
865 	blk_mq_run_hw_queue(hctx, false);
866 	preempt_enable();
867 }
868 EXPORT_SYMBOL(blk_mq_start_hw_queue);
869 
870 void blk_mq_start_hw_queues(struct request_queue *q)
871 {
872 	struct blk_mq_hw_ctx *hctx;
873 	int i;
874 
875 	queue_for_each_hw_ctx(q, hctx, i)
876 		blk_mq_start_hw_queue(hctx);
877 }
878 EXPORT_SYMBOL(blk_mq_start_hw_queues);
879 
880 
881 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
882 {
883 	struct blk_mq_hw_ctx *hctx;
884 	int i;
885 
886 	queue_for_each_hw_ctx(q, hctx, i) {
887 		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
888 			continue;
889 
890 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
891 		preempt_disable();
892 		blk_mq_run_hw_queue(hctx, async);
893 		preempt_enable();
894 	}
895 }
896 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
897 
898 static void blk_mq_run_work_fn(struct work_struct *work)
899 {
900 	struct blk_mq_hw_ctx *hctx;
901 
902 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
903 
904 	__blk_mq_run_hw_queue(hctx);
905 }
906 
907 static void blk_mq_delay_work_fn(struct work_struct *work)
908 {
909 	struct blk_mq_hw_ctx *hctx;
910 
911 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
912 
913 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
914 		__blk_mq_run_hw_queue(hctx);
915 }
916 
917 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
918 {
919 	unsigned long tmo = msecs_to_jiffies(msecs);
920 
921 	if (hctx->queue->nr_hw_queues == 1)
922 		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
923 	else {
924 		unsigned int cpu;
925 
926 		cpu = blk_mq_hctx_next_cpu(hctx);
927 		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
928 	}
929 }
930 EXPORT_SYMBOL(blk_mq_delay_queue);
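
/*
 * Editorial sketch (assumed driver usage, not from the original source): a
 * ->queue_rq handler that finds the device busy can park the hardware queue
 * and have it re-run later, e.g. after roughly 100 msecs:
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	blk_mq_delay_queue(hctx, 100);
 *	return BLK_MQ_RQ_QUEUE_BUSY;
 *
 * Note that blk_mq_delay_work_fn() above only re-runs the queue when
 * BLK_MQ_S_STOPPED was set, so the stop must come first.
 */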
931 
932 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
933 				    struct request *rq, bool at_head)
934 {
935 	struct blk_mq_ctx *ctx = rq->mq_ctx;
936 
937 	trace_block_rq_insert(hctx->queue, rq);
938 
939 	if (at_head)
940 		list_add(&rq->queuelist, &ctx->rq_list);
941 	else
942 		list_add_tail(&rq->queuelist, &ctx->rq_list);
943 
944 	blk_mq_hctx_mark_pending(hctx, ctx);
945 }
946 
947 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
948 		bool async)
949 {
950 	struct request_queue *q = rq->q;
951 	struct blk_mq_hw_ctx *hctx;
952 	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
953 
954 	current_ctx = blk_mq_get_ctx(q);
955 	if (!cpu_online(ctx->cpu))
956 		rq->mq_ctx = ctx = current_ctx;
957 
958 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
959 
960 	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
961 	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
962 		blk_insert_flush(rq);
963 	} else {
964 		spin_lock(&ctx->lock);
965 		__blk_mq_insert_request(hctx, rq, at_head);
966 		spin_unlock(&ctx->lock);
967 	}
968 
969 	if (run_queue)
970 		blk_mq_run_hw_queue(hctx, async);
971 
972 	blk_mq_put_ctx(current_ctx);
973 }
974 
975 static void blk_mq_insert_requests(struct request_queue *q,
976 				     struct blk_mq_ctx *ctx,
977 				     struct list_head *list,
978 				     int depth,
979 				     bool from_schedule)
980 
981 {
982 	struct blk_mq_hw_ctx *hctx;
983 	struct blk_mq_ctx *current_ctx;
984 
985 	trace_block_unplug(q, depth, !from_schedule);
986 
987 	current_ctx = blk_mq_get_ctx(q);
988 
989 	if (!cpu_online(ctx->cpu))
990 		ctx = current_ctx;
991 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
992 
993 	/*
994 	 * preemption doesn't flush the plug list, so it's possible that ctx->cpu is
995 	 * offline now
996 	 */
997 	spin_lock(&ctx->lock);
998 	while (!list_empty(list)) {
999 		struct request *rq;
1000 
1001 		rq = list_first_entry(list, struct request, queuelist);
1002 		list_del_init(&rq->queuelist);
1003 		rq->mq_ctx = ctx;
1004 		__blk_mq_insert_request(hctx, rq, false);
1005 	}
1006 	spin_unlock(&ctx->lock);
1007 
1008 	blk_mq_run_hw_queue(hctx, from_schedule);
1009 	blk_mq_put_ctx(current_ctx);
1010 }
1011 
1012 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1013 {
1014 	struct request *rqa = container_of(a, struct request, queuelist);
1015 	struct request *rqb = container_of(b, struct request, queuelist);
1016 
1017 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1018 		 (rqa->mq_ctx == rqb->mq_ctx &&
1019 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1020 }
1021 
1022 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1023 {
1024 	struct blk_mq_ctx *this_ctx;
1025 	struct request_queue *this_q;
1026 	struct request *rq;
1027 	LIST_HEAD(list);
1028 	LIST_HEAD(ctx_list);
1029 	unsigned int depth;
1030 
1031 	list_splice_init(&plug->mq_list, &list);
1032 
1033 	list_sort(NULL, &list, plug_ctx_cmp);
1034 
1035 	this_q = NULL;
1036 	this_ctx = NULL;
1037 	depth = 0;
1038 
1039 	while (!list_empty(&list)) {
1040 		rq = list_entry_rq(list.next);
1041 		list_del_init(&rq->queuelist);
1042 		BUG_ON(!rq->q);
1043 		if (rq->mq_ctx != this_ctx) {
1044 			if (this_ctx) {
1045 				blk_mq_insert_requests(this_q, this_ctx,
1046 							&ctx_list, depth,
1047 							from_schedule);
1048 			}
1049 
1050 			this_ctx = rq->mq_ctx;
1051 			this_q = rq->q;
1052 			depth = 0;
1053 		}
1054 
1055 		depth++;
1056 		list_add_tail(&rq->queuelist, &ctx_list);
1057 	}
1058 
1059 	/*
1060 	 * If 'this_ctx' is set, we know we have entries to complete
1061 	 * on 'ctx_list'. Do those.
1062 	 */
1063 	if (this_ctx) {
1064 		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1065 				       from_schedule);
1066 	}
1067 }
1068 
1069 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1070 {
1071 	init_request_from_bio(rq, bio);
1072 
1073 	if (blk_do_io_stat(rq))
1074 		blk_account_io_start(rq, 1);
1075 }
1076 
1077 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1078 {
1079 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1080 		!blk_queue_nomerges(hctx->queue);
1081 }
1082 
1083 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1084 					 struct blk_mq_ctx *ctx,
1085 					 struct request *rq, struct bio *bio)
1086 {
1087 	if (!hctx_allow_merges(hctx)) {
1088 		blk_mq_bio_to_request(rq, bio);
1089 		spin_lock(&ctx->lock);
1090 insert_rq:
1091 		__blk_mq_insert_request(hctx, rq, false);
1092 		spin_unlock(&ctx->lock);
1093 		return false;
1094 	} else {
1095 		struct request_queue *q = hctx->queue;
1096 
1097 		spin_lock(&ctx->lock);
1098 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1099 			blk_mq_bio_to_request(rq, bio);
1100 			goto insert_rq;
1101 		}
1102 
1103 		spin_unlock(&ctx->lock);
1104 		__blk_mq_free_request(hctx, ctx, rq);
1105 		return true;
1106 	}
1107 }
1108 
1109 struct blk_map_ctx {
1110 	struct blk_mq_hw_ctx *hctx;
1111 	struct blk_mq_ctx *ctx;
1112 };
1113 
1114 static struct request *blk_mq_map_request(struct request_queue *q,
1115 					  struct bio *bio,
1116 					  struct blk_map_ctx *data)
1117 {
1118 	struct blk_mq_hw_ctx *hctx;
1119 	struct blk_mq_ctx *ctx;
1120 	struct request *rq;
1121 	int rw = bio_data_dir(bio);
1122 	struct blk_mq_alloc_data alloc_data;
1123 
1124 	if (unlikely(blk_mq_queue_enter(q))) {
1125 		bio_endio(bio, -EIO);
1126 		return NULL;
1127 	}
1128 
1129 	ctx = blk_mq_get_ctx(q);
1130 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1131 
1132 	if (rw_is_sync(bio->bi_rw))
1133 		rw |= REQ_SYNC;
1134 
1135 	trace_block_getrq(q, bio, rw);
1136 	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1137 			hctx);
1138 	rq = __blk_mq_alloc_request(&alloc_data, rw);
1139 	if (unlikely(!rq)) {
1140 		__blk_mq_run_hw_queue(hctx);
1141 		blk_mq_put_ctx(ctx);
1142 		trace_block_sleeprq(q, bio, rw);
1143 
1144 		ctx = blk_mq_get_ctx(q);
1145 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
1146 		blk_mq_set_alloc_data(&alloc_data, q,
1147 				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1148 		rq = __blk_mq_alloc_request(&alloc_data, rw);
1149 		ctx = alloc_data.ctx;
1150 		hctx = alloc_data.hctx;
1151 	}
1152 
1153 	hctx->queued++;
1154 	data->hctx = hctx;
1155 	data->ctx = ctx;
1156 	return rq;
1157 }
1158 
1159 /*
1160  * Multiple hardware queue variant. This will not use per-process plugs,
1161  * but will attempt to bypass the hctx queueing if we can go straight to
1162  * hardware for SYNC IO.
1163  */
1164 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1165 {
1166 	const int is_sync = rw_is_sync(bio->bi_rw);
1167 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1168 	struct blk_map_ctx data;
1169 	struct request *rq;
1170 
1171 	blk_queue_bounce(q, &bio);
1172 
1173 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1174 		bio_endio(bio, -EIO);
1175 		return;
1176 	}
1177 
1178 	rq = blk_mq_map_request(q, bio, &data);
1179 	if (unlikely(!rq))
1180 		return;
1181 
1182 	if (unlikely(is_flush_fua)) {
1183 		blk_mq_bio_to_request(rq, bio);
1184 		blk_insert_flush(rq);
1185 		goto run_queue;
1186 	}
1187 
1188 	if (is_sync) {
1189 		int ret;
1190 
1191 		blk_mq_bio_to_request(rq, bio);
1192 		blk_mq_start_request(rq, true);
1193 
1194 		/*
1195 		 * For an OK return, we are done. On error, kill it. Anything
1196 		 * else (busy), just add it to our list as we previously
1197 		 * would have done.
1198 		 */
1199 		ret = q->mq_ops->queue_rq(data.hctx, rq);
1200 		if (ret == BLK_MQ_RQ_QUEUE_OK)
1201 			goto done;
1202 		else {
1203 			__blk_mq_requeue_request(rq);
1204 
1205 			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1206 				rq->errors = -EIO;
1207 				blk_mq_end_io(rq, rq->errors);
1208 				goto done;
1209 			}
1210 		}
1211 	}
1212 
1213 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1214 		/*
1215 		 * For a SYNC request, send it to the hardware immediately. For
1216 		 * an ASYNC request, just ensure that we run it later on. The
1217 		 * latter allows for merging opportunities and more efficient
1218 		 * dispatching.
1219 		 */
1220 run_queue:
1221 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1222 	}
1223 done:
1224 	blk_mq_put_ctx(data.ctx);
1225 }
1226 
1227 /*
1228  * Single hardware queue variant. This will attempt to use any per-process
1229  * plug for merging and IO deferral.
1230  */
1231 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1232 {
1233 	const int is_sync = rw_is_sync(bio->bi_rw);
1234 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1235 	unsigned int use_plug, request_count = 0;
1236 	struct blk_map_ctx data;
1237 	struct request *rq;
1238 
1239 	/*
1240 	 * Use the per-task plug only for async, non-flush IO; sync and
1241 	 * flush/FUA requests go straight to the hardware queue.
1242 	 */
1243 	use_plug = !is_flush_fua && !is_sync;
1244 
1245 	blk_queue_bounce(q, &bio);
1246 
1247 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1248 		bio_endio(bio, -EIO);
1249 		return;
1250 	}
1251 
1252 	if (use_plug && !blk_queue_nomerges(q) &&
1253 	    blk_attempt_plug_merge(q, bio, &request_count))
1254 		return;
1255 
1256 	rq = blk_mq_map_request(q, bio, &data);
1257 	if (unlikely(!rq))
1258 		return;
1259 
1260 	if (unlikely(is_flush_fua)) {
1261 		blk_mq_bio_to_request(rq, bio);
1262 		blk_insert_flush(rq);
1263 		goto run_queue;
1264 	}
1265 
1266 	/*
1267 	 * A task plug currently exists. Since this is completely lockless,
1268 	 * utilize that to temporarily store requests until the task is
1269 	 * either done or scheduled away.
1270 	 */
1271 	if (use_plug) {
1272 		struct blk_plug *plug = current->plug;
1273 
1274 		if (plug) {
1275 			blk_mq_bio_to_request(rq, bio);
1276 			if (list_empty(&plug->mq_list))
1277 				trace_block_plug(q);
1278 			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1279 				blk_flush_plug_list(plug, false);
1280 				trace_block_plug(q);
1281 			}
1282 			list_add_tail(&rq->queuelist, &plug->mq_list);
1283 			blk_mq_put_ctx(data.ctx);
1284 			return;
1285 		}
1286 	}
1287 
1288 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1289 		/*
1290 		 * For a SYNC request, send it to the hardware immediately. For
1291 		 * an ASYNC request, just ensure that we run it later on. The
1292 		 * latter allows for merging opportunities and more efficient
1293 		 * dispatching.
1294 		 */
1295 run_queue:
1296 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1297 	}
1298 
1299 	blk_mq_put_ctx(data.ctx);
1300 }
1301 
1302 /*
1303  * Default mapping to a software queue, since we use one per CPU.
1304  */
1305 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1306 {
1307 	return q->queue_hw_ctx[q->mq_map[cpu]];
1308 }
1309 EXPORT_SYMBOL(blk_mq_map_queue);
1310 
1311 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1312 		struct blk_mq_tags *tags, unsigned int hctx_idx)
1313 {
1314 	struct page *page;
1315 
1316 	if (tags->rqs && set->ops->exit_request) {
1317 		int i;
1318 
1319 		for (i = 0; i < tags->nr_tags; i++) {
1320 			if (!tags->rqs[i])
1321 				continue;
1322 			set->ops->exit_request(set->driver_data, tags->rqs[i],
1323 						hctx_idx, i);
1324 			tags->rqs[i] = NULL;
1325 		}
1326 	}
1327 
1328 	while (!list_empty(&tags->page_list)) {
1329 		page = list_first_entry(&tags->page_list, struct page, lru);
1330 		list_del_init(&page->lru);
1331 		__free_pages(page, page->private);
1332 	}
1333 
1334 	kfree(tags->rqs);
1335 
1336 	blk_mq_free_tags(tags);
1337 }
1338 
1339 static size_t order_to_size(unsigned int order)
1340 {
1341 	return (size_t)PAGE_SIZE << order;
1342 }
1343 
1344 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1345 		unsigned int hctx_idx)
1346 {
1347 	struct blk_mq_tags *tags;
1348 	unsigned int i, j, entries_per_page, max_order = 4;
1349 	size_t rq_size, left;
1350 
1351 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1352 				set->numa_node);
1353 	if (!tags)
1354 		return NULL;
1355 
1356 	INIT_LIST_HEAD(&tags->page_list);
1357 
1358 	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1359 				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1360 				 set->numa_node);
1361 	if (!tags->rqs) {
1362 		blk_mq_free_tags(tags);
1363 		return NULL;
1364 	}
1365 
1366 	/*
1367 	 * rq_size is the size of the request plus driver payload, rounded
1368 	 * to the cacheline size
1369 	 */
1370 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1371 				cache_line_size());
1372 	left = rq_size * set->queue_depth;
1373 
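	/*
	 * Editorial illustration with hypothetical numbers: with 4K pages and
	 * rq_size rounding up to 384 bytes, an order-0 page holds
	 * 4096 / 384 = 10 requests, so a queue depth of 64 spans several
	 * pages. The loop below therefore grabs the largest chunks it can
	 * (up to order 4) and falls back to smaller orders, or fails, when
	 * memory is tight.
	 */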
1374 	for (i = 0; i < set->queue_depth; ) {
1375 		int this_order = max_order;
1376 		struct page *page;
1377 		int to_do;
1378 		void *p;
1379 
1380 		while (left < order_to_size(this_order - 1) && this_order)
1381 			this_order--;
1382 
1383 		do {
1384 			page = alloc_pages_node(set->numa_node,
1385 				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1386 				this_order);
1387 			if (page)
1388 				break;
1389 			if (!this_order--)
1390 				break;
1391 			if (order_to_size(this_order) < rq_size)
1392 				break;
1393 		} while (1);
1394 
1395 		if (!page)
1396 			goto fail;
1397 
1398 		page->private = this_order;
1399 		list_add_tail(&page->lru, &tags->page_list);
1400 
1401 		p = page_address(page);
1402 		entries_per_page = order_to_size(this_order) / rq_size;
1403 		to_do = min(entries_per_page, set->queue_depth - i);
1404 		left -= to_do * rq_size;
1405 		for (j = 0; j < to_do; j++) {
1406 			tags->rqs[i] = p;
1407 			if (set->ops->init_request) {
1408 				if (set->ops->init_request(set->driver_data,
1409 						tags->rqs[i], hctx_idx, i,
1410 						set->numa_node)) {
1411 					tags->rqs[i] = NULL;
1412 					goto fail;
1413 				}
1414 			}
1415 
1416 			p += rq_size;
1417 			i++;
1418 		}
1419 	}
1420 
1421 	return tags;
1422 
1423 fail:
1424 	blk_mq_free_rq_map(set, tags, hctx_idx);
1425 	return NULL;
1426 }
1427 
1428 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1429 {
1430 	kfree(bitmap->map);
1431 }
1432 
1433 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1434 {
1435 	unsigned int bpw = 8, total, num_maps, i;
1436 
1437 	bitmap->bits_per_word = bpw;
1438 
1439 	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1440 	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1441 					GFP_KERNEL, node);
1442 	if (!bitmap->map)
1443 		return -ENOMEM;
1444 
1445 	bitmap->map_size = num_maps;
1446 
1447 	total = nr_cpu_ids;
1448 	for (i = 0; i < num_maps; i++) {
1449 		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1450 		total -= bitmap->map[i].depth;
1451 	}
1452 
1453 	return 0;
1454 }
1455 
1456 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1457 {
1458 	struct request_queue *q = hctx->queue;
1459 	struct blk_mq_ctx *ctx;
1460 	LIST_HEAD(tmp);
1461 
1462 	/*
1463 	 * Move ctx entries to new CPU, if this one is going away.
1464 	 */
1465 	ctx = __blk_mq_get_ctx(q, cpu);
1466 
1467 	spin_lock(&ctx->lock);
1468 	if (!list_empty(&ctx->rq_list)) {
1469 		list_splice_init(&ctx->rq_list, &tmp);
1470 		blk_mq_hctx_clear_pending(hctx, ctx);
1471 	}
1472 	spin_unlock(&ctx->lock);
1473 
1474 	if (list_empty(&tmp))
1475 		return NOTIFY_OK;
1476 
1477 	ctx = blk_mq_get_ctx(q);
1478 	spin_lock(&ctx->lock);
1479 
1480 	while (!list_empty(&tmp)) {
1481 		struct request *rq;
1482 
1483 		rq = list_first_entry(&tmp, struct request, queuelist);
1484 		rq->mq_ctx = ctx;
1485 		list_move_tail(&rq->queuelist, &ctx->rq_list);
1486 	}
1487 
1488 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1489 	blk_mq_hctx_mark_pending(hctx, ctx);
1490 
1491 	spin_unlock(&ctx->lock);
1492 
1493 	blk_mq_run_hw_queue(hctx, true);
1494 	blk_mq_put_ctx(ctx);
1495 	return NOTIFY_OK;
1496 }
1497 
1498 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1499 {
1500 	struct request_queue *q = hctx->queue;
1501 	struct blk_mq_tag_set *set = q->tag_set;
1502 
1503 	if (set->tags[hctx->queue_num])
1504 		return NOTIFY_OK;
1505 
1506 	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1507 	if (!set->tags[hctx->queue_num])
1508 		return NOTIFY_STOP;
1509 
1510 	hctx->tags = set->tags[hctx->queue_num];
1511 	return NOTIFY_OK;
1512 }
1513 
1514 static int blk_mq_hctx_notify(void *data, unsigned long action,
1515 			      unsigned int cpu)
1516 {
1517 	struct blk_mq_hw_ctx *hctx = data;
1518 
1519 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1520 		return blk_mq_hctx_cpu_offline(hctx, cpu);
1521 	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1522 		return blk_mq_hctx_cpu_online(hctx, cpu);
1523 
1524 	return NOTIFY_OK;
1525 }
1526 
1527 static void blk_mq_exit_hw_queues(struct request_queue *q,
1528 		struct blk_mq_tag_set *set, int nr_queue)
1529 {
1530 	struct blk_mq_hw_ctx *hctx;
1531 	unsigned int i;
1532 
1533 	queue_for_each_hw_ctx(q, hctx, i) {
1534 		if (i == nr_queue)
1535 			break;
1536 
1537 		blk_mq_tag_idle(hctx);
1538 
1539 		if (set->ops->exit_hctx)
1540 			set->ops->exit_hctx(hctx, i);
1541 
1542 		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1543 		kfree(hctx->ctxs);
1544 		blk_mq_free_bitmap(&hctx->ctx_map);
1545 	}
1546 
1547 }
1548 
1549 static void blk_mq_free_hw_queues(struct request_queue *q,
1550 		struct blk_mq_tag_set *set)
1551 {
1552 	struct blk_mq_hw_ctx *hctx;
1553 	unsigned int i;
1554 
1555 	queue_for_each_hw_ctx(q, hctx, i) {
1556 		free_cpumask_var(hctx->cpumask);
1557 		kfree(hctx);
1558 	}
1559 }
1560 
1561 static int blk_mq_init_hw_queues(struct request_queue *q,
1562 		struct blk_mq_tag_set *set)
1563 {
1564 	struct blk_mq_hw_ctx *hctx;
1565 	unsigned int i;
1566 
1567 	/*
1568 	 * Initialize hardware queues
1569 	 */
1570 	queue_for_each_hw_ctx(q, hctx, i) {
1571 		int node;
1572 
1573 		node = hctx->numa_node;
1574 		if (node == NUMA_NO_NODE)
1575 			node = hctx->numa_node = set->numa_node;
1576 
1577 		INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1578 		INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1579 		spin_lock_init(&hctx->lock);
1580 		INIT_LIST_HEAD(&hctx->dispatch);
1581 		hctx->queue = q;
1582 		hctx->queue_num = i;
1583 		hctx->flags = set->flags;
1584 		hctx->cmd_size = set->cmd_size;
1585 
1586 		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1587 						blk_mq_hctx_notify, hctx);
1588 		blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1589 
1590 		hctx->tags = set->tags[i];
1591 
1592 		/*
1593 		 * Allocate space for all possible cpus to avoid allocation at
1594 		 * runtime
1595 		 */
1596 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1597 						GFP_KERNEL, node);
1598 		if (!hctx->ctxs)
1599 			break;
1600 
1601 		if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1602 			break;
1603 
1604 		hctx->nr_ctx = 0;
1605 
1606 		if (set->ops->init_hctx &&
1607 		    set->ops->init_hctx(hctx, set->driver_data, i))
1608 			break;
1609 	}
1610 
1611 	if (i == q->nr_hw_queues)
1612 		return 0;
1613 
1614 	/*
1615 	 * Init failed
1616 	 */
1617 	blk_mq_exit_hw_queues(q, set, i);
1618 
1619 	return 1;
1620 }
1621 
1622 static void blk_mq_init_cpu_queues(struct request_queue *q,
1623 				   unsigned int nr_hw_queues)
1624 {
1625 	unsigned int i;
1626 
1627 	for_each_possible_cpu(i) {
1628 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1629 		struct blk_mq_hw_ctx *hctx;
1630 
1631 		memset(__ctx, 0, sizeof(*__ctx));
1632 		__ctx->cpu = i;
1633 		spin_lock_init(&__ctx->lock);
1634 		INIT_LIST_HEAD(&__ctx->rq_list);
1635 		__ctx->queue = q;
1636 
1637 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1638 		if (!cpu_online(i))
1639 			continue;
1640 
1641 		hctx = q->mq_ops->map_queue(q, i);
1642 		cpumask_set_cpu(i, hctx->cpumask);
1643 		hctx->nr_ctx++;
1644 
1645 		/*
1646 		 * Set local node, IFF we have more than one hw queue. If
1647 		 * not, we remain on the home node of the device
1648 		 */
1649 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1650 			hctx->numa_node = cpu_to_node(i);
1651 	}
1652 }
1653 
1654 static void blk_mq_map_swqueue(struct request_queue *q)
1655 {
1656 	unsigned int i;
1657 	struct blk_mq_hw_ctx *hctx;
1658 	struct blk_mq_ctx *ctx;
1659 
1660 	queue_for_each_hw_ctx(q, hctx, i) {
1661 		cpumask_clear(hctx->cpumask);
1662 		hctx->nr_ctx = 0;
1663 	}
1664 
1665 	/*
1666 	 * Map software to hardware queues
1667 	 */
1668 	queue_for_each_ctx(q, ctx, i) {
1669 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1670 		if (!cpu_online(i))
1671 			continue;
1672 
1673 		hctx = q->mq_ops->map_queue(q, i);
1674 		cpumask_set_cpu(i, hctx->cpumask);
1675 		ctx->index_hw = hctx->nr_ctx;
1676 		hctx->ctxs[hctx->nr_ctx++] = ctx;
1677 	}
1678 
1679 	queue_for_each_hw_ctx(q, hctx, i) {
1680 		/*
1681 		 * If no software queues are mapped to this hardware queue,
1682 		 * disable it and free the request entries.
1683 		 */
1684 		if (!hctx->nr_ctx) {
1685 			struct blk_mq_tag_set *set = q->tag_set;
1686 
1687 			if (set->tags[i]) {
1688 				blk_mq_free_rq_map(set, set->tags[i], i);
1689 				set->tags[i] = NULL;
1690 				hctx->tags = NULL;
1691 			}
1692 			continue;
1693 		}
1694 
1695 		/*
1696 		 * Initialize batch roundrobin counts
1697 		 */
1698 		hctx->next_cpu = cpumask_first(hctx->cpumask);
1699 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1700 	}
1701 }
1702 
1703 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1704 {
1705 	struct blk_mq_hw_ctx *hctx;
1706 	struct request_queue *q;
1707 	bool shared;
1708 	int i;
1709 
1710 	if (set->tag_list.next == set->tag_list.prev)
1711 		shared = false;
1712 	else
1713 		shared = true;
1714 
1715 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
1716 		blk_mq_freeze_queue(q);
1717 
1718 		queue_for_each_hw_ctx(q, hctx, i) {
1719 			if (shared)
1720 				hctx->flags |= BLK_MQ_F_TAG_SHARED;
1721 			else
1722 				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1723 		}
1724 		blk_mq_unfreeze_queue(q);
1725 	}
1726 }
1727 
1728 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1729 {
1730 	struct blk_mq_tag_set *set = q->tag_set;
1731 
1732 	mutex_lock(&set->tag_list_lock);
1733 	list_del_init(&q->tag_set_list);
1734 	blk_mq_update_tag_set_depth(set);
1735 	mutex_unlock(&set->tag_list_lock);
1736 }
1737 
1738 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1739 				     struct request_queue *q)
1740 {
1741 	q->tag_set = set;
1742 
1743 	mutex_lock(&set->tag_list_lock);
1744 	list_add_tail(&q->tag_set_list, &set->tag_list);
1745 	blk_mq_update_tag_set_depth(set);
1746 	mutex_unlock(&set->tag_list_lock);
1747 }
1748 
1749 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1750 {
1751 	struct blk_mq_hw_ctx **hctxs;
1752 	struct blk_mq_ctx __percpu *ctx;
1753 	struct request_queue *q;
1754 	unsigned int *map;
1755 	int i;
1756 
1757 	ctx = alloc_percpu(struct blk_mq_ctx);
1758 	if (!ctx)
1759 		return ERR_PTR(-ENOMEM);
1760 
1761 	hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1762 			set->numa_node);
1763 
1764 	if (!hctxs)
1765 		goto err_percpu;
1766 
1767 	map = blk_mq_make_queue_map(set);
1768 	if (!map)
1769 		goto err_map;
1770 
1771 	for (i = 0; i < set->nr_hw_queues; i++) {
1772 		int node = blk_mq_hw_queue_to_node(map, i);
1773 
1774 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1775 					GFP_KERNEL, node);
1776 		if (!hctxs[i])
1777 			goto err_hctxs;
1778 
1779 		if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1780 			goto err_hctxs;
1781 
1782 		atomic_set(&hctxs[i]->nr_active, 0);
1783 		hctxs[i]->numa_node = node;
1784 		hctxs[i]->queue_num = i;
1785 	}
1786 
1787 	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1788 	if (!q)
1789 		goto err_hctxs;
1790 
1791 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
1792 		goto err_map;
1793 
1794 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1795 	blk_queue_rq_timeout(q, 30000);
1796 
1797 	q->nr_queues = nr_cpu_ids;
1798 	q->nr_hw_queues = set->nr_hw_queues;
1799 	q->mq_map = map;
1800 
1801 	q->queue_ctx = ctx;
1802 	q->queue_hw_ctx = hctxs;
1803 
1804 	q->mq_ops = set->ops;
1805 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1806 
1807 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
1808 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1809 
1810 	q->sg_reserved_size = INT_MAX;
1811 
1812 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1813 	INIT_LIST_HEAD(&q->requeue_list);
1814 	spin_lock_init(&q->requeue_lock);
1815 
1816 	if (q->nr_hw_queues > 1)
1817 		blk_queue_make_request(q, blk_mq_make_request);
1818 	else
1819 		blk_queue_make_request(q, blk_sq_make_request);
1820 
1821 	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1822 	if (set->timeout)
1823 		blk_queue_rq_timeout(q, set->timeout);
1824 
1825 	/*
1826 	 * Do this after blk_queue_make_request() overrides it...
1827 	 */
1828 	q->nr_requests = set->queue_depth;
1829 
1830 	if (set->ops->complete)
1831 		blk_queue_softirq_done(q, set->ops->complete);
1832 
1833 	blk_mq_init_flush(q);
1834 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1835 
1836 	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1837 				set->cmd_size, cache_line_size()),
1838 				GFP_KERNEL);
1839 	if (!q->flush_rq)
1840 		goto err_hw;
1841 
1842 	if (blk_mq_init_hw_queues(q, set))
1843 		goto err_flush_rq;
1844 
1845 	mutex_lock(&all_q_mutex);
1846 	list_add_tail(&q->all_q_node, &all_q_list);
1847 	mutex_unlock(&all_q_mutex);
1848 
1849 	blk_mq_add_queue_tag_set(set, q);
1850 
1851 	blk_mq_map_swqueue(q);
1852 
1853 	return q;
1854 
1855 err_flush_rq:
1856 	kfree(q->flush_rq);
1857 err_hw:
1858 	blk_cleanup_queue(q);
1859 err_hctxs:
1860 	kfree(map);
1861 	for (i = 0; i < set->nr_hw_queues; i++) {
1862 		if (!hctxs[i])
1863 			break;
1864 		free_cpumask_var(hctxs[i]->cpumask);
1865 		kfree(hctxs[i]);
1866 	}
1867 err_map:
1868 	kfree(hctxs);
1869 err_percpu:
1870 	free_percpu(ctx);
1871 	return ERR_PTR(-ENOMEM);
1872 }
1873 EXPORT_SYMBOL(blk_mq_init_queue);
1874 
1875 void blk_mq_free_queue(struct request_queue *q)
1876 {
1877 	struct blk_mq_tag_set	*set = q->tag_set;
1878 
1879 	blk_mq_del_queue_tag_set(q);
1880 
1881 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1882 	blk_mq_free_hw_queues(q, set);
1883 
1884 	percpu_ref_exit(&q->mq_usage_counter);
1885 
1886 	free_percpu(q->queue_ctx);
1887 	kfree(q->queue_hw_ctx);
1888 	kfree(q->mq_map);
1889 
1890 	q->queue_ctx = NULL;
1891 	q->queue_hw_ctx = NULL;
1892 	q->mq_map = NULL;
1893 
1894 	mutex_lock(&all_q_mutex);
1895 	list_del_init(&q->all_q_node);
1896 	mutex_unlock(&all_q_mutex);
1897 }
1898 
1899 /* Basically redo blk_mq_init_queue with queue frozen */
1900 static void blk_mq_queue_reinit(struct request_queue *q)
1901 {
1902 	blk_mq_freeze_queue(q);
1903 
1904 	blk_mq_sysfs_unregister(q);
1905 
1906 	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1907 
1908 	/*
1909 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1910 	 * we should change hctx numa_node according to new topology (this
1911 	 * involves freeing and re-allocating memory; worth doing?)
1912 	 */
1913 
1914 	blk_mq_map_swqueue(q);
1915 
1916 	blk_mq_sysfs_register(q);
1917 
1918 	blk_mq_unfreeze_queue(q);
1919 }
1920 
1921 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1922 				      unsigned long action, void *hcpu)
1923 {
1924 	struct request_queue *q;
1925 
1926 	/*
1927 	 * Before new mappings are established, a hot-added CPU might already
1928 	 * start handling requests. This doesn't break anything as we map
1929 	 * offline CPUs to the first hardware queue. We will re-init the queue
1930 	 * below to get optimal settings.
1931 	 */
1932 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1933 	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1934 		return NOTIFY_OK;
1935 
1936 	mutex_lock(&all_q_mutex);
1937 	list_for_each_entry(q, &all_q_list, all_q_node)
1938 		blk_mq_queue_reinit(q);
1939 	mutex_unlock(&all_q_mutex);
1940 	return NOTIFY_OK;
1941 }
1942 
1943 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1944 {
1945 	int i;
1946 
1947 	for (i = 0; i < set->nr_hw_queues; i++) {
1948 		set->tags[i] = blk_mq_init_rq_map(set, i);
1949 		if (!set->tags[i])
1950 			goto out_unwind;
1951 	}
1952 
1953 	return 0;
1954 
1955 out_unwind:
1956 	while (--i >= 0)
1957 		blk_mq_free_rq_map(set, set->tags[i], i);
1958 
1959 	set->tags = NULL;
1960 	return -ENOMEM;
1961 }
1962 
1963 /*
1964  * Allocate the request maps associated with this tag_set. Note that this
1965  * may reduce the depth asked for, if memory is tight. set->queue_depth
1966  * will be updated to reflect the allocated depth.
1967  */
1968 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1969 {
1970 	unsigned int depth;
1971 	int err;
1972 
1973 	depth = set->queue_depth;
1974 	do {
1975 		err = __blk_mq_alloc_rq_maps(set);
1976 		if (!err)
1977 			break;
1978 
1979 		set->queue_depth >>= 1;
1980 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
1981 			err = -ENOMEM;
1982 			break;
1983 		}
1984 	} while (set->queue_depth);
1985 
1986 	if (!set->queue_depth || err) {
1987 		pr_err("blk-mq: failed to allocate request map\n");
1988 		return -ENOMEM;
1989 	}
1990 
1991 	if (depth != set->queue_depth)
1992 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
1993 						depth, set->queue_depth);
1994 
1995 	return 0;
1996 }
1997 
1998 /*
1999  * Alloc a tag set to be associated with one or more request queues.
2000  * May fail with EINVAL for various error conditions. May adjust the
2001  * requested depth down, if it is too large. In that case, the set
2002  * value will be stored in set->queue_depth.
2003  */
2004 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2005 {
2006 	if (!set->nr_hw_queues)
2007 		return -EINVAL;
2008 	if (!set->queue_depth)
2009 		return -EINVAL;
2010 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2011 		return -EINVAL;
2012 
2013 	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
2014 		return -EINVAL;
2015 
2016 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2017 		pr_info("blk-mq: reduced tag depth to %u\n",
2018 			BLK_MQ_MAX_DEPTH);
2019 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2020 	}
2021 
2022 	set->tags = kmalloc_node(set->nr_hw_queues *
2023 				 sizeof(struct blk_mq_tags *),
2024 				 GFP_KERNEL, set->numa_node);
2025 	if (!set->tags)
2026 		return -ENOMEM;
2027 
2028 	if (blk_mq_alloc_rq_maps(set))
2029 		goto enomem;
2030 
2031 	mutex_init(&set->tag_list_lock);
2032 	INIT_LIST_HEAD(&set->tag_list);
2033 
2034 	return 0;
2035 enomem:
2036 	kfree(set->tags);
2037 	set->tags = NULL;
2038 	return -ENOMEM;
2039 }
2040 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
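
/*
 * Editorial example (a minimal sketch, not from the original source): the
 * typical driver-side sequence for bringing up a blk-mq queue. "my_mq_ops"
 * and "struct my_cmd" are hypothetical driver-provided names.
 *
 *	struct blk_mq_tag_set set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.numa_node	= NUMA_NO_NODE,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *	struct request_queue *q;
 *	int ret;
 *
 *	ret = blk_mq_alloc_tag_set(&set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(&set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&set);
 *		return PTR_ERR(q);
 *	}
 *
 * Teardown reverses the order: blk_cleanup_queue(q), then
 * blk_mq_free_tag_set(&set).
 */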
2041 
2042 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2043 {
2044 	int i;
2045 
2046 	for (i = 0; i < set->nr_hw_queues; i++) {
2047 		if (set->tags[i])
2048 			blk_mq_free_rq_map(set, set->tags[i], i);
2049 	}
2050 
2051 	kfree(set->tags);
2052 	set->tags = NULL;
2053 }
2054 EXPORT_SYMBOL(blk_mq_free_tag_set);
2055 
2056 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2057 {
2058 	struct blk_mq_tag_set *set = q->tag_set;
2059 	struct blk_mq_hw_ctx *hctx;
2060 	int i, ret;
2061 
2062 	if (!set || nr > set->queue_depth)
2063 		return -EINVAL;
2064 
2065 	ret = 0;
2066 	queue_for_each_hw_ctx(q, hctx, i) {
2067 		ret = blk_mq_tag_update_depth(hctx->tags, nr);
2068 		if (ret)
2069 			break;
2070 	}
2071 
2072 	if (!ret)
2073 		q->nr_requests = nr;
2074 
2075 	return ret;
2076 }
2077 
2078 void blk_mq_disable_hotplug(void)
2079 {
2080 	mutex_lock(&all_q_mutex);
2081 }
2082 
2083 void blk_mq_enable_hotplug(void)
2084 {
2085 	mutex_unlock(&all_q_mutex);
2086 }
2087 
2088 static int __init blk_mq_init(void)
2089 {
2090 	blk_mq_cpu_init();
2091 
2092 	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2093 
2094 	return 0;
2095 }
2096 subsys_initcall(blk_mq_init);
2097