xref: /openbmc/linux/block/blk-mq.c (revision aac5987a)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28 
29 #include <trace/events/block.h>
30 
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-tag.h"
35 #include "blk-stat.h"
36 #include "blk-wbt.h"
37 #include "blk-mq-sched.h"
38 
39 static DEFINE_MUTEX(all_q_mutex);
40 static LIST_HEAD(all_q_list);
41 
42 /*
43  * Check if any of the ctxs have pending work in this hardware queue
44  */
45 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
46 {
47 	return sbitmap_any_bit_set(&hctx->ctx_map) ||
48 			!list_empty_careful(&hctx->dispatch) ||
49 			blk_mq_sched_has_work(hctx);
50 }
51 
52 /*
53  * Mark this ctx as having pending work in this hardware queue
54  */
55 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
56 				     struct blk_mq_ctx *ctx)
57 {
58 	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
59 		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
60 }
61 
62 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
63 				      struct blk_mq_ctx *ctx)
64 {
65 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
66 }
67 
68 void blk_mq_freeze_queue_start(struct request_queue *q)
69 {
70 	int freeze_depth;
71 
72 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
73 	if (freeze_depth == 1) {
74 		percpu_ref_kill(&q->q_usage_counter);
75 		blk_mq_run_hw_queues(q, false);
76 	}
77 }
78 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
79 
80 void blk_mq_freeze_queue_wait(struct request_queue *q)
81 {
82 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
83 }
84 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
85 
86 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
87 				     unsigned long timeout)
88 {
89 	return wait_event_timeout(q->mq_freeze_wq,
90 					percpu_ref_is_zero(&q->q_usage_counter),
91 					timeout);
92 }
93 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
94 
95 /*
96  * Guarantee no request is in use, so we can change any data structure of
97  * the queue afterward.
98  */
99 void blk_freeze_queue(struct request_queue *q)
100 {
101 	/*
102 	 * In the !blk_mq case we are only calling this to kill the
103 	 * q_usage_counter, otherwise this increases the freeze depth
104 	 * and waits for it to return to zero.  For this reason there is
105 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
106 	 * exported to drivers as the only user for unfreeze is blk_mq.
107 	 */
108 	blk_mq_freeze_queue_start(q);
109 	blk_mq_freeze_queue_wait(q);
110 }
111 
112 void blk_mq_freeze_queue(struct request_queue *q)
113 {
114 	/*
115 	 * ...just an alias to keep freeze and unfreeze actions balanced
116 	 * in the blk_mq_* namespace
117 	 */
118 	blk_freeze_queue(q);
119 }
120 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
121 
122 void blk_mq_unfreeze_queue(struct request_queue *q)
123 {
124 	int freeze_depth;
125 
126 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
127 	WARN_ON_ONCE(freeze_depth < 0);
128 	if (!freeze_depth) {
129 		percpu_ref_reinit(&q->q_usage_counter);
130 		wake_up_all(&q->mq_freeze_wq);
131 	}
132 }
133 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
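
/*
 * Editor's note: a minimal, illustrative driver-side sketch of the
 * freeze/unfreeze pair above (my_reconfigure() is hypothetical, not
 * part of this file). Freezing drains every in-flight request, so
 * queue-wide state may be changed safely before unfreezing:
 *
 *	static void my_reconfigure(struct request_queue *q)
 *	{
 *		blk_mq_freeze_queue(q);
 *		... update data structures the hot path reads ...
 *		blk_mq_unfreeze_queue(q);
 *	}
 *
 * Freezes nest via mq_freeze_depth; only the final unfreeze reinits
 * q_usage_counter and wakes waiters.
 */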
134 
135 /**
136  * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
137  * @q: request queue.
138  *
139  * Note: this function does not prevent the struct request end_io()
140  * callback from being invoked. Additionally, new queue_rq() calls may
141  * still occur unless the queue has been stopped first.
142  */
143 void blk_mq_quiesce_queue(struct request_queue *q)
144 {
145 	struct blk_mq_hw_ctx *hctx;
146 	unsigned int i;
147 	bool rcu = false;
148 
149 	blk_mq_stop_hw_queues(q);
150 
151 	queue_for_each_hw_ctx(q, hctx, i) {
152 		if (hctx->flags & BLK_MQ_F_BLOCKING)
153 			synchronize_srcu(&hctx->queue_rq_srcu);
154 		else
155 			rcu = true;
156 	}
157 	if (rcu)
158 		synchronize_rcu();
159 }
160 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
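
/*
 * Editor's note: an illustrative sketch of quiescing (my_update_state()
 * is hypothetical). Since blk_mq_quiesce_queue() above already stops
 * the hardware queues before synchronizing, a driver changing state
 * that is read inside ->queue_rq() can do:
 *
 *	blk_mq_quiesce_queue(q);
 *	my_update_state(q);
 *	blk_mq_start_stopped_hw_queues(q, true);
 */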
161 
162 void blk_mq_wake_waiters(struct request_queue *q)
163 {
164 	struct blk_mq_hw_ctx *hctx;
165 	unsigned int i;
166 
167 	queue_for_each_hw_ctx(q, hctx, i)
168 		if (blk_mq_hw_queue_mapped(hctx))
169 			blk_mq_tag_wakeup_all(hctx->tags, true);
170 
171 	/*
172 	 * If we are called because the queue has now been marked as
173 	 * dying, we need to ensure that processes currently waiting on
174 	 * the queue are notified as well.
175 	 */
176 	wake_up_all(&q->mq_freeze_wq);
177 }
178 
179 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
180 {
181 	return blk_mq_has_free_tags(hctx->tags);
182 }
183 EXPORT_SYMBOL(blk_mq_can_queue);
184 
185 void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
186 			struct request *rq, unsigned int op)
187 {
188 	INIT_LIST_HEAD(&rq->queuelist);
189 	/* csd/requeue_work/fifo_time is initialized before use */
190 	rq->q = q;
191 	rq->mq_ctx = ctx;
192 	rq->cmd_flags = op;
193 	if (blk_queue_io_stat(q))
194 		rq->rq_flags |= RQF_IO_STAT;
195 	/* do not touch atomic flags, it needs atomic ops against the timer */
196 	rq->cpu = -1;
197 	INIT_HLIST_NODE(&rq->hash);
198 	RB_CLEAR_NODE(&rq->rb_node);
199 	rq->rq_disk = NULL;
200 	rq->part = NULL;
201 	rq->start_time = jiffies;
202 #ifdef CONFIG_BLK_CGROUP
203 	rq->rl = NULL;
204 	set_start_time_ns(rq);
205 	rq->io_start_time_ns = 0;
206 #endif
207 	rq->nr_phys_segments = 0;
208 #if defined(CONFIG_BLK_DEV_INTEGRITY)
209 	rq->nr_integrity_segments = 0;
210 #endif
211 	rq->special = NULL;
212 	/* tag was already set */
213 	rq->errors = 0;
214 	rq->extra_len = 0;
215 
216 	INIT_LIST_HEAD(&rq->timeout_list);
217 	rq->timeout = 0;
218 
219 	rq->end_io = NULL;
220 	rq->end_io_data = NULL;
221 	rq->next_rq = NULL;
222 
223 	ctx->rq_dispatched[op_is_sync(op)]++;
224 }
225 EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
226 
227 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
228 				       unsigned int op)
229 {
230 	struct request *rq;
231 	unsigned int tag;
232 
233 	tag = blk_mq_get_tag(data);
234 	if (tag != BLK_MQ_TAG_FAIL) {
235 		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
236 
237 		rq = tags->static_rqs[tag];
238 
239 		if (data->flags & BLK_MQ_REQ_INTERNAL) {
240 			rq->tag = -1;
241 			rq->internal_tag = tag;
242 		} else {
243 			if (blk_mq_tag_busy(data->hctx)) {
244 				rq->rq_flags = RQF_MQ_INFLIGHT;
245 				atomic_inc(&data->hctx->nr_active);
246 			}
247 			rq->tag = tag;
248 			rq->internal_tag = -1;
249 			data->hctx->tags->rqs[rq->tag] = rq;
250 		}
251 
252 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
253 		return rq;
254 	}
255 
256 	return NULL;
257 }
258 EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
259 
260 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
261 		unsigned int flags)
262 {
263 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
264 	struct request *rq;
265 	int ret;
266 
267 	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
268 	if (ret)
269 		return ERR_PTR(ret);
270 
271 	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
272 
273 	blk_mq_put_ctx(alloc_data.ctx);
274 	blk_queue_exit(q);
275 
276 	if (!rq)
277 		return ERR_PTR(-EWOULDBLOCK);
278 
279 	rq->__data_len = 0;
280 	rq->__sector = (sector_t) -1;
281 	rq->bio = rq->biotail = NULL;
282 	return rq;
283 }
284 EXPORT_SYMBOL(blk_mq_alloc_request);
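
/*
 * Editor's note: an illustrative, hypothetical caller of the allocator
 * above. Failure is reported via ERR_PTR(), so callers test with
 * IS_ERR() rather than comparing against NULL:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_READ, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and issue rq, then ...
 *	blk_mq_free_request(rq);
 */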
285 
286 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
287 		unsigned int flags, unsigned int hctx_idx)
288 {
289 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
290 	struct request *rq;
291 	unsigned int cpu;
292 	int ret;
293 
294 	/*
295 	 * If the tag allocator sleeps we could get an allocation for a
296 	 * different hardware context.  No need to complicate the low level
297 	 * allocator for the rare use case of a command tied to
298 	 * a specific queue.
299 	 */
300 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
301 		return ERR_PTR(-EINVAL);
302 
303 	if (hctx_idx >= q->nr_hw_queues)
304 		return ERR_PTR(-EIO);
305 
306 	ret = blk_queue_enter(q, true);
307 	if (ret)
308 		return ERR_PTR(ret);
309 
310 	/*
311 	 * Check if the hardware context is actually mapped to anything.
312 	 * If not, tell the caller that it should skip this queue.
313 	 */
314 	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
315 	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
316 		blk_queue_exit(q);
317 		return ERR_PTR(-EXDEV);
318 	}
319 	cpu = cpumask_first(alloc_data.hctx->cpumask);
320 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
321 
322 	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
323 
324 	blk_mq_put_ctx(alloc_data.ctx);
325 	blk_queue_exit(q);
326 
327 	if (!rq)
328 		return ERR_PTR(-EWOULDBLOCK);
329 
330 	return rq;
331 }
332 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
333 
334 void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
335 			     struct request *rq)
336 {
337 	const int sched_tag = rq->internal_tag;
338 	struct request_queue *q = rq->q;
339 
340 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
341 		atomic_dec(&hctx->nr_active);
342 
343 	wbt_done(q->rq_wb, &rq->issue_stat);
344 	rq->rq_flags = 0;
345 
346 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
347 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
348 	if (rq->tag != -1)
349 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
350 	if (sched_tag != -1)
351 		blk_mq_sched_completed_request(hctx, rq);
352 	blk_mq_sched_restart_queues(hctx);
353 	blk_queue_exit(q);
354 }
355 
356 static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
357 				     struct request *rq)
358 {
359 	struct blk_mq_ctx *ctx = rq->mq_ctx;
360 
361 	ctx->rq_completed[rq_is_sync(rq)]++;
362 	__blk_mq_finish_request(hctx, ctx, rq);
363 }
364 
365 void blk_mq_finish_request(struct request *rq)
366 {
367 	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
368 }
369 
370 void blk_mq_free_request(struct request *rq)
371 {
372 	blk_mq_sched_put_request(rq);
373 }
374 EXPORT_SYMBOL_GPL(blk_mq_free_request);
375 
376 inline void __blk_mq_end_request(struct request *rq, int error)
377 {
378 	blk_account_io_done(rq);
379 
380 	if (rq->end_io) {
381 		wbt_done(rq->q->rq_wb, &rq->issue_stat);
382 		rq->end_io(rq, error);
383 	} else {
384 		if (unlikely(blk_bidi_rq(rq)))
385 			blk_mq_free_request(rq->next_rq);
386 		blk_mq_free_request(rq);
387 	}
388 }
389 EXPORT_SYMBOL(__blk_mq_end_request);
390 
391 void blk_mq_end_request(struct request *rq, int error)
392 {
393 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
394 		BUG();
395 	__blk_mq_end_request(rq, error);
396 }
397 EXPORT_SYMBOL(blk_mq_end_request);
398 
399 static void __blk_mq_complete_request_remote(void *data)
400 {
401 	struct request *rq = data;
402 
403 	rq->q->softirq_done_fn(rq);
404 }
405 
406 static void blk_mq_ipi_complete_request(struct request *rq)
407 {
408 	struct blk_mq_ctx *ctx = rq->mq_ctx;
409 	bool shared = false;
410 	int cpu;
411 
412 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
413 		rq->q->softirq_done_fn(rq);
414 		return;
415 	}
416 
417 	cpu = get_cpu();
418 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
419 		shared = cpus_share_cache(cpu, ctx->cpu);
420 
421 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
422 		rq->csd.func = __blk_mq_complete_request_remote;
423 		rq->csd.info = rq;
424 		rq->csd.flags = 0;
425 		smp_call_function_single_async(ctx->cpu, &rq->csd);
426 	} else {
427 		rq->q->softirq_done_fn(rq);
428 	}
429 	put_cpu();
430 }
431 
432 static void blk_mq_stat_add(struct request *rq)
433 {
434 	if (rq->rq_flags & RQF_STATS) {
435 		/*
436 		 * We could use rq->mq_ctx here, but there's less of a risk
437 		 * of races if we have the completion event add the stats
438 		 * to the local software queue.
439 		 */
440 		struct blk_mq_ctx *ctx;
441 
442 		ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
443 		blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
444 	}
445 }
446 
447 static void __blk_mq_complete_request(struct request *rq)
448 {
449 	struct request_queue *q = rq->q;
450 
451 	blk_mq_stat_add(rq);
452 
453 	if (!q->softirq_done_fn)
454 		blk_mq_end_request(rq, rq->errors);
455 	else
456 		blk_mq_ipi_complete_request(rq);
457 }
458 
459 /**
460  * blk_mq_complete_request - end I/O on a request
461  * @rq:		the request being processed
462  *
463  * Description:
464  *	Ends all I/O on a request. It does not handle partial completions.
465  *	The actual completion happens out-of-order, through an IPI handler.
466  **/
467 void blk_mq_complete_request(struct request *rq, int error)
468 {
469 	struct request_queue *q = rq->q;
470 
471 	if (unlikely(blk_should_fake_timeout(q)))
472 		return;
473 	if (!blk_mark_rq_complete(rq)) {
474 		rq->errors = error;
475 		__blk_mq_complete_request(rq);
476 	}
477 }
478 EXPORT_SYMBOL(blk_mq_complete_request);
479 
480 int blk_mq_request_started(struct request *rq)
481 {
482 	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
483 }
484 EXPORT_SYMBOL_GPL(blk_mq_request_started);
485 
486 void blk_mq_start_request(struct request *rq)
487 {
488 	struct request_queue *q = rq->q;
489 
490 	blk_mq_sched_started_request(rq);
491 
492 	trace_block_rq_issue(q, rq);
493 
494 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
495 		blk_stat_set_issue_time(&rq->issue_stat);
496 		rq->rq_flags |= RQF_STATS;
497 		wbt_issue(q->rq_wb, &rq->issue_stat);
498 	}
499 
500 	blk_add_timer(rq);
501 
502 	/*
503 	 * Ensure that ->deadline is visible before we set the started
504 	 * flag and clear the completed flag.
505 	 */
506 	smp_mb__before_atomic();
507 
508 	/*
509 	 * Mark us as started and clear complete. Complete might have been
510 	 * set if requeue raced with timeout, which then marked it as
511 	 * complete. So be sure to clear complete again when we start
512 	 * the request, otherwise we'll ignore the completion event.
513 	 */
514 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
515 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
516 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
517 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
518 
519 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
520 		/*
521 		 * Make sure space for the drain appears.  We know we can do
522 		 * this because max_hw_segments has been adjusted to be one
523 		 * fewer than the device can handle.
524 		 */
525 		rq->nr_phys_segments++;
526 	}
527 }
528 EXPORT_SYMBOL(blk_mq_start_request);
529 
530 static void __blk_mq_requeue_request(struct request *rq)
531 {
532 	struct request_queue *q = rq->q;
533 
534 	trace_block_rq_requeue(q, rq);
535 	wbt_requeue(q->rq_wb, &rq->issue_stat);
536 	blk_mq_sched_requeue_request(rq);
537 
538 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
539 		if (q->dma_drain_size && blk_rq_bytes(rq))
540 			rq->nr_phys_segments--;
541 	}
542 }
543 
544 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
545 {
546 	__blk_mq_requeue_request(rq);
547 
548 	BUG_ON(blk_queued_rq(rq));
549 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
550 }
551 EXPORT_SYMBOL(blk_mq_requeue_request);
552 
553 static void blk_mq_requeue_work(struct work_struct *work)
554 {
555 	struct request_queue *q =
556 		container_of(work, struct request_queue, requeue_work.work);
557 	LIST_HEAD(rq_list);
558 	struct request *rq, *next;
559 	unsigned long flags;
560 
561 	spin_lock_irqsave(&q->requeue_lock, flags);
562 	list_splice_init(&q->requeue_list, &rq_list);
563 	spin_unlock_irqrestore(&q->requeue_lock, flags);
564 
565 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
566 		if (!(rq->rq_flags & RQF_SOFTBARRIER))
567 			continue;
568 
569 		rq->rq_flags &= ~RQF_SOFTBARRIER;
570 		list_del_init(&rq->queuelist);
571 		blk_mq_sched_insert_request(rq, true, false, false, true);
572 	}
573 
574 	while (!list_empty(&rq_list)) {
575 		rq = list_entry(rq_list.next, struct request, queuelist);
576 		list_del_init(&rq->queuelist);
577 		blk_mq_sched_insert_request(rq, false, false, false, true);
578 	}
579 
580 	blk_mq_run_hw_queues(q, false);
581 }
582 
583 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
584 				bool kick_requeue_list)
585 {
586 	struct request_queue *q = rq->q;
587 	unsigned long flags;
588 
589 	/*
590 	 * We abuse this flag that is otherwise used by the I/O scheduler to
591 	 * request head insertion from the workqueue.
592 	 */
593 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
594 
595 	spin_lock_irqsave(&q->requeue_lock, flags);
596 	if (at_head) {
597 		rq->rq_flags |= RQF_SOFTBARRIER;
598 		list_add(&rq->queuelist, &q->requeue_list);
599 	} else {
600 		list_add_tail(&rq->queuelist, &q->requeue_list);
601 	}
602 	spin_unlock_irqrestore(&q->requeue_lock, flags);
603 
604 	if (kick_requeue_list)
605 		blk_mq_kick_requeue_list(q);
606 }
607 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
608 
609 void blk_mq_kick_requeue_list(struct request_queue *q)
610 {
611 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
612 }
613 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
614 
615 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
616 				    unsigned long msecs)
617 {
618 	kblockd_schedule_delayed_work(&q->requeue_work,
619 				      msecs_to_jiffies(msecs));
620 }
621 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
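
/*
 * Editor's note: an illustrative sketch of the requeue helpers above.
 * A driver hitting a transient resource shortage can park the request
 * on the requeue list and have it reprocessed after a delay
 * (MY_RETRY_MSECS is hypothetical):
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(q, MY_RETRY_MSECS);
 */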
622 
623 void blk_mq_abort_requeue_list(struct request_queue *q)
624 {
625 	unsigned long flags;
626 	LIST_HEAD(rq_list);
627 
628 	spin_lock_irqsave(&q->requeue_lock, flags);
629 	list_splice_init(&q->requeue_list, &rq_list);
630 	spin_unlock_irqrestore(&q->requeue_lock, flags);
631 
632 	while (!list_empty(&rq_list)) {
633 		struct request *rq;
634 
635 		rq = list_first_entry(&rq_list, struct request, queuelist);
636 		list_del_init(&rq->queuelist);
637 		rq->errors = -EIO;
638 		blk_mq_end_request(rq, rq->errors);
639 	}
640 }
641 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
642 
643 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
644 {
645 	if (tag < tags->nr_tags) {
646 		prefetch(tags->rqs[tag]);
647 		return tags->rqs[tag];
648 	}
649 
650 	return NULL;
651 }
652 EXPORT_SYMBOL(blk_mq_tag_to_rq);
653 
654 struct blk_mq_timeout_data {
655 	unsigned long next;
656 	unsigned int next_set;
657 };
658 
659 void blk_mq_rq_timed_out(struct request *req, bool reserved)
660 {
661 	const struct blk_mq_ops *ops = req->q->mq_ops;
662 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
663 
664 	/*
665 	 * We know that complete is set at this point. If STARTED isn't set
666 	 * anymore, then the request isn't active and the "timeout" should
667 	 * just be ignored. This can happen due to the bitflag ordering.
668 	 * Timeout first checks if STARTED is set, and if it is, assumes
669 	 * the request is active. But if we race with completion, then
670 	 * both flags will get cleared. So check here again, and ignore
671 	 * a timeout event with a request that isn't active.
672 	 */
673 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
674 		return;
675 
676 	if (ops->timeout)
677 		ret = ops->timeout(req, reserved);
678 
679 	switch (ret) {
680 	case BLK_EH_HANDLED:
681 		__blk_mq_complete_request(req);
682 		break;
683 	case BLK_EH_RESET_TIMER:
684 		blk_add_timer(req);
685 		blk_clear_rq_complete(req);
686 		break;
687 	case BLK_EH_NOT_HANDLED:
688 		break;
689 	default:
690 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
691 		break;
692 	}
693 }
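
/*
 * Editor's note: the switch above consumes a driver's ->timeout return
 * value. An illustrative, hypothetical handler (my_abort_cmd() is not a
 * real helper) that aborts the command and lets the core complete it:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (my_abort_cmd(rq))
 *			return BLK_EH_HANDLED;
 *		return BLK_EH_RESET_TIMER;
 *	}
 */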
694 
695 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
696 		struct request *rq, void *priv, bool reserved)
697 {
698 	struct blk_mq_timeout_data *data = priv;
699 
700 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
701 		/*
702 		 * If a request wasn't started before the queue was
703 		 * marked dying, kill it here or it'll go unnoticed.
704 		 */
705 		if (unlikely(blk_queue_dying(rq->q))) {
706 			rq->errors = -EIO;
707 			blk_mq_end_request(rq, rq->errors);
708 		}
709 		return;
710 	}
711 
712 	if (time_after_eq(jiffies, rq->deadline)) {
713 		if (!blk_mark_rq_complete(rq))
714 			blk_mq_rq_timed_out(rq, reserved);
715 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
716 		data->next = rq->deadline;
717 		data->next_set = 1;
718 	}
719 }
720 
721 static void blk_mq_timeout_work(struct work_struct *work)
722 {
723 	struct request_queue *q =
724 		container_of(work, struct request_queue, timeout_work);
725 	struct blk_mq_timeout_data data = {
726 		.next		= 0,
727 		.next_set	= 0,
728 	};
729 	int i;
730 
731 	/* A deadlock might occur if a request is stuck requiring a
732 	 * timeout at the same time a queue freeze is waiting for
733 	 * completion, since the timeout code would not be able to
734 	 * acquire the queue reference here.
735 	 *
736 	 * That's why we don't use blk_queue_enter here; instead, we use
737 	 * percpu_ref_tryget directly, because we need to be able to
738 	 * obtain a reference even in the short window between the queue
739 	 * starting to freeze, by dropping the first reference in
740 	 * blk_mq_freeze_queue_start, and the moment the last request is
741 	 * consumed, marked by the instant q_usage_counter reaches
742 	 * zero.
743 	 */
744 	if (!percpu_ref_tryget(&q->q_usage_counter))
745 		return;
746 
747 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
748 
749 	if (data.next_set) {
750 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
751 		mod_timer(&q->timeout, data.next);
752 	} else {
753 		struct blk_mq_hw_ctx *hctx;
754 
755 		queue_for_each_hw_ctx(q, hctx, i) {
756 			/* the hctx may be unmapped, so check it here */
757 			if (blk_mq_hw_queue_mapped(hctx))
758 				blk_mq_tag_idle(hctx);
759 		}
760 	}
761 	blk_queue_exit(q);
762 }
763 
764 /*
765  * Reverse check our software queue for entries that we could potentially
766  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
767  * too much time checking for merges.
768  */
769 static bool blk_mq_attempt_merge(struct request_queue *q,
770 				 struct blk_mq_ctx *ctx, struct bio *bio)
771 {
772 	struct request *rq;
773 	int checked = 8;
774 
775 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
776 		bool merged = false;
777 
778 		if (!checked--)
779 			break;
780 
781 		if (!blk_rq_merge_ok(rq, bio))
782 			continue;
783 
784 		switch (blk_try_merge(rq, bio)) {
785 		case ELEVATOR_BACK_MERGE:
786 			if (blk_mq_sched_allow_merge(q, rq, bio))
787 				merged = bio_attempt_back_merge(q, rq, bio);
788 			break;
789 		case ELEVATOR_FRONT_MERGE:
790 			if (blk_mq_sched_allow_merge(q, rq, bio))
791 				merged = bio_attempt_front_merge(q, rq, bio);
792 			break;
793 		case ELEVATOR_DISCARD_MERGE:
794 			merged = bio_attempt_discard_merge(q, rq, bio);
795 			break;
796 		default:
797 			continue;
798 		}
799 
800 		if (merged)
801 			ctx->rq_merged++;
802 		return merged;
803 	}
804 
805 	return false;
806 }
807 
808 struct flush_busy_ctx_data {
809 	struct blk_mq_hw_ctx *hctx;
810 	struct list_head *list;
811 };
812 
813 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
814 {
815 	struct flush_busy_ctx_data *flush_data = data;
816 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
817 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
818 
819 	sbitmap_clear_bit(sb, bitnr);
820 	spin_lock(&ctx->lock);
821 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
822 	spin_unlock(&ctx->lock);
823 	return true;
824 }
825 
826 /*
827  * Process software queues that have been marked busy, splicing them
828  * to the for-dispatch list.
829  */
830 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
831 {
832 	struct flush_busy_ctx_data data = {
833 		.hctx = hctx,
834 		.list = list,
835 	};
836 
837 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
838 }
839 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
840 
841 static inline unsigned int queued_to_index(unsigned int queued)
842 {
843 	if (!queued)
844 		return 0;
845 
846 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
847 }
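
/*
 * Editor's note, worked example for the bucketing above (assuming no
 * clamping by BLK_MQ_MAX_DISPATCH_ORDER): queued == 0 maps to bucket 0,
 * queued == 1 to ilog2(1) + 1 == 1, queued in 2..3 to 2, 4..7 to 3,
 * and so on, making hctx->dispatched[] a log2 histogram of how many
 * requests each queue run dispatched.
 */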
848 
849 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
850 			   bool wait)
851 {
852 	struct blk_mq_alloc_data data = {
853 		.q = rq->q,
854 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
855 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
856 	};
857 
858 	if (rq->tag != -1) {
859 done:
860 		if (hctx)
861 			*hctx = data.hctx;
862 		return true;
863 	}
864 
865 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
866 		data.flags |= BLK_MQ_REQ_RESERVED;
867 
868 	rq->tag = blk_mq_get_tag(&data);
869 	if (rq->tag >= 0) {
870 		if (blk_mq_tag_busy(data.hctx)) {
871 			rq->rq_flags |= RQF_MQ_INFLIGHT;
872 			atomic_inc(&data.hctx->nr_active);
873 		}
874 		data.hctx->tags->rqs[rq->tag] = rq;
875 		goto done;
876 	}
877 
878 	return false;
879 }
880 
881 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
882 				    struct request *rq)
883 {
884 	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
885 	rq->tag = -1;
886 
887 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
888 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
889 		atomic_dec(&hctx->nr_active);
890 	}
891 }
892 
893 static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
894 				       struct request *rq)
895 {
896 	if (rq->tag == -1 || rq->internal_tag == -1)
897 		return;
898 
899 	__blk_mq_put_driver_tag(hctx, rq);
900 }
901 
902 static void blk_mq_put_driver_tag(struct request *rq)
903 {
904 	struct blk_mq_hw_ctx *hctx;
905 
906 	if (rq->tag == -1 || rq->internal_tag == -1)
907 		return;
908 
909 	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
910 	__blk_mq_put_driver_tag(hctx, rq);
911 }
912 
913 /*
914  * If we fail getting a driver tag because all the driver tags are already
915  * assigned and on the dispatch list, BUT the first entry does not have a
916  * tag, then we could deadlock. For that case, move entries with assigned
917  * driver tags to the front, leaving the set of tagged requests in the
918  * same order, and the untagged set in the same order.
919  */
920 static bool reorder_tags_to_front(struct list_head *list)
921 {
922 	struct request *rq, *tmp, *first = NULL;
923 
924 	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
925 		if (rq == first)
926 			break;
927 		if (rq->tag != -1) {
928 			list_move(&rq->queuelist, list);
929 			if (!first)
930 				first = rq;
931 		}
932 	}
933 
934 	return first != NULL;
935 }
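
/*
 * Editor's note, worked example: for a list A(untagged) B(tagged)
 * C(untagged) D(tagged), the reverse walk moves D, then B, to the
 * front, yielding B D A C: tagged requests first, with each subset
 * keeping its original relative order.
 */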
936 
937 static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
938 				void *key)
939 {
940 	struct blk_mq_hw_ctx *hctx;
941 
942 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
943 
944 	list_del(&wait->task_list);
945 	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
946 	blk_mq_run_hw_queue(hctx, true);
947 	return 1;
948 }
949 
950 static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
951 {
952 	struct sbq_wait_state *ws;
953 
954 	/*
955 	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
956 	 * The thread which wins the race to grab this bit adds the hardware
957 	 * queue to the wait queue.
958 	 */
959 	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
960 	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
961 		return false;
962 
963 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
964 	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
965 
966 	/*
967 	 * As soon as this returns, it's no longer safe to fiddle with
968 	 * hctx->dispatch_wait, since a completion can wake up the wait queue
969 	 * and unlock the bit.
970 	 */
971 	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
972 	return true;
973 }
974 
975 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
976 {
977 	struct request_queue *q = hctx->queue;
978 	struct request *rq;
979 	LIST_HEAD(driver_list);
980 	struct list_head *dptr;
981 	int queued, ret = BLK_MQ_RQ_QUEUE_OK;
982 
983 	/*
984 	 * Start off with dptr being NULL, so we start the first request
985 	 * immediately, even if we have more pending.
986 	 */
987 	dptr = NULL;
988 
989 	/*
990 	 * Now process all the entries, sending them to the driver.
991 	 */
992 	queued = 0;
993 	while (!list_empty(list)) {
994 		struct blk_mq_queue_data bd;
995 
996 		rq = list_first_entry(list, struct request, queuelist);
997 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
998 			if (!queued && reorder_tags_to_front(list))
999 				continue;
1000 
1001 			/*
1002 			 * The initial allocation attempt failed, so we need to
1003 			 * rerun the hardware queue when a tag is freed.
1004 			 */
1005 			if (blk_mq_dispatch_wait_add(hctx)) {
1006 				/*
1007 				 * It's possible that a tag was freed in the
1008 				 * window between the allocation failure and
1009 				 * adding the hardware queue to the wait queue.
1010 				 */
1011 				if (!blk_mq_get_driver_tag(rq, &hctx, false))
1012 					break;
1013 			} else {
1014 				break;
1015 			}
1016 		}
1017 
1018 		list_del_init(&rq->queuelist);
1019 
1020 		bd.rq = rq;
1021 		bd.list = dptr;
1022 
1023 		/*
1024 		 * Flag last if we have no more requests, or if we have more
1025 		 * but can't assign a driver tag to it.
1026 		 */
1027 		if (list_empty(list))
1028 			bd.last = true;
1029 		else {
1030 			struct request *nxt;
1031 
1032 			nxt = list_first_entry(list, struct request, queuelist);
1033 			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1034 		}
1035 
1036 		ret = q->mq_ops->queue_rq(hctx, &bd);
1037 		switch (ret) {
1038 		case BLK_MQ_RQ_QUEUE_OK:
1039 			queued++;
1040 			break;
1041 		case BLK_MQ_RQ_QUEUE_BUSY:
1042 			blk_mq_put_driver_tag_hctx(hctx, rq);
1043 			list_add(&rq->queuelist, list);
1044 			__blk_mq_requeue_request(rq);
1045 			break;
1046 		default:
1047 			pr_err("blk-mq: bad return on queue: %d\n", ret);
1048 		case BLK_MQ_RQ_QUEUE_ERROR:
1049 			rq->errors = -EIO;
1050 			blk_mq_end_request(rq, rq->errors);
1051 			break;
1052 		}
1053 
1054 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
1055 			break;
1056 
1057 		/*
1058 		 * We've done the first request. If we have more than 1
1059 		 * left in the list, set dptr to defer issue.
1060 		 */
1061 		if (!dptr && list->next != list->prev)
1062 			dptr = &driver_list;
1063 	}
1064 
1065 	hctx->dispatched[queued_to_index(queued)]++;
1066 
1067 	/*
1068 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1069 	 * that is where we will continue on next queue run.
1070 	 */
1071 	if (!list_empty(list)) {
1072 		/*
1073 		 * If we got a driver tag for the next request already,
1074 		 * free it again.
1075 		 */
1076 		rq = list_first_entry(list, struct request, queuelist);
1077 		blk_mq_put_driver_tag(rq);
1078 
1079 		spin_lock(&hctx->lock);
1080 		list_splice_init(list, &hctx->dispatch);
1081 		spin_unlock(&hctx->lock);
1082 
1083 		/*
1084 		 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
1085 		 * it's possible the queue is stopped and restarted again
1086 		 * before this. Queue restart will dispatch requests. And since
1087 		 * requests in rq_list aren't added into hctx->dispatch yet,
1088 		 * the requests in rq_list might get lost.
1089 		 *
1090 		 * blk_mq_run_hw_queue() already checks the STOPPED bit
1091 		 *
1092 		 * If RESTART or TAG_WAITING is set, then let completion restart
1093 		 * the queue instead of potentially looping here.
1094 		 */
1095 		if (!blk_mq_sched_needs_restart(hctx) &&
1096 		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
1097 			blk_mq_run_hw_queue(hctx, true);
1098 	}
1099 
1100 	return queued != 0;
1101 }
1102 
1103 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1104 {
1105 	int srcu_idx;
1106 
1107 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1108 		cpu_online(hctx->next_cpu));
1109 
1110 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1111 		rcu_read_lock();
1112 		blk_mq_sched_dispatch_requests(hctx);
1113 		rcu_read_unlock();
1114 	} else {
1115 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
1116 		blk_mq_sched_dispatch_requests(hctx);
1117 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1118 	}
1119 }
1120 
1121 /*
1122  * It'd be great if the workqueue API had a way to pass
1123  * in a mask and had some smarts for more clever placement.
1124  * For now we just round-robin here, switching for every
1125  * BLK_MQ_CPU_WORK_BATCH queued items.
1126  */
1127 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1128 {
1129 	if (hctx->queue->nr_hw_queues == 1)
1130 		return WORK_CPU_UNBOUND;
1131 
1132 	if (--hctx->next_cpu_batch <= 0) {
1133 		int next_cpu;
1134 
1135 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1136 		if (next_cpu >= nr_cpu_ids)
1137 			next_cpu = cpumask_first(hctx->cpumask);
1138 
1139 		hctx->next_cpu = next_cpu;
1140 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1141 	}
1142 
1143 	return hctx->next_cpu;
1144 }
1145 
1146 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1147 {
1148 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
1149 		     !blk_mq_hw_queue_mapped(hctx)))
1150 		return;
1151 
1152 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1153 		int cpu = get_cpu();
1154 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1155 			__blk_mq_run_hw_queue(hctx);
1156 			put_cpu();
1157 			return;
1158 		}
1159 
1160 		put_cpu();
1161 	}
1162 
1163 	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
1164 }
1165 
1166 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1167 {
1168 	struct blk_mq_hw_ctx *hctx;
1169 	int i;
1170 
1171 	queue_for_each_hw_ctx(q, hctx, i) {
1172 		if (!blk_mq_hctx_has_pending(hctx) ||
1173 		    blk_mq_hctx_stopped(hctx))
1174 			continue;
1175 
1176 		blk_mq_run_hw_queue(hctx, async);
1177 	}
1178 }
1179 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1180 
1181 /**
1182  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1183  * @q: request queue.
1184  *
1185  * The caller is responsible for serializing this function against
1186  * blk_mq_{start,stop}_hw_queue().
1187  */
1188 bool blk_mq_queue_stopped(struct request_queue *q)
1189 {
1190 	struct blk_mq_hw_ctx *hctx;
1191 	int i;
1192 
1193 	queue_for_each_hw_ctx(q, hctx, i)
1194 		if (blk_mq_hctx_stopped(hctx))
1195 			return true;
1196 
1197 	return false;
1198 }
1199 EXPORT_SYMBOL(blk_mq_queue_stopped);
1200 
1201 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1202 {
1203 	cancel_work(&hctx->run_work);
1204 	cancel_delayed_work(&hctx->delay_work);
1205 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1206 }
1207 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1208 
1209 void blk_mq_stop_hw_queues(struct request_queue *q)
1210 {
1211 	struct blk_mq_hw_ctx *hctx;
1212 	int i;
1213 
1214 	queue_for_each_hw_ctx(q, hctx, i)
1215 		blk_mq_stop_hw_queue(hctx);
1216 }
1217 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1218 
1219 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1220 {
1221 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1222 
1223 	blk_mq_run_hw_queue(hctx, false);
1224 }
1225 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1226 
1227 void blk_mq_start_hw_queues(struct request_queue *q)
1228 {
1229 	struct blk_mq_hw_ctx *hctx;
1230 	int i;
1231 
1232 	queue_for_each_hw_ctx(q, hctx, i)
1233 		blk_mq_start_hw_queue(hctx);
1234 }
1235 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1236 
1237 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1238 {
1239 	if (!blk_mq_hctx_stopped(hctx))
1240 		return;
1241 
1242 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1243 	blk_mq_run_hw_queue(hctx, async);
1244 }
1245 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1246 
1247 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1248 {
1249 	struct blk_mq_hw_ctx *hctx;
1250 	int i;
1251 
1252 	queue_for_each_hw_ctx(q, hctx, i)
1253 		blk_mq_start_stopped_hw_queue(hctx, async);
1254 }
1255 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1256 
1257 static void blk_mq_run_work_fn(struct work_struct *work)
1258 {
1259 	struct blk_mq_hw_ctx *hctx;
1260 
1261 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
1262 
1263 	__blk_mq_run_hw_queue(hctx);
1264 }
1265 
1266 static void blk_mq_delay_work_fn(struct work_struct *work)
1267 {
1268 	struct blk_mq_hw_ctx *hctx;
1269 
1270 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1271 
1272 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1273 		__blk_mq_run_hw_queue(hctx);
1274 }
1275 
1276 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1277 {
1278 	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1279 		return;
1280 
1281 	blk_mq_stop_hw_queue(hctx);
1282 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1283 			&hctx->delay_work, msecs_to_jiffies(msecs));
1284 }
1285 EXPORT_SYMBOL(blk_mq_delay_queue);
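
/*
 * Editor's note: an illustrative sketch of a common blk_mq_delay_queue()
 * use, backing off in ->queue_rq() when the device is temporarily out
 * of resources (my_dev and my_have_resources() are hypothetical):
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_dev *dev = hctx->queue->queuedata;
 *
 *		if (!my_have_resources(dev)) {
 *			blk_mq_delay_queue(hctx, 10);
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		}
 *		...
 *	}
 */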
1286 
1287 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1288 					    struct request *rq,
1289 					    bool at_head)
1290 {
1291 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1292 
1293 	trace_block_rq_insert(hctx->queue, rq);
1294 
1295 	if (at_head)
1296 		list_add(&rq->queuelist, &ctx->rq_list);
1297 	else
1298 		list_add_tail(&rq->queuelist, &ctx->rq_list);
1299 }
1300 
1301 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1302 			     bool at_head)
1303 {
1304 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1305 
1306 	__blk_mq_insert_req_list(hctx, rq, at_head);
1307 	blk_mq_hctx_mark_pending(hctx, ctx);
1308 }
1309 
1310 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1311 			    struct list_head *list)
1312 
1313 {
1314 	/*
1315 	 * Preemption doesn't flush the plug list, so it's possible that
1316 	 * ctx->cpu is offline now.
1317 	 */
1318 	spin_lock(&ctx->lock);
1319 	while (!list_empty(list)) {
1320 		struct request *rq;
1321 
1322 		rq = list_first_entry(list, struct request, queuelist);
1323 		BUG_ON(rq->mq_ctx != ctx);
1324 		list_del_init(&rq->queuelist);
1325 		__blk_mq_insert_req_list(hctx, rq, false);
1326 	}
1327 	blk_mq_hctx_mark_pending(hctx, ctx);
1328 	spin_unlock(&ctx->lock);
1329 }
1330 
1331 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1332 {
1333 	struct request *rqa = container_of(a, struct request, queuelist);
1334 	struct request *rqb = container_of(b, struct request, queuelist);
1335 
1336 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1337 		 (rqa->mq_ctx == rqb->mq_ctx &&
1338 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1339 }
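
/*
 * Editor's note: the comparator above sorts the plug list by mq_ctx
 * first and by sector second, so blk_mq_flush_plug_list() below can
 * peel off runs of requests sharing a software queue and insert each
 * run in a single batch.
 */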
1340 
1341 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1342 {
1343 	struct blk_mq_ctx *this_ctx;
1344 	struct request_queue *this_q;
1345 	struct request *rq;
1346 	LIST_HEAD(list);
1347 	LIST_HEAD(ctx_list);
1348 	unsigned int depth;
1349 
1350 	list_splice_init(&plug->mq_list, &list);
1351 
1352 	list_sort(NULL, &list, plug_ctx_cmp);
1353 
1354 	this_q = NULL;
1355 	this_ctx = NULL;
1356 	depth = 0;
1357 
1358 	while (!list_empty(&list)) {
1359 		rq = list_entry_rq(list.next);
1360 		list_del_init(&rq->queuelist);
1361 		BUG_ON(!rq->q);
1362 		if (rq->mq_ctx != this_ctx) {
1363 			if (this_ctx) {
1364 				trace_block_unplug(this_q, depth, from_schedule);
1365 				blk_mq_sched_insert_requests(this_q, this_ctx,
1366 								&ctx_list,
1367 								from_schedule);
1368 			}
1369 
1370 			this_ctx = rq->mq_ctx;
1371 			this_q = rq->q;
1372 			depth = 0;
1373 		}
1374 
1375 		depth++;
1376 		list_add_tail(&rq->queuelist, &ctx_list);
1377 	}
1378 
1379 	/*
1380 	 * If 'this_ctx' is set, we know we have entries to complete
1381 	 * on 'ctx_list'. Do those.
1382 	 */
1383 	if (this_ctx) {
1384 		trace_block_unplug(this_q, depth, from_schedule);
1385 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1386 						from_schedule);
1387 	}
1388 }
1389 
1390 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1391 {
1392 	init_request_from_bio(rq, bio);
1393 
1394 	blk_account_io_start(rq, true);
1395 }
1396 
1397 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1398 {
1399 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1400 		!blk_queue_nomerges(hctx->queue);
1401 }
1402 
1403 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1404 					 struct blk_mq_ctx *ctx,
1405 					 struct request *rq, struct bio *bio)
1406 {
1407 	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1408 		blk_mq_bio_to_request(rq, bio);
1409 		spin_lock(&ctx->lock);
1410 insert_rq:
1411 		__blk_mq_insert_request(hctx, rq, false);
1412 		spin_unlock(&ctx->lock);
1413 		return false;
1414 	} else {
1415 		struct request_queue *q = hctx->queue;
1416 
1417 		spin_lock(&ctx->lock);
1418 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1419 			blk_mq_bio_to_request(rq, bio);
1420 			goto insert_rq;
1421 		}
1422 
1423 		spin_unlock(&ctx->lock);
1424 		__blk_mq_finish_request(hctx, ctx, rq);
1425 		return true;
1426 	}
1427 }
1428 
1429 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1430 {
1431 	if (rq->tag != -1)
1432 		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1433 
1434 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1435 }
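
/*
 * Editor's note: the cookie returned here packs the tag, the hardware
 * queue number, and a flag saying whether the tag is a scheduler
 * ("internal") tag, which is enough for the polling code to find the
 * hctx and request again later; BLK_QC_T_NONE means no request was
 * issued.
 */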
1436 
1437 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1438 {
1439 	struct request_queue *q = rq->q;
1440 	struct blk_mq_queue_data bd = {
1441 		.rq = rq,
1442 		.list = NULL,
1443 		.last = 1
1444 	};
1445 	struct blk_mq_hw_ctx *hctx;
1446 	blk_qc_t new_cookie;
1447 	int ret;
1448 
1449 	if (q->elevator)
1450 		goto insert;
1451 
1452 	if (!blk_mq_get_driver_tag(rq, &hctx, false))
1453 		goto insert;
1454 
1455 	new_cookie = request_to_qc_t(hctx, rq);
1456 
1457 	/*
1458 	 * If queueing succeeded, we are done. On error, kill the request.
1459 	 * For any other result (busy), just add it back to our list as we
1460 	 * previously would have done.
1461 	 */
1462 	ret = q->mq_ops->queue_rq(hctx, &bd);
1463 	if (ret == BLK_MQ_RQ_QUEUE_OK) {
1464 		*cookie = new_cookie;
1465 		return;
1466 	}
1467 
1468 	__blk_mq_requeue_request(rq);
1469 
1470 	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1471 		*cookie = BLK_QC_T_NONE;
1472 		rq->errors = -EIO;
1473 		blk_mq_end_request(rq, rq->errors);
1474 		return;
1475 	}
1476 
1477 insert:
1478 	blk_mq_sched_insert_request(rq, false, true, true, false);
1479 }
1480 
1481 /*
1482  * Multiple hardware queue variant. This will not use per-process plugs,
1483  * but will attempt to bypass the hctx queueing if we can go straight to
1484  * hardware for SYNC IO.
1485  */
1486 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1487 {
1488 	const int is_sync = op_is_sync(bio->bi_opf);
1489 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1490 	struct blk_mq_alloc_data data = { .flags = 0 };
1491 	struct request *rq;
1492 	unsigned int request_count = 0, srcu_idx;
1493 	struct blk_plug *plug;
1494 	struct request *same_queue_rq = NULL;
1495 	blk_qc_t cookie;
1496 	unsigned int wb_acct;
1497 
1498 	blk_queue_bounce(q, &bio);
1499 
1500 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1501 		bio_io_error(bio);
1502 		return BLK_QC_T_NONE;
1503 	}
1504 
1505 	blk_queue_split(q, &bio, q->bio_split);
1506 
1507 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1508 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1509 		return BLK_QC_T_NONE;
1510 
1511 	if (blk_mq_sched_bio_merge(q, bio))
1512 		return BLK_QC_T_NONE;
1513 
1514 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1515 
1516 	trace_block_getrq(q, bio, bio->bi_opf);
1517 
1518 	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1519 	if (unlikely(!rq)) {
1520 		__wbt_done(q->rq_wb, wb_acct);
1521 		return BLK_QC_T_NONE;
1522 	}
1523 
1524 	wbt_track(&rq->issue_stat, wb_acct);
1525 
1526 	cookie = request_to_qc_t(data.hctx, rq);
1527 
1528 	if (unlikely(is_flush_fua)) {
1529 		if (q->elevator)
1530 			goto elv_insert;
1531 		blk_mq_bio_to_request(rq, bio);
1532 		blk_insert_flush(rq);
1533 		goto run_queue;
1534 	}
1535 
1536 	plug = current->plug;
1537 	/*
1538 	 * If the driver supports deferred issue based on 'last', then
1539 	 * queue it up like normal since we can potentially save some
1540 	 * CPU this way.
1541 	 */
1542 	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1543 	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1544 		struct request *old_rq = NULL;
1545 
1546 		blk_mq_bio_to_request(rq, bio);
1547 
1548 		/*
1549 		 * We do limited plugging. If the bio can be merged, do that.
1550 		 * Otherwise the existing request in the plug list will be
1551 	 * issued. So the plug list will have one request at most.
1552 		 */
1553 		if (plug) {
1554 			/*
1555 			 * The plug list might get flushed before this. If that
1556 			 * happens, same_queue_rq is invalid and the plug
1557 			 * list is empty.
1558 			 */
1559 			if (same_queue_rq && !list_empty(&plug->mq_list)) {
1560 				old_rq = same_queue_rq;
1561 				list_del_init(&old_rq->queuelist);
1562 			}
1563 			list_add_tail(&rq->queuelist, &plug->mq_list);
1564 		} else /* is_sync */
1565 			old_rq = rq;
1566 		blk_mq_put_ctx(data.ctx);
1567 		if (!old_rq)
1568 			goto done;
1569 
1570 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1571 			rcu_read_lock();
1572 			blk_mq_try_issue_directly(old_rq, &cookie);
1573 			rcu_read_unlock();
1574 		} else {
1575 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1576 			blk_mq_try_issue_directly(old_rq, &cookie);
1577 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1578 		}
1579 		goto done;
1580 	}
1581 
1582 	if (q->elevator) {
1583 elv_insert:
1584 		blk_mq_put_ctx(data.ctx);
1585 		blk_mq_bio_to_request(rq, bio);
1586 		blk_mq_sched_insert_request(rq, false, true,
1587 						!is_sync || is_flush_fua, true);
1588 		goto done;
1589 	}
1590 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1591 		/*
1592 		 * For a SYNC request, send it to the hardware immediately. For
1593 		 * an ASYNC request, just ensure that we run it later on. The
1594 		 * latter allows for merging opportunities and more efficient
1595 		 * dispatching.
1596 		 */
1597 run_queue:
1598 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1599 	}
1600 	blk_mq_put_ctx(data.ctx);
1601 done:
1602 	return cookie;
1603 }
1604 
1605 /*
1606  * Single hardware queue variant. This will attempt to use any per-process
1607  * plug for merging and IO deferral.
1608  */
1609 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1610 {
1611 	const int is_sync = op_is_sync(bio->bi_opf);
1612 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1613 	struct blk_plug *plug;
1614 	unsigned int request_count = 0;
1615 	struct blk_mq_alloc_data data = { .flags = 0 };
1616 	struct request *rq;
1617 	blk_qc_t cookie;
1618 	unsigned int wb_acct;
1619 
1620 	blk_queue_bounce(q, &bio);
1621 
1622 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1623 		bio_io_error(bio);
1624 		return BLK_QC_T_NONE;
1625 	}
1626 
1627 	blk_queue_split(q, &bio, q->bio_split);
1628 
1629 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
1630 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1631 			return BLK_QC_T_NONE;
1632 	} else
1633 		request_count = blk_plug_queued_count(q);
1634 
1635 	if (blk_mq_sched_bio_merge(q, bio))
1636 		return BLK_QC_T_NONE;
1637 
1638 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1639 
1640 	trace_block_getrq(q, bio, bio->bi_opf);
1641 
1642 	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1643 	if (unlikely(!rq)) {
1644 		__wbt_done(q->rq_wb, wb_acct);
1645 		return BLK_QC_T_NONE;
1646 	}
1647 
1648 	wbt_track(&rq->issue_stat, wb_acct);
1649 
1650 	cookie = request_to_qc_t(data.hctx, rq);
1651 
1652 	if (unlikely(is_flush_fua)) {
1653 		if (q->elevator)
1654 			goto elv_insert;
1655 		blk_mq_bio_to_request(rq, bio);
1656 		blk_insert_flush(rq);
1657 		goto run_queue;
1658 	}
1659 
1660 	/*
1661 	 * If a task plug exists, use it (it is completely lockless) to
1662 	 * temporarily store requests until the task is either done or
1663 	 * scheduled away.
1664 	 */
1665 	plug = current->plug;
1666 	if (plug) {
1667 		struct request *last = NULL;
1668 
1669 		blk_mq_bio_to_request(rq, bio);
1670 
1671 		/*
1672 		 * @request_count may become stale because the task may have
1673 		 * been scheduled out, so check the list again.
1674 		 */
1675 		if (list_empty(&plug->mq_list))
1676 			request_count = 0;
1677 		if (!request_count)
1678 			trace_block_plug(q);
1679 		else
1680 			last = list_entry_rq(plug->mq_list.prev);
1681 
1682 		blk_mq_put_ctx(data.ctx);
1683 
1684 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1685 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1686 			blk_flush_plug_list(plug, false);
1687 			trace_block_plug(q);
1688 		}
1689 
1690 		list_add_tail(&rq->queuelist, &plug->mq_list);
1691 		return cookie;
1692 	}
1693 
1694 	if (q->elevator) {
1695 elv_insert:
1696 		blk_mq_put_ctx(data.ctx);
1697 		blk_mq_bio_to_request(rq, bio);
1698 		blk_mq_sched_insert_request(rq, false, true,
1699 						!is_sync || is_flush_fua, true);
1700 		goto done;
1701 	}
1702 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1703 		/*
1704 		 * For a SYNC request, send it to the hardware immediately. For
1705 		 * an ASYNC request, just ensure that we run it later on. The
1706 		 * latter allows for merging opportunities and more efficient
1707 		 * dispatching.
1708 		 */
1709 run_queue:
1710 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1711 	}
1712 
1713 	blk_mq_put_ctx(data.ctx);
1714 done:
1715 	return cookie;
1716 }
1717 
1718 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1719 		     unsigned int hctx_idx)
1720 {
1721 	struct page *page;
1722 
1723 	if (tags->rqs && set->ops->exit_request) {
1724 		int i;
1725 
1726 		for (i = 0; i < tags->nr_tags; i++) {
1727 			struct request *rq = tags->static_rqs[i];
1728 
1729 			if (!rq)
1730 				continue;
1731 			set->ops->exit_request(set->driver_data, rq,
1732 						hctx_idx, i);
1733 			tags->static_rqs[i] = NULL;
1734 		}
1735 	}
1736 
1737 	while (!list_empty(&tags->page_list)) {
1738 		page = list_first_entry(&tags->page_list, struct page, lru);
1739 		list_del_init(&page->lru);
1740 		/*
1741 		 * Remove kmemleak object previously allocated in
1742 		 * blk_mq_init_rq_map().
1743 		 */
1744 		kmemleak_free(page_address(page));
1745 		__free_pages(page, page->private);
1746 	}
1747 }
1748 
1749 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1750 {
1751 	kfree(tags->rqs);
1752 	tags->rqs = NULL;
1753 	kfree(tags->static_rqs);
1754 	tags->static_rqs = NULL;
1755 
1756 	blk_mq_free_tags(tags);
1757 }
1758 
1759 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1760 					unsigned int hctx_idx,
1761 					unsigned int nr_tags,
1762 					unsigned int reserved_tags)
1763 {
1764 	struct blk_mq_tags *tags;
1765 	int node;
1766 
1767 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1768 	if (node == NUMA_NO_NODE)
1769 		node = set->numa_node;
1770 
1771 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1772 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1773 	if (!tags)
1774 		return NULL;
1775 
1776 	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1777 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1778 				 node);
1779 	if (!tags->rqs) {
1780 		blk_mq_free_tags(tags);
1781 		return NULL;
1782 	}
1783 
1784 	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1785 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1786 				 node);
1787 	if (!tags->static_rqs) {
1788 		kfree(tags->rqs);
1789 		blk_mq_free_tags(tags);
1790 		return NULL;
1791 	}
1792 
1793 	return tags;
1794 }
1795 
1796 static size_t order_to_size(unsigned int order)
1797 {
1798 	return (size_t)PAGE_SIZE << order;
1799 }
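
/*
 * Editor's note, worked example for the sizing below: with 4K pages and
 * sizeof(struct request) + cmd_size rounding up to an rq_size of 512
 * bytes, an order-4 chunk (64K) holds 128 requests; the loop starts at
 * max_order 4 and falls back to smaller orders (and more chunks) when
 * higher-order pages are unavailable.
 */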
1800 
1801 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1802 		     unsigned int hctx_idx, unsigned int depth)
1803 {
1804 	unsigned int i, j, entries_per_page, max_order = 4;
1805 	size_t rq_size, left;
1806 	int node;
1807 
1808 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1809 	if (node == NUMA_NO_NODE)
1810 		node = set->numa_node;
1811 
1812 	INIT_LIST_HEAD(&tags->page_list);
1813 
1814 	/*
1815 	 * rq_size is the size of the request plus driver payload, rounded
1816 	 * to the cacheline size
1817 	 */
1818 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1819 				cache_line_size());
1820 	left = rq_size * depth;
1821 
1822 	for (i = 0; i < depth; ) {
1823 		int this_order = max_order;
1824 		struct page *page;
1825 		int to_do;
1826 		void *p;
1827 
1828 		while (this_order && left < order_to_size(this_order - 1))
1829 			this_order--;
1830 
1831 		do {
1832 			page = alloc_pages_node(node,
1833 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1834 				this_order);
1835 			if (page)
1836 				break;
1837 			if (!this_order--)
1838 				break;
1839 			if (order_to_size(this_order) < rq_size)
1840 				break;
1841 		} while (1);
1842 
1843 		if (!page)
1844 			goto fail;
1845 
1846 		page->private = this_order;
1847 		list_add_tail(&page->lru, &tags->page_list);
1848 
1849 		p = page_address(page);
1850 		/*
1851 		 * Allow kmemleak to scan these pages as they contain pointers
1852 		 * to additional allocations like via ops->init_request().
1853 		 */
1854 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1855 		entries_per_page = order_to_size(this_order) / rq_size;
1856 		to_do = min(entries_per_page, depth - i);
1857 		left -= to_do * rq_size;
1858 		for (j = 0; j < to_do; j++) {
1859 			struct request *rq = p;
1860 
1861 			tags->static_rqs[i] = rq;
1862 			if (set->ops->init_request) {
1863 				if (set->ops->init_request(set->driver_data,
1864 						rq, hctx_idx, i,
1865 						node)) {
1866 					tags->static_rqs[i] = NULL;
1867 					goto fail;
1868 				}
1869 			}
1870 
1871 			p += rq_size;
1872 			i++;
1873 		}
1874 	}
1875 	return 0;
1876 
1877 fail:
1878 	blk_mq_free_rqs(set, tags, hctx_idx);
1879 	return -ENOMEM;
1880 }
1881 
1882 /*
1883  * 'cpu' is going away. Splice any existing rq_list entries from this
1884  * software queue to the hw queue dispatch list, and ensure that it
1885  * gets run.
1886  */
1887 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1888 {
1889 	struct blk_mq_hw_ctx *hctx;
1890 	struct blk_mq_ctx *ctx;
1891 	LIST_HEAD(tmp);
1892 
1893 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1894 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1895 
1896 	spin_lock(&ctx->lock);
1897 	if (!list_empty(&ctx->rq_list)) {
1898 		list_splice_init(&ctx->rq_list, &tmp);
1899 		blk_mq_hctx_clear_pending(hctx, ctx);
1900 	}
1901 	spin_unlock(&ctx->lock);
1902 
1903 	if (list_empty(&tmp))
1904 		return 0;
1905 
1906 	spin_lock(&hctx->lock);
1907 	list_splice_tail_init(&tmp, &hctx->dispatch);
1908 	spin_unlock(&hctx->lock);
1909 
1910 	blk_mq_run_hw_queue(hctx, true);
1911 	return 0;
1912 }
1913 
1914 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1915 {
1916 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1917 					    &hctx->cpuhp_dead);
1918 }
1919 
1920 /* hctx->ctxs will be freed in the queue's release handler */
1921 static void blk_mq_exit_hctx(struct request_queue *q,
1922 		struct blk_mq_tag_set *set,
1923 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1924 {
1925 	unsigned flush_start_tag = set->queue_depth;
1926 
1927 	blk_mq_tag_idle(hctx);
1928 
1929 	if (set->ops->exit_request)
1930 		set->ops->exit_request(set->driver_data,
1931 				       hctx->fq->flush_rq, hctx_idx,
1932 				       flush_start_tag + hctx_idx);
1933 
1934 	if (set->ops->exit_hctx)
1935 		set->ops->exit_hctx(hctx, hctx_idx);
1936 
1937 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1938 		cleanup_srcu_struct(&hctx->queue_rq_srcu);
1939 
1940 	blk_mq_remove_cpuhp(hctx);
1941 	blk_free_flush_queue(hctx->fq);
1942 	sbitmap_free(&hctx->ctx_map);
1943 }
1944 
1945 static void blk_mq_exit_hw_queues(struct request_queue *q,
1946 		struct blk_mq_tag_set *set, int nr_queue)
1947 {
1948 	struct blk_mq_hw_ctx *hctx;
1949 	unsigned int i;
1950 
1951 	queue_for_each_hw_ctx(q, hctx, i) {
1952 		if (i == nr_queue)
1953 			break;
1954 		blk_mq_exit_hctx(q, set, hctx, i);
1955 	}
1956 }
1957 
1958 static void blk_mq_free_hw_queues(struct request_queue *q,
1959 		struct blk_mq_tag_set *set)
1960 {
1961 	struct blk_mq_hw_ctx *hctx;
1962 	unsigned int i;
1963 
1964 	queue_for_each_hw_ctx(q, hctx, i)
1965 		free_cpumask_var(hctx->cpumask);
1966 }
1967 
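/*
 * Set up one hardware queue: initialize its locks, lists and work items,
 * register the CPU-hotplug notifier, allocate the ctx map, the per-hctx
 * flush queue and flush request, and invoke the driver's init_hctx() and
 * init_request() hooks.  The error path unwinds in the reverse order,
 * mirroring blk_mq_exit_hctx().
 */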
1968 static int blk_mq_init_hctx(struct request_queue *q,
1969 		struct blk_mq_tag_set *set,
1970 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1971 {
1972 	int node;
1973 	unsigned flush_start_tag = set->queue_depth;
1974 
1975 	node = hctx->numa_node;
1976 	if (node == NUMA_NO_NODE)
1977 		node = hctx->numa_node = set->numa_node;
1978 
1979 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1980 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1981 	spin_lock_init(&hctx->lock);
1982 	INIT_LIST_HEAD(&hctx->dispatch);
1983 	hctx->queue = q;
1984 	hctx->queue_num = hctx_idx;
1985 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1986 
1987 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1988 
1989 	hctx->tags = set->tags[hctx_idx];
1990 
1991 	/*
1992 	 * Allocate space for all possible cpus to avoid allocation at
1993 	 * runtime
1994 	 */
1995 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1996 					GFP_KERNEL, node);
1997 	if (!hctx->ctxs)
1998 		goto unregister_cpu_notifier;
1999 
2000 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2001 			      node))
2002 		goto free_ctxs;
2003 
2004 	hctx->nr_ctx = 0;
2005 
2006 	if (set->ops->init_hctx &&
2007 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2008 		goto free_bitmap;
2009 
2010 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2011 	if (!hctx->fq)
2012 		goto exit_hctx;
2013 
2014 	if (set->ops->init_request &&
2015 	    set->ops->init_request(set->driver_data,
2016 				   hctx->fq->flush_rq, hctx_idx,
2017 				   flush_start_tag + hctx_idx, node))
2018 		goto free_fq;
2019 
2020 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2021 		init_srcu_struct(&hctx->queue_rq_srcu);
2022 
2023 	return 0;
2024 
2025  free_fq:
2026 	blk_free_flush_queue(hctx->fq);
2027  exit_hctx:
2028 	if (set->ops->exit_hctx)
2029 		set->ops->exit_hctx(hctx, hctx_idx);
2030  free_bitmap:
2031 	sbitmap_free(&hctx->ctx_map);
2032  free_ctxs:
2033 	kfree(hctx->ctxs);
2034  unregister_cpu_notifier:
2035 	blk_mq_remove_cpuhp(hctx);
2036 	return -1;
2037 }
2038 
2039 static void blk_mq_init_cpu_queues(struct request_queue *q,
2040 				   unsigned int nr_hw_queues)
2041 {
2042 	unsigned int i;
2043 
2044 	for_each_possible_cpu(i) {
2045 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2046 		struct blk_mq_hw_ctx *hctx;
2047 
2048 		memset(__ctx, 0, sizeof(*__ctx));
2049 		__ctx->cpu = i;
2050 		spin_lock_init(&__ctx->lock);
2051 		INIT_LIST_HEAD(&__ctx->rq_list);
2052 		__ctx->queue = q;
2053 		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
2054 		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
2055 
2056 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
2057 		if (!cpu_online(i))
2058 			continue;
2059 
2060 		hctx = blk_mq_map_queue(q, i);
2061 
2062 		/*
2063 		 * Set local node, IFF we have more than one hw queue. If
2064 		 * not, we remain on the home node of the device
2065 		 */
2066 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2067 			hctx->numa_node = local_memory_node(cpu_to_node(i));
2068 	}
2069 }
2070 
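/*
 * Allocate the tag map and the statically allocated requests for one
 * hardware queue.  Returns true on success; on failure anything that
 * was allocated here is freed again.
 */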
2071 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2072 {
2073 	int ret = 0;
2074 
2075 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2076 					set->queue_depth, set->reserved_tags);
2077 	if (!set->tags[hctx_idx])
2078 		return false;
2079 
2080 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2081 				set->queue_depth);
2082 	if (!ret)
2083 		return true;
2084 
2085 	blk_mq_free_rq_map(set->tags[hctx_idx]);
2086 	set->tags[hctx_idx] = NULL;
2087 	return false;
2088 }
2089 
2090 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2091 					 unsigned int hctx_idx)
2092 {
2093 	if (set->tags[hctx_idx]) {
2094 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2095 		blk_mq_free_rq_map(set->tags[hctx_idx]);
2096 		set->tags[hctx_idx] = NULL;
2097 	}
2098 }
2099 
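/*
 * (Re)build the ctx -> hctx mapping for every CPU in @online_mask, then
 * fix up each hardware queue: free the tags of any hctx left without
 * software queues, resize the pending-work bitmap to the number of
 * mapped ctxs, and reset the round-robin dispatch state.
 */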
2100 static void blk_mq_map_swqueue(struct request_queue *q,
2101 			       const struct cpumask *online_mask)
2102 {
2103 	unsigned int i, hctx_idx;
2104 	struct blk_mq_hw_ctx *hctx;
2105 	struct blk_mq_ctx *ctx;
2106 	struct blk_mq_tag_set *set = q->tag_set;
2107 
2108 	/*
2109 	 * Avoid others reading incomplete hctx->cpumask through sysfs
2110 	 */
2111 	mutex_lock(&q->sysfs_lock);
2112 
2113 	queue_for_each_hw_ctx(q, hctx, i) {
2114 		cpumask_clear(hctx->cpumask);
2115 		hctx->nr_ctx = 0;
2116 	}
2117 
2118 	/*
2119 	 * Map software to hardware queues
2120 	 */
2121 	for_each_possible_cpu(i) {
2122 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
2123 		if (!cpumask_test_cpu(i, online_mask))
2124 			continue;
2125 
2126 		hctx_idx = q->mq_map[i];
2127 		/* an unmapped hw queue can be remapped after the CPU topology changes */
2128 		if (!set->tags[hctx_idx] &&
2129 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2130 			/*
2131 			 * If tags initialization fails for some hctx,
2132 			 * that hctx won't be brought online.  In this
2133 			 * case, remap the current ctx to hctx[0], which
2134 			 * is guaranteed to always have tags allocated.
2135 			 */
2136 			q->mq_map[i] = 0;
2137 		}
2138 
2139 		ctx = per_cpu_ptr(q->queue_ctx, i);
2140 		hctx = blk_mq_map_queue(q, i);
2141 
2142 		cpumask_set_cpu(i, hctx->cpumask);
2143 		ctx->index_hw = hctx->nr_ctx;
2144 		hctx->ctxs[hctx->nr_ctx++] = ctx;
2145 	}
2146 
2147 	mutex_unlock(&q->sysfs_lock);
2148 
2149 	queue_for_each_hw_ctx(q, hctx, i) {
2150 		/*
2151 		 * If no software queues are mapped to this hardware queue,
2152 		 * disable it and free the request entries.
2153 		 */
2154 		if (!hctx->nr_ctx) {
2155 			/* Never unmap queue 0.  We need it as a
2156 			 * fallback in case a new remap fails to
2157 			 * allocate tags.
2158 			 */
2159 			if (i && set->tags[i])
2160 				blk_mq_free_map_and_requests(set, i);
2161 
2162 			hctx->tags = NULL;
2163 			continue;
2164 		}
2165 
2166 		hctx->tags = set->tags[i];
2167 		WARN_ON(!hctx->tags);
2168 
2169 		/*
2170 		 * Set the map size to the number of mapped software queues.
2171 		 * This is more accurate and more efficient than looping
2172 		 * over all possibly mapped software queues.
2173 		 */
2174 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2175 
2176 		/*
2177 		 * Initialize batch roundrobin counts
2178 		 */
2179 		hctx->next_cpu = cpumask_first(hctx->cpumask);
2180 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2181 	}
2182 }
2183 
2184 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2185 {
2186 	struct blk_mq_hw_ctx *hctx;
2187 	int i;
2188 
2189 	queue_for_each_hw_ctx(q, hctx, i) {
2190 		if (shared)
2191 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
2192 		else
2193 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2194 	}
2195 }
2196 
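/*
 * Despite its name, this only toggles BLK_MQ_F_TAG_SHARED on every
 * queue in the tag set; each queue is frozen around the flag change so
 * that no request sees an inconsistent value.
 */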
2197 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2198 {
2199 	struct request_queue *q;
2200 
2201 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2202 		blk_mq_freeze_queue(q);
2203 		queue_set_hctx_shared(q, shared);
2204 		blk_mq_unfreeze_queue(q);
2205 	}
2206 }
2207 
2208 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2209 {
2210 	struct blk_mq_tag_set *set = q->tag_set;
2211 
2212 	mutex_lock(&set->tag_list_lock);
2213 	list_del_init(&q->tag_set_list);
2214 	if (list_is_singular(&set->tag_list)) {
2215 		/* just transitioned to unshared */
2216 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
2217 		/* update existing queue */
2218 		blk_mq_update_tag_set_depth(set, false);
2219 	}
2220 	mutex_unlock(&set->tag_list_lock);
2221 }
2222 
2223 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2224 				     struct request_queue *q)
2225 {
2226 	q->tag_set = set;
2227 
2228 	mutex_lock(&set->tag_list_lock);
2229 
2230 	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2231 	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2232 		set->flags |= BLK_MQ_F_TAG_SHARED;
2233 		/* update existing queue */
2234 		blk_mq_update_tag_set_depth(set, true);
2235 	}
2236 	if (set->flags & BLK_MQ_F_TAG_SHARED)
2237 		queue_set_hctx_shared(q, true);
2238 	list_add_tail(&q->tag_set_list, &set->tag_list);
2239 
2240 	mutex_unlock(&set->tag_list_lock);
2241 }
2242 
2243 /*
2244  * This is the actual release handler for mq, but we do it from the
2245  * request queue's release handler to avoid use-after-free.  It is a
2246  * headache because q->mq_kobj shouldn't have been introduced, but we
2247  * can't group the ctx/kctx kobjs without it.
2248  */
2249 void blk_mq_release(struct request_queue *q)
2250 {
2251 	struct blk_mq_hw_ctx *hctx;
2252 	unsigned int i;
2253 
2254 	blk_mq_sched_teardown(q);
2255 
2256 	/* hctx kobj stays in hctx */
2257 	queue_for_each_hw_ctx(q, hctx, i) {
2258 		if (!hctx)
2259 			continue;
2260 		kfree(hctx->ctxs);
2261 		kfree(hctx);
2262 	}
2263 
2264 	q->mq_map = NULL;
2265 
2266 	kfree(q->queue_hw_ctx);
2267 
2268 	/* ctx kobj stays in queue_ctx */
2269 	free_percpu(q->queue_ctx);
2270 }
2271 
2272 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
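/*
 * Allocate a request queue and set it up on top of an already
 * initialized tag set.  A minimal sketch of typical driver usage
 * (my_dev is a hypothetical driver structure):
 *
 *	q = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */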
2273 {
2274 	struct request_queue *uninit_q, *q;
2275 
2276 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2277 	if (!uninit_q)
2278 		return ERR_PTR(-ENOMEM);
2279 
2280 	q = blk_mq_init_allocated_queue(set, uninit_q);
2281 	if (IS_ERR(q))
2282 		blk_cleanup_queue(uninit_q);
2283 
2284 	return q;
2285 }
2286 EXPORT_SYMBOL(blk_mq_init_queue);
2287 
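/*
 * Bring q->queue_hw_ctx in line with set->nr_hw_queues: allocate and
 * initialize any hctx that does not exist yet on its home node, and tear
 * down any hctx beyond the (possibly reduced) count.  If an allocation
 * fails we stop early and run with the hctxs set up so far.
 */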
2288 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2289 						struct request_queue *q)
2290 {
2291 	int i, j;
2292 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2293 
2294 	blk_mq_sysfs_unregister(q);
2295 	for (i = 0; i < set->nr_hw_queues; i++) {
2296 		int node;
2297 
2298 		if (hctxs[i])
2299 			continue;
2300 
2301 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
2302 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2303 					GFP_KERNEL, node);
2304 		if (!hctxs[i])
2305 			break;
2306 
2307 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2308 						node)) {
2309 			kfree(hctxs[i]);
2310 			hctxs[i] = NULL;
2311 			break;
2312 		}
2313 
2314 		atomic_set(&hctxs[i]->nr_active, 0);
2315 		hctxs[i]->numa_node = node;
2316 		hctxs[i]->queue_num = i;
2317 
2318 		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2319 			free_cpumask_var(hctxs[i]->cpumask);
2320 			kfree(hctxs[i]);
2321 			hctxs[i] = NULL;
2322 			break;
2323 		}
2324 		blk_mq_hctx_kobj_init(hctxs[i]);
2325 	}
2326 	for (j = i; j < q->nr_hw_queues; j++) {
2327 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2328 
2329 		if (hctx) {
2330 			if (hctx->tags)
2331 				blk_mq_free_map_and_requests(set, j);
2332 			blk_mq_exit_hctx(q, set, hctx, j);
2333 			free_cpumask_var(hctx->cpumask);
2334 			kobject_put(&hctx->kobj);
2335 			kfree(hctx->ctxs);
2336 			kfree(hctx);
2337 			hctxs[j] = NULL;
2338 
2339 		}
2340 	}
2341 	q->nr_hw_queues = i;
2342 	blk_mq_sysfs_register(q);
2343 }
2344 
2345 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2346 						  struct request_queue *q)
2347 {
2348 	/* mark the queue as mq asap */
2349 	q->mq_ops = set->ops;
2350 
2351 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2352 	if (!q->queue_ctx)
2353 		goto err_exit;
2354 
2355 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2356 						GFP_KERNEL, set->numa_node);
2357 	if (!q->queue_hw_ctx)
2358 		goto err_percpu;
2359 
2360 	q->mq_map = set->mq_map;
2361 
2362 	blk_mq_realloc_hw_ctxs(set, q);
2363 	if (!q->nr_hw_queues)
2364 		goto err_hctxs;
2365 
2366 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2367 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2368 
2369 	q->nr_queues = nr_cpu_ids;
2370 
2371 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2372 
2373 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
2374 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2375 
2376 	q->sg_reserved_size = INT_MAX;
2377 
2378 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2379 	INIT_LIST_HEAD(&q->requeue_list);
2380 	spin_lock_init(&q->requeue_lock);
2381 
2382 	if (q->nr_hw_queues > 1)
2383 		blk_queue_make_request(q, blk_mq_make_request);
2384 	else
2385 		blk_queue_make_request(q, blk_sq_make_request);
2386 
2387 	/*
2388 	 * Do this after blk_queue_make_request() overrides it...
2389 	 */
2390 	q->nr_requests = set->queue_depth;
2391 
2392 	/*
2393 	 * Default to classic polling
2394 	 */
2395 	q->poll_nsec = -1;
2396 
2397 	if (set->ops->complete)
2398 		blk_queue_softirq_done(q, set->ops->complete);
2399 
2400 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2401 
2402 	get_online_cpus();
2403 	mutex_lock(&all_q_mutex);
2404 
2405 	list_add_tail(&q->all_q_node, &all_q_list);
2406 	blk_mq_add_queue_tag_set(set, q);
2407 	blk_mq_map_swqueue(q, cpu_online_mask);
2408 
2409 	mutex_unlock(&all_q_mutex);
2410 	put_online_cpus();
2411 
2412 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2413 		int ret;
2414 
2415 		ret = blk_mq_sched_init(q);
2416 		if (ret)
2417 			return ERR_PTR(ret);
2418 	}
2419 
2420 	return q;
2421 
2422 err_hctxs:
2423 	kfree(q->queue_hw_ctx);
2424 err_percpu:
2425 	free_percpu(q->queue_ctx);
2426 err_exit:
2427 	q->mq_ops = NULL;
2428 	return ERR_PTR(-ENOMEM);
2429 }
2430 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2431 
2432 void blk_mq_free_queue(struct request_queue *q)
2433 {
2434 	struct blk_mq_tag_set	*set = q->tag_set;
2435 
2436 	mutex_lock(&all_q_mutex);
2437 	list_del_init(&q->all_q_node);
2438 	mutex_unlock(&all_q_mutex);
2439 
2440 	wbt_exit(q);
2441 
2442 	blk_mq_del_queue_tag_set(q);
2443 
2444 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2445 	blk_mq_free_hw_queues(q, set);
2446 }
2447 
2448 /* Basically redo blk_mq_init_queue with queue frozen */
2449 static void blk_mq_queue_reinit(struct request_queue *q,
2450 				const struct cpumask *online_mask)
2451 {
2452 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2453 
2454 	blk_mq_sysfs_unregister(q);
2455 
2456 	/*
2457 	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2458 	 * we should change hctx numa_node according to the new topology (this
2459 	 * involves freeing and re-allocating memory; worth doing?)
2460 	 */
2461 
2462 	blk_mq_map_swqueue(q, online_mask);
2463 
2464 	blk_mq_sysfs_register(q);
2465 }
2466 
2467 /*
2468  * New online cpumask which is going to be set in this hotplug event.
2469  * Declare this cpumask as global, since cpu-hotplug operations are
2470  * invoked one by one and dynamically allocating it could fail.
2471  */
2472 static struct cpumask cpuhp_online_new;
2473 
2474 static void blk_mq_queue_reinit_work(void)
2475 {
2476 	struct request_queue *q;
2477 
2478 	mutex_lock(&all_q_mutex);
2479 	/*
2480 	 * We need to freeze and reinit all existing queues.  Freezing
2481 	 * involves synchronous wait for an RCU grace period and doing it
2482 	 * one by one may take a long time.  Start freezing all queues in
2483 	 * one swoop and then wait for the completions so that freezing can
2484 	 * take place in parallel.
2485 	 */
2486 	list_for_each_entry(q, &all_q_list, all_q_node)
2487 		blk_mq_freeze_queue_start(q);
2488 	list_for_each_entry(q, &all_q_list, all_q_node)
2489 		blk_mq_freeze_queue_wait(q);
2490 
2491 	list_for_each_entry(q, &all_q_list, all_q_node)
2492 		blk_mq_queue_reinit(q, &cpuhp_online_new);
2493 
2494 	list_for_each_entry(q, &all_q_list, all_q_node)
2495 		blk_mq_unfreeze_queue(q);
2496 
2497 	mutex_unlock(&all_q_mutex);
2498 }
2499 
2500 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2501 {
2502 	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2503 	blk_mq_queue_reinit_work();
2504 	return 0;
2505 }
2506 
2507 /*
2508  * Before a hotadded cpu starts handling requests, new mappings must be
2509  * established.  Otherwise, requests in the hw queue might never be
2510  * dispatched.
2511  *
2512  * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2513  * for CPU0, and ctx1 for CPU1).
2514  *
2515  * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
2516  * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list,
2517  * setting bit0 in the pending bitmap, as ctx1->index_hw is still zero.
2518  * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
2519  * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
2520  * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2521  * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2522  */
2523 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2524 {
2525 	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2526 	cpumask_set_cpu(cpu, &cpuhp_online_new);
2527 	blk_mq_queue_reinit_work();
2528 	return 0;
2529 }
2530 
2531 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2532 {
2533 	int i;
2534 
2535 	for (i = 0; i < set->nr_hw_queues; i++)
2536 		if (!__blk_mq_alloc_rq_map(set, i))
2537 			goto out_unwind;
2538 
2539 	return 0;
2540 
2541 out_unwind:
2542 	while (--i >= 0)
2543 		blk_mq_free_rq_map(set->tags[i]);
2544 
2545 	return -ENOMEM;
2546 }
2547 
2548 /*
2549  * Allocate the request maps associated with this tag_set. Note that this
2550  * may reduce the depth asked for, if memory is tight. set->queue_depth
2551  * will be updated to reflect the allocated depth.
2552  */
2553 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2554 {
2555 	unsigned int depth;
2556 	int err;
2557 
2558 	depth = set->queue_depth;
2559 	do {
2560 		err = __blk_mq_alloc_rq_maps(set);
2561 		if (!err)
2562 			break;
2563 
2564 		set->queue_depth >>= 1;
2565 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2566 			err = -ENOMEM;
2567 			break;
2568 		}
2569 	} while (set->queue_depth);
2570 
2571 	if (!set->queue_depth || err) {
2572 		pr_err("blk-mq: failed to allocate request map\n");
2573 		return -ENOMEM;
2574 	}
2575 
2576 	if (depth != set->queue_depth)
2577 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2578 						depth, set->queue_depth);
2579 
2580 	return 0;
2581 }
2582 
2583 /*
2584  * Alloc a tag set to be associated with one or more request queues.
2585  * May fail with EINVAL for various error conditions. May adjust the
2586  * requested depth down if it is too large. In that case, the adjusted
2587  * value will be stored in set->queue_depth.
2588  */
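/*
 * A hedged sketch of filling in a set before calling this; the values
 * and the my_* names are illustrative, not requirements:
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 128;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->numa_node = NUMA_NO_NODE;
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	err = blk_mq_alloc_tag_set(set);
 */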
2589 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2590 {
2591 	int ret;
2592 
2593 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2594 
2595 	if (!set->nr_hw_queues)
2596 		return -EINVAL;
2597 	if (!set->queue_depth)
2598 		return -EINVAL;
2599 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2600 		return -EINVAL;
2601 
2602 	if (!set->ops->queue_rq)
2603 		return -EINVAL;
2604 
2605 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2606 		pr_info("blk-mq: reduced tag depth to %u\n",
2607 			BLK_MQ_MAX_DEPTH);
2608 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2609 	}
2610 
2611 	/*
2612 	 * If a crashdump is active, then we are potentially in a very
2613 	 * memory constrained environment. Limit us to 1 queue and
2614 	 * 64 tags to prevent using too much memory.
2615 	 */
2616 	if (is_kdump_kernel()) {
2617 		set->nr_hw_queues = 1;
2618 		set->queue_depth = min(64U, set->queue_depth);
2619 	}
2620 	/*
2621 	 * There is no use for more h/w queues than cpus.
2622 	 */
2623 	if (set->nr_hw_queues > nr_cpu_ids)
2624 		set->nr_hw_queues = nr_cpu_ids;
2625 
2626 	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2627 				 GFP_KERNEL, set->numa_node);
2628 	if (!set->tags)
2629 		return -ENOMEM;
2630 
2631 	ret = -ENOMEM;
2632 	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2633 			GFP_KERNEL, set->numa_node);
2634 	if (!set->mq_map)
2635 		goto out_free_tags;
2636 
2637 	if (set->ops->map_queues)
2638 		ret = set->ops->map_queues(set);
2639 	else
2640 		ret = blk_mq_map_queues(set);
2641 	if (ret)
2642 		goto out_free_mq_map;
2643 
2644 	ret = blk_mq_alloc_rq_maps(set);
2645 	if (ret)
2646 		goto out_free_mq_map;
2647 
2648 	mutex_init(&set->tag_list_lock);
2649 	INIT_LIST_HEAD(&set->tag_list);
2650 
2651 	return 0;
2652 
2653 out_free_mq_map:
2654 	kfree(set->mq_map);
2655 	set->mq_map = NULL;
2656 out_free_tags:
2657 	kfree(set->tags);
2658 	set->tags = NULL;
2659 	return ret;
2660 }
2661 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2662 
2663 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2664 {
2665 	int i;
2666 
2667 	for (i = 0; i < nr_cpu_ids; i++)
2668 		blk_mq_free_map_and_requests(set, i);
2669 
2670 	kfree(set->mq_map);
2671 	set->mq_map = NULL;
2672 
2673 	kfree(set->tags);
2674 	set->tags = NULL;
2675 }
2676 EXPORT_SYMBOL(blk_mq_free_tag_set);
2677 
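/*
 * Update the queue depth of every hardware queue to @nr.  The queue is
 * frozen and quiesced while the tag maps (or, with an I/O scheduler,
 * the scheduler tag maps) are resized, so no requests are in flight
 * during the update.
 */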
2678 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2679 {
2680 	struct blk_mq_tag_set *set = q->tag_set;
2681 	struct blk_mq_hw_ctx *hctx;
2682 	int i, ret;
2683 
2684 	if (!set)
2685 		return -EINVAL;
2686 
2687 	blk_mq_freeze_queue(q);
2688 	blk_mq_quiesce_queue(q);
2689 
2690 	ret = 0;
2691 	queue_for_each_hw_ctx(q, hctx, i) {
2692 		if (!hctx->tags)
2693 			continue;
2694 		/*
2695 		 * If we're using an MQ scheduler, just update the scheduler
2696 		 * queue depth. This is similar to what the old code would do.
2697 		 */
2698 		if (!hctx->sched_tags) {
2699 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2700 							min(nr, set->queue_depth),
2701 							false);
2702 		} else {
2703 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2704 							nr, true);
2705 		}
2706 		if (ret)
2707 			break;
2708 	}
2709 
2710 	if (!ret)
2711 		q->nr_requests = nr;
2712 
2713 	blk_mq_unfreeze_queue(q);
2714 	blk_mq_start_stopped_hw_queues(q, true);
2715 
2716 	return ret;
2717 }
2718 
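/*
 * Change the number of hardware queues for every request queue sharing
 * this tag set.  All queues are frozen across the update; the hctx
 * arrays are reallocated and the sw/hw queue mappings rebuilt against
 * the current online CPU mask before the queues are unfrozen.
 */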
2719 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2720 {
2721 	struct request_queue *q;
2722 
2723 	if (nr_hw_queues > nr_cpu_ids)
2724 		nr_hw_queues = nr_cpu_ids;
2725 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2726 		return;
2727 
2728 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2729 		blk_mq_freeze_queue(q);
2730 
2731 	set->nr_hw_queues = nr_hw_queues;
2732 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2733 		blk_mq_realloc_hw_ctxs(set, q);
2734 
2735 		/*
2736 		 * Manually set the make_request_fn as blk_queue_make_request
2737 		 * resets a lot of the queue settings.
2738 		 */
2739 		if (q->nr_hw_queues > 1)
2740 			q->make_request_fn = blk_mq_make_request;
2741 		else
2742 			q->make_request_fn = blk_sq_make_request;
2743 
2744 		blk_mq_queue_reinit(q, cpu_online_mask);
2745 	}
2746 
2747 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2748 		blk_mq_unfreeze_queue(q);
2749 }
2750 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2751 
2752 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2753 				       struct blk_mq_hw_ctx *hctx,
2754 				       struct request *rq)
2755 {
2756 	struct blk_rq_stat stat[2];
2757 	unsigned long ret = 0;
2758 
2759 	/*
2760 	 * If stats collection isn't on, don't sleep but turn it on for
2761 	 * future users
2762 	 * future users.
2763 	if (!blk_stat_enable(q))
2764 		return 0;
2765 
2766 	/*
2767 	 * We don't have to do this once per IO; this should be optimized
2768 	 * to just use the current window of stats until it changes.
2769 	 */
2770 	memset(&stat, 0, sizeof(stat));
2771 	blk_hctx_stat_get(hctx, stat);
2772 
2773 	/*
2774 	 * As an optimistic guess, use half of the mean service time
2775 	 * for this type of request. We can (and should) make this smarter.
2776 	 * For instance, if the completion latencies are tight, we can
2777 	 * get closer than just half the mean. This is especially
2778 	 * important on devices where the completion latencies are longer
2779 	 * than ~10 usec.
2780 	 */
2781 	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2782 		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2783 	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2784 		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2785 
2786 	return ret;
2787 }
2788 
2789 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
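/*
 * Hybrid polling: rather than busy-polling for the whole I/O, sleep
 * for an estimate of the completion time first (either the user-set
 * q->poll_nsec, or half of the observed mean latency from
 * blk_mq_poll_nsecs()), and leave the remainder to the spin loop in
 * __blk_mq_poll().
 */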
2790 				     struct blk_mq_hw_ctx *hctx,
2791 				     struct request *rq)
2792 {
2793 	struct hrtimer_sleeper hs;
2794 	enum hrtimer_mode mode;
2795 	unsigned int nsecs;
2796 	ktime_t kt;
2797 
2798 	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2799 		return false;
2800 
2801 	/*
2802 	 * poll_nsec can be:
2803 	 *
2804 	 * -1:	don't ever hybrid sleep
2805 	 *  0:	use half of prev avg
2806 	 * >0:	use this specific value
2807 	 */
2808 	if (q->poll_nsec == -1)
2809 		return false;
2810 	else if (q->poll_nsec > 0)
2811 		nsecs = q->poll_nsec;
2812 	else
2813 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2814 
2815 	if (!nsecs)
2816 		return false;
2817 
2818 	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2819 
2820 	/*
2821 	 * This will be replaced with the stats tracking code, using
2822 	 * 'avg_completion_time / 2' as the pre-sleep target.
2823 	 */
2824 	kt = nsecs;
2825 
2826 	mode = HRTIMER_MODE_REL;
2827 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2828 	hrtimer_set_expires(&hs.timer, kt);
2829 
2830 	hrtimer_init_sleeper(&hs, current);
2831 	do {
2832 		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2833 			break;
2834 		set_current_state(TASK_UNINTERRUPTIBLE);
2835 		hrtimer_start_expires(&hs.timer, mode);
2836 		if (hs.task)
2837 			io_schedule();
2838 		hrtimer_cancel(&hs.timer);
2839 		mode = HRTIMER_MODE_ABS;
2840 	} while (hs.task && !signal_pending(current));
2841 
2842 	__set_current_state(TASK_RUNNING);
2843 	destroy_hrtimer_on_stack(&hs.timer);
2844 	return true;
2845 }
2846 
2847 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2848 {
2849 	struct request_queue *q = hctx->queue;
2850 	long state;
2851 
2852 	/*
2853 	 * If we sleep, have the caller restart the poll loop to reset
2854 	 * the state. Like for the other success return cases, the
2855 	 * caller is responsible for checking if the IO completed. If
2856 	 * the IO isn't complete, we'll get called again and will go
2857 	 * straight to the busy poll loop.
2858 	 */
2859 	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2860 		return true;
2861 
2862 	hctx->poll_considered++;
2863 
2864 	state = current->state;
2865 	while (!need_resched()) {
2866 		int ret;
2867 
2868 		hctx->poll_invoked++;
2869 
2870 		ret = q->mq_ops->poll(hctx, rq->tag);
2871 		if (ret > 0) {
2872 			hctx->poll_success++;
2873 			set_current_state(TASK_RUNNING);
2874 			return true;
2875 		}
2876 
2877 		if (signal_pending_state(state, current))
2878 			set_current_state(TASK_RUNNING);
2879 
2880 		if (current->state == TASK_RUNNING)
2881 			return true;
2882 		if (ret < 0)
2883 			break;
2884 		cpu_relax();
2885 	}
2886 
2887 	return false;
2888 }
2889 
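/*
 * Poll for the completion of the request identified by @cookie.
 * Returns true if the request may have completed and the caller should
 * recheck; false if polling is not enabled for this queue or the poll
 * loop gave up (e.g. need_resched() or a driver error).
 */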
2890 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2891 {
2892 	struct blk_mq_hw_ctx *hctx;
2893 	struct blk_plug *plug;
2894 	struct request *rq;
2895 
2896 	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2897 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2898 		return false;
2899 
2900 	plug = current->plug;
2901 	if (plug)
2902 		blk_flush_plug_list(plug, false);
2903 
2904 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2905 	if (!blk_qc_t_is_internal(cookie))
2906 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2907 	else
2908 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2909 
2910 	return __blk_mq_poll(hctx, rq);
2911 }
2912 EXPORT_SYMBOL_GPL(blk_mq_poll);
2913 
2914 void blk_mq_disable_hotplug(void)
2915 {
2916 	mutex_lock(&all_q_mutex);
2917 }
2918 
2919 void blk_mq_enable_hotplug(void)
2920 {
2921 	mutex_unlock(&all_q_mutex);
2922 }
2923 
2924 static int __init blk_mq_init(void)
2925 {
2926 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2927 				blk_mq_hctx_notify_dead);
2928 
2929 	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2930 				  blk_mq_queue_reinit_prepare,
2931 				  blk_mq_queue_reinit_dead);
2932 	return 0;
2933 }
2934 subsys_initcall(blk_mq_init);
2935