xref: /openbmc/linux/block/blk-mq.c (revision dea54fba)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28 
29 #include <trace/events/block.h>
30 
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-stat.h"
37 #include "blk-wbt.h"
38 #include "blk-mq-sched.h"
39 
40 static void blk_mq_poll_stats_start(struct request_queue *q);
41 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
42 
43 static int blk_mq_poll_stats_bkt(const struct request *rq)
44 {
45 	int ddir, bytes, bucket;
46 
47 	ddir = rq_data_dir(rq);
48 	bytes = blk_rq_bytes(rq);
49 
50 	bucket = ddir + 2*(ilog2(bytes) - 9);
51 
52 	if (bucket < 0)
53 		return -1;
54 	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
55 		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
56 
57 	return bucket;
58 }
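
/*
 * Worked example of the bucket math above (illustrative only): for a
 * 4096-byte READ, ddir = 0 and ilog2(4096) = 12, so
 * bucket = 0 + 2 * (12 - 9) = 6. A 512-byte WRITE gives ddir = 1 and
 * bucket = 1 + 2 * (9 - 9) = 1. Requests below 512 bytes produce a
 * negative bucket and are not accounted; oversized requests are clamped
 * to the last bucket for their data direction.
 */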
59 
60 /*
61  * Check if any of the ctxs have pending work in this hardware queue
62  */
63 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
64 {
65 	return sbitmap_any_bit_set(&hctx->ctx_map) ||
66 			!list_empty_careful(&hctx->dispatch) ||
67 			blk_mq_sched_has_work(hctx);
68 }
69 
70 /*
71  * Mark this ctx as having pending work in this hardware queue
72  */
73 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
74 				     struct blk_mq_ctx *ctx)
75 {
76 	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
77 		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
78 }
79 
80 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
81 				      struct blk_mq_ctx *ctx)
82 {
83 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
84 }
85 
86 void blk_freeze_queue_start(struct request_queue *q)
87 {
88 	int freeze_depth;
89 
90 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
91 	if (freeze_depth == 1) {
92 		percpu_ref_kill(&q->q_usage_counter);
93 		blk_mq_run_hw_queues(q, false);
94 	}
95 }
96 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
97 
98 void blk_mq_freeze_queue_wait(struct request_queue *q)
99 {
100 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
101 }
102 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
103 
104 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
105 				     unsigned long timeout)
106 {
107 	return wait_event_timeout(q->mq_freeze_wq,
108 					percpu_ref_is_zero(&q->q_usage_counter),
109 					timeout);
110 }
111 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
112 
113 /*
114  * Guarantee no request is in use, so we can change any data structure of
115  * the queue afterward.
116  */
117 void blk_freeze_queue(struct request_queue *q)
118 {
119 	/*
120 	 * In the !blk_mq case we are only calling this to kill the
121 	 * q_usage_counter, otherwise this increases the freeze depth
122 	 * and waits for it to return to zero.  For this reason there is
123 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
124 	 * exported to drivers as the only user for unfreeze is blk_mq.
125 	 */
126 	blk_freeze_queue_start(q);
127 	blk_mq_freeze_queue_wait(q);
128 }
129 
130 void blk_mq_freeze_queue(struct request_queue *q)
131 {
132 	/*
133 	 * ...just an alias to keep freeze and unfreeze actions balanced
134 	 * in the blk_mq_* namespace
135 	 */
136 	blk_freeze_queue(q);
137 }
138 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
139 
140 void blk_mq_unfreeze_queue(struct request_queue *q)
141 {
142 	int freeze_depth;
143 
144 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
145 	WARN_ON_ONCE(freeze_depth < 0);
146 	if (!freeze_depth) {
147 		percpu_ref_reinit(&q->q_usage_counter);
148 		wake_up_all(&q->mq_freeze_wq);
149 	}
150 }
151 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
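
/*
 * Usage sketch (illustrative, not part of this file): a driver that must
 * update per-queue data that in-flight requests could otherwise observe
 * can bracket the update with a freeze/unfreeze pair:
 *
 *	blk_mq_freeze_queue(q);
 *	... update q->nr_requests, scheduler data, etc. ...
 *	blk_mq_unfreeze_queue(q);
 *
 * The freeze depth is a counter, so nested freezes from independent
 * callers are safe; the queue becomes usable again only when the depth
 * drops back to zero.
 */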
152 
153 /*
154  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
155  * mpt3sas driver such that this function can be removed.
156  */
157 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
158 {
159 	unsigned long flags;
160 
161 	spin_lock_irqsave(q->queue_lock, flags);
162 	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
163 	spin_unlock_irqrestore(q->queue_lock, flags);
164 }
165 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
166 
167 /**
168  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
169  * @q: request queue.
170  *
171  * Note: this function does not prevent the struct request end_io()
172  * callback from being invoked. Once this function returns, we make
173  * sure no dispatch can happen until the queue is unquiesced via
174  * blk_mq_unquiesce_queue().
175  */
176 void blk_mq_quiesce_queue(struct request_queue *q)
177 {
178 	struct blk_mq_hw_ctx *hctx;
179 	unsigned int i;
180 	bool rcu = false;
181 
182 	blk_mq_quiesce_queue_nowait(q);
183 
184 	queue_for_each_hw_ctx(q, hctx, i) {
185 		if (hctx->flags & BLK_MQ_F_BLOCKING)
186 			synchronize_srcu(hctx->queue_rq_srcu);
187 		else
188 			rcu = true;
189 	}
190 	if (rcu)
191 		synchronize_rcu();
192 }
193 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
194 
195 /**
196  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
197  * @q: request queue.
198  *
199  * This function restores the queue to the state it was in before
200  * blk_mq_quiesce_queue() was called.
201  */
202 void blk_mq_unquiesce_queue(struct request_queue *q)
203 {
204 	unsigned long flags;
205 
206 	spin_lock_irqsave(q->queue_lock, flags);
207 	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
208 	spin_unlock_irqrestore(q->queue_lock, flags);
209 
210 	/* dispatch requests which are inserted during quiescing */
211 	blk_mq_run_hw_queues(q, true);
212 }
213 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
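
/*
 * Usage sketch (illustrative): unlike freezing, quiescing does not wait
 * for outstanding requests to complete; it only guarantees that no
 * ->queue_rq() call is in progress once it returns. A driver resetting
 * its hardware might therefore do:
 *
 *	blk_mq_quiesce_queue(q);
 *	... reset hardware; requests stay queued, none are dispatched ...
 *	blk_mq_unquiesce_queue(q);
 */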
214 
215 void blk_mq_wake_waiters(struct request_queue *q)
216 {
217 	struct blk_mq_hw_ctx *hctx;
218 	unsigned int i;
219 
220 	queue_for_each_hw_ctx(q, hctx, i)
221 		if (blk_mq_hw_queue_mapped(hctx))
222 			blk_mq_tag_wakeup_all(hctx->tags, true);
223 
224 	/*
225 	 * If we are called because the queue has now been marked as
226 	 * dying, we need to ensure that processes currently waiting on
227 	 * the queue are notified as well.
228 	 */
229 	wake_up_all(&q->mq_freeze_wq);
230 }
231 
232 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
233 {
234 	return blk_mq_has_free_tags(hctx->tags);
235 }
236 EXPORT_SYMBOL(blk_mq_can_queue);
237 
238 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
239 		unsigned int tag, unsigned int op)
240 {
241 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
242 	struct request *rq = tags->static_rqs[tag];
243 
244 	rq->rq_flags = 0;
245 
246 	if (data->flags & BLK_MQ_REQ_INTERNAL) {
247 		rq->tag = -1;
248 		rq->internal_tag = tag;
249 	} else {
250 		if (blk_mq_tag_busy(data->hctx)) {
251 			rq->rq_flags = RQF_MQ_INFLIGHT;
252 			atomic_inc(&data->hctx->nr_active);
253 		}
254 		rq->tag = tag;
255 		rq->internal_tag = -1;
256 		data->hctx->tags->rqs[rq->tag] = rq;
257 	}
258 
259 	INIT_LIST_HEAD(&rq->queuelist);
260 	/* csd/requeue_work/fifo_time is initialized before use */
261 	rq->q = data->q;
262 	rq->mq_ctx = data->ctx;
263 	rq->cmd_flags = op;
264 	if (blk_queue_io_stat(data->q))
265 		rq->rq_flags |= RQF_IO_STAT;
266 	/* do not touch atomic flags, it needs atomic ops against the timer */
267 	rq->cpu = -1;
268 	INIT_HLIST_NODE(&rq->hash);
269 	RB_CLEAR_NODE(&rq->rb_node);
270 	rq->rq_disk = NULL;
271 	rq->part = NULL;
272 	rq->start_time = jiffies;
273 #ifdef CONFIG_BLK_CGROUP
274 	rq->rl = NULL;
275 	set_start_time_ns(rq);
276 	rq->io_start_time_ns = 0;
277 #endif
278 	rq->nr_phys_segments = 0;
279 #if defined(CONFIG_BLK_DEV_INTEGRITY)
280 	rq->nr_integrity_segments = 0;
281 #endif
282 	rq->special = NULL;
283 	/* tag was already set */
284 	rq->extra_len = 0;
285 
286 	INIT_LIST_HEAD(&rq->timeout_list);
287 	rq->timeout = 0;
288 
289 	rq->end_io = NULL;
290 	rq->end_io_data = NULL;
291 	rq->next_rq = NULL;
292 
293 	data->ctx->rq_dispatched[op_is_sync(op)]++;
294 	return rq;
295 }
296 
297 static struct request *blk_mq_get_request(struct request_queue *q,
298 		struct bio *bio, unsigned int op,
299 		struct blk_mq_alloc_data *data)
300 {
301 	struct elevator_queue *e = q->elevator;
302 	struct request *rq;
303 	unsigned int tag;
304 	struct blk_mq_ctx *local_ctx = NULL;
305 
306 	blk_queue_enter_live(q);
307 	data->q = q;
308 	if (likely(!data->ctx))
309 		data->ctx = local_ctx = blk_mq_get_ctx(q);
310 	if (likely(!data->hctx))
311 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
312 	if (op & REQ_NOWAIT)
313 		data->flags |= BLK_MQ_REQ_NOWAIT;
314 
315 	if (e) {
316 		data->flags |= BLK_MQ_REQ_INTERNAL;
317 
318 		/*
319 		 * Flush requests are special and go directly to the
320 		 * dispatch list.
321 		 */
322 		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
323 			e->type->ops.mq.limit_depth(op, data);
324 	}
325 
326 	tag = blk_mq_get_tag(data);
327 	if (tag == BLK_MQ_TAG_FAIL) {
328 		if (local_ctx) {
329 			blk_mq_put_ctx(local_ctx);
330 			data->ctx = NULL;
331 		}
332 		blk_queue_exit(q);
333 		return NULL;
334 	}
335 
336 	rq = blk_mq_rq_ctx_init(data, tag, op);
337 	if (!op_is_flush(op)) {
338 		rq->elv.icq = NULL;
339 		if (e && e->type->ops.mq.prepare_request) {
340 			if (e->type->icq_cache && rq_ioc(bio))
341 				blk_mq_sched_assign_ioc(rq, bio);
342 
343 			e->type->ops.mq.prepare_request(rq, bio);
344 			rq->rq_flags |= RQF_ELVPRIV;
345 		}
346 	}
347 	data->hctx->queued++;
348 	return rq;
349 }
350 
351 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
352 		unsigned int flags)
353 {
354 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
355 	struct request *rq;
356 	int ret;
357 
358 	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
359 	if (ret)
360 		return ERR_PTR(ret);
361 
362 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
363 	blk_queue_exit(q);
364 
365 	if (!rq)
366 		return ERR_PTR(-EWOULDBLOCK);
367 
368 	blk_mq_put_ctx(alloc_data.ctx);
369 
370 	rq->__data_len = 0;
371 	rq->__sector = (sector_t) -1;
372 	rq->bio = rq->biotail = NULL;
373 	return rq;
374 }
375 EXPORT_SYMBOL(blk_mq_alloc_request);
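
/*
 * Usage sketch (illustrative): passthrough-style callers typically
 * allocate a request directly, fill it in, and execute it:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the command, then blk_execute_rq() or similar ...
 *	blk_mq_free_request(rq);
 *
 * With BLK_MQ_REQ_NOWAIT in @flags the call fails with -EWOULDBLOCK
 * instead of sleeping on tag exhaustion.
 */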
376 
377 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
378 		unsigned int op, unsigned int flags, unsigned int hctx_idx)
379 {
380 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
381 	struct request *rq;
382 	unsigned int cpu;
383 	int ret;
384 
385 	/*
386 	 * If the tag allocator sleeps we could get an allocation for a
387 	 * different hardware context.  No need to complicate the low level
388 	 * allocator for the rare use case of a command tied to
389 	 * a specific queue.
390 	 */
391 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
392 		return ERR_PTR(-EINVAL);
393 
394 	if (hctx_idx >= q->nr_hw_queues)
395 		return ERR_PTR(-EIO);
396 
397 	ret = blk_queue_enter(q, true);
398 	if (ret)
399 		return ERR_PTR(ret);
400 
401 	/*
402 	 * Check if the hardware context is actually mapped to anything.
403 	 * If not, tell the caller that it should skip this queue.
404 	 */
405 	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
406 	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
407 		blk_queue_exit(q);
408 		return ERR_PTR(-EXDEV);
409 	}
410 	cpu = cpumask_first(alloc_data.hctx->cpumask);
411 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
412 
413 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
414 	blk_queue_exit(q);
415 
416 	if (!rq)
417 		return ERR_PTR(-EWOULDBLOCK);
418 
419 	return rq;
420 }
421 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
422 
423 void blk_mq_free_request(struct request *rq)
424 {
425 	struct request_queue *q = rq->q;
426 	struct elevator_queue *e = q->elevator;
427 	struct blk_mq_ctx *ctx = rq->mq_ctx;
428 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
429 	const int sched_tag = rq->internal_tag;
430 
431 	if (rq->rq_flags & RQF_ELVPRIV) {
432 		if (e && e->type->ops.mq.finish_request)
433 			e->type->ops.mq.finish_request(rq);
434 		if (rq->elv.icq) {
435 			put_io_context(rq->elv.icq->ioc);
436 			rq->elv.icq = NULL;
437 		}
438 	}
439 
440 	ctx->rq_completed[rq_is_sync(rq)]++;
441 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
442 		atomic_dec(&hctx->nr_active);
443 
444 	wbt_done(q->rq_wb, &rq->issue_stat);
445 
446 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
447 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
448 	if (rq->tag != -1)
449 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
450 	if (sched_tag != -1)
451 		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
452 	blk_mq_sched_restart(hctx);
453 	blk_queue_exit(q);
454 }
455 EXPORT_SYMBOL_GPL(blk_mq_free_request);
456 
457 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
458 {
459 	blk_account_io_done(rq);
460 
461 	if (rq->end_io) {
462 		wbt_done(rq->q->rq_wb, &rq->issue_stat);
463 		rq->end_io(rq, error);
464 	} else {
465 		if (unlikely(blk_bidi_rq(rq)))
466 			blk_mq_free_request(rq->next_rq);
467 		blk_mq_free_request(rq);
468 	}
469 }
470 EXPORT_SYMBOL(__blk_mq_end_request);
471 
472 void blk_mq_end_request(struct request *rq, blk_status_t error)
473 {
474 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
475 		BUG();
476 	__blk_mq_end_request(rq, error);
477 }
478 EXPORT_SYMBOL(blk_mq_end_request);
479 
480 static void __blk_mq_complete_request_remote(void *data)
481 {
482 	struct request *rq = data;
483 
484 	rq->q->softirq_done_fn(rq);
485 }
486 
487 static void __blk_mq_complete_request(struct request *rq)
488 {
489 	struct blk_mq_ctx *ctx = rq->mq_ctx;
490 	bool shared = false;
491 	int cpu;
492 
493 	if (rq->internal_tag != -1)
494 		blk_mq_sched_completed_request(rq);
495 	if (rq->rq_flags & RQF_STATS) {
496 		blk_mq_poll_stats_start(rq->q);
497 		blk_stat_add(rq);
498 	}
499 
500 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
501 		rq->q->softirq_done_fn(rq);
502 		return;
503 	}
504 
505 	cpu = get_cpu();
506 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
507 		shared = cpus_share_cache(cpu, ctx->cpu);
508 
509 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
510 		rq->csd.func = __blk_mq_complete_request_remote;
511 		rq->csd.info = rq;
512 		rq->csd.flags = 0;
513 		smp_call_function_single_async(ctx->cpu, &rq->csd);
514 	} else {
515 		rq->q->softirq_done_fn(rq);
516 	}
517 	put_cpu();
518 }
519 
520 /**
521  * blk_mq_complete_request - end I/O on a request
522  * @rq:		the request being processed
523  *
524  * Description:
525  *	Ends all I/O on a request. It does not handle partial completions.
526  *	The actual completion happens out-of-order, through an IPI handler.
527  **/
528 void blk_mq_complete_request(struct request *rq)
529 {
530 	struct request_queue *q = rq->q;
531 
532 	if (unlikely(blk_should_fake_timeout(q)))
533 		return;
534 	if (!blk_mark_rq_complete(rq))
535 		__blk_mq_complete_request(rq);
536 }
537 EXPORT_SYMBOL(blk_mq_complete_request);
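
/*
 * Completion-flow sketch (illustrative): a typical driver calls
 * blk_mq_start_request() from its ->queue_rq() handler, and later calls
 * blk_mq_complete_request(rq) from its interrupt handler. Completion is
 * then routed through q->softirq_done_fn (possibly via an IPI to the
 * submitting CPU, see __blk_mq_complete_request() above), which finally
 * ends the request with blk_mq_end_request(rq, status).
 */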
538 
539 int blk_mq_request_started(struct request *rq)
540 {
541 	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
542 }
543 EXPORT_SYMBOL_GPL(blk_mq_request_started);
544 
545 void blk_mq_start_request(struct request *rq)
546 {
547 	struct request_queue *q = rq->q;
548 
549 	blk_mq_sched_started_request(rq);
550 
551 	trace_block_rq_issue(q, rq);
552 
553 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
554 		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
555 		rq->rq_flags |= RQF_STATS;
556 		wbt_issue(q->rq_wb, &rq->issue_stat);
557 	}
558 
559 	blk_add_timer(rq);
560 
561 	/*
562 	 * Ensure that ->deadline is visible before we set the started
563 	 * flag and clear the completed flag.
564 	 */
565 	smp_mb__before_atomic();
566 
567 	/*
568 	 * Mark us as started and clear complete. Complete might have been
569 	 * set if requeue raced with timeout, which then marked it as
570 	 * complete. So be sure to clear complete again when we start
571 	 * the request, otherwise we'll ignore the completion event.
572 	 */
573 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
574 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
575 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
576 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
577 
578 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
579 		/*
580 		 * Make sure space for the drain appears.  We know we can do
581 		 * this because max_hw_segments has been adjusted to be one
582 		 * fewer than the device can handle.
583 		 */
584 		rq->nr_phys_segments++;
585 	}
586 }
587 EXPORT_SYMBOL(blk_mq_start_request);
588 
589 /*
590  * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
591  * flag isn't set yet, so there may be a race with the timeout handler.
592  * But since rq->deadline has just been set in .queue_rq() in this
593  * situation, the race is not possible in practice, because rq->timeout
594  * should be large enough to cover the window between
595  * blk_mq_start_request() being called from .queue_rq() and
596  * REQ_ATOM_STARTED being cleared here.
597  */
598 static void __blk_mq_requeue_request(struct request *rq)
599 {
600 	struct request_queue *q = rq->q;
601 
602 	trace_block_rq_requeue(q, rq);
603 	wbt_requeue(q->rq_wb, &rq->issue_stat);
604 	blk_mq_sched_requeue_request(rq);
605 
606 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
607 		if (q->dma_drain_size && blk_rq_bytes(rq))
608 			rq->nr_phys_segments--;
609 	}
610 }
611 
612 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
613 {
614 	__blk_mq_requeue_request(rq);
615 
616 	BUG_ON(blk_queued_rq(rq));
617 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
618 }
619 EXPORT_SYMBOL(blk_mq_requeue_request);
620 
621 static void blk_mq_requeue_work(struct work_struct *work)
622 {
623 	struct request_queue *q =
624 		container_of(work, struct request_queue, requeue_work.work);
625 	LIST_HEAD(rq_list);
626 	struct request *rq, *next;
627 	unsigned long flags;
628 
629 	spin_lock_irqsave(&q->requeue_lock, flags);
630 	list_splice_init(&q->requeue_list, &rq_list);
631 	spin_unlock_irqrestore(&q->requeue_lock, flags);
632 
633 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
634 		if (!(rq->rq_flags & RQF_SOFTBARRIER))
635 			continue;
636 
637 		rq->rq_flags &= ~RQF_SOFTBARRIER;
638 		list_del_init(&rq->queuelist);
639 		blk_mq_sched_insert_request(rq, true, false, false, true);
640 	}
641 
642 	while (!list_empty(&rq_list)) {
643 		rq = list_entry(rq_list.next, struct request, queuelist);
644 		list_del_init(&rq->queuelist);
645 		blk_mq_sched_insert_request(rq, false, false, false, true);
646 	}
647 
648 	blk_mq_run_hw_queues(q, false);
649 }
650 
651 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
652 				bool kick_requeue_list)
653 {
654 	struct request_queue *q = rq->q;
655 	unsigned long flags;
656 
657 	/*
658 	 * We abuse this flag, which is otherwise used by the I/O scheduler,
659 	 * to request head insertion from the workqueue.
660 	 */
661 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
662 
663 	spin_lock_irqsave(&q->requeue_lock, flags);
664 	if (at_head) {
665 		rq->rq_flags |= RQF_SOFTBARRIER;
666 		list_add(&rq->queuelist, &q->requeue_list);
667 	} else {
668 		list_add_tail(&rq->queuelist, &q->requeue_list);
669 	}
670 	spin_unlock_irqrestore(&q->requeue_lock, flags);
671 
672 	if (kick_requeue_list)
673 		blk_mq_kick_requeue_list(q);
674 }
675 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
676 
677 void blk_mq_kick_requeue_list(struct request_queue *q)
678 {
679 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
680 }
681 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
682 
683 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
684 				    unsigned long msecs)
685 {
686 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
687 				    msecs_to_jiffies(msecs));
688 }
689 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
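
/*
 * Usage sketch (illustrative): a driver hitting a transient error after
 * a request has been started can hand it back for a later retry:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(q, 100);
 *
 * i.e. requeue without an immediate kick, then run the requeue work
 * roughly 100 ms later.
 */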
690 
691 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
692 {
693 	if (tag < tags->nr_tags) {
694 		prefetch(tags->rqs[tag]);
695 		return tags->rqs[tag];
696 	}
697 
698 	return NULL;
699 }
700 EXPORT_SYMBOL(blk_mq_tag_to_rq);
701 
702 struct blk_mq_timeout_data {
703 	unsigned long next;
704 	unsigned int next_set;
705 };
706 
707 void blk_mq_rq_timed_out(struct request *req, bool reserved)
708 {
709 	const struct blk_mq_ops *ops = req->q->mq_ops;
710 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
711 
712 	/*
713 	 * We know that complete is set at this point. If STARTED isn't set
714 	 * anymore, then the request isn't active and the "timeout" should
715 	 * just be ignored. This can happen due to the bitflag ordering.
716 	 * Timeout first checks if STARTED is set, and if it is, assumes
717 	 * the request is active. But if we race with completion, then
718 	 * both flags will get cleared. So check here again, and ignore
719 	 * a timeout event with a request that isn't active.
720 	 */
721 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
722 		return;
723 
724 	if (ops->timeout)
725 		ret = ops->timeout(req, reserved);
726 
727 	switch (ret) {
728 	case BLK_EH_HANDLED:
729 		__blk_mq_complete_request(req);
730 		break;
731 	case BLK_EH_RESET_TIMER:
732 		blk_add_timer(req);
733 		blk_clear_rq_complete(req);
734 		break;
735 	case BLK_EH_NOT_HANDLED:
736 		break;
737 	default:
738 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
739 		break;
740 	}
741 }
742 
743 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
744 		struct request *rq, void *priv, bool reserved)
745 {
746 	struct blk_mq_timeout_data *data = priv;
747 
748 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
749 		return;
750 
751 	/*
752 	 * The rq being checked may already have been freed and reallocated
753 	 * here; we avoid this race by checking rq->deadline and the
754 	 * REQ_ATOM_COMPLETE flag together:
755 	 *
756 	 * - if rq->deadline is observed as the new value because of reuse,
757 	 *   the rq won't time out, since its deadline is in the future.
758 	 * - if rq->deadline is observed as the previous value, the
759 	 *   REQ_ATOM_COMPLETE flag won't have been cleared in the reuse
760 	 *   path, because we put a barrier between setting rq->deadline
761 	 *   and clearing the flag in blk_mq_start_request(), so this rq
762 	 *   won't be timed out either.
763 	 */
764 	if (time_after_eq(jiffies, rq->deadline)) {
765 		if (!blk_mark_rq_complete(rq))
766 			blk_mq_rq_timed_out(rq, reserved);
767 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
768 		data->next = rq->deadline;
769 		data->next_set = 1;
770 	}
771 }
772 
773 static void blk_mq_timeout_work(struct work_struct *work)
774 {
775 	struct request_queue *q =
776 		container_of(work, struct request_queue, timeout_work);
777 	struct blk_mq_timeout_data data = {
778 		.next		= 0,
779 		.next_set	= 0,
780 	};
781 	int i;
782 
783 	/* A deadlock might occur if a request is stuck requiring a
784 	 * timeout at the same time a queue freeze is waiting for
785 	 * completion, since the timeout code would not be able to
786 	 * acquire the queue reference here.
787 	 *
788 	 * That's why we don't use blk_queue_enter here; instead, we use
789 	 * percpu_ref_tryget directly, because we need to be able to
790 	 * obtain a reference even in the short window between the queue
791 	 * starting to freeze, by dropping the first reference in
792 	 * blk_freeze_queue_start, and the moment the last request is
793 	 * consumed, marked by the instant q_usage_counter reaches
794 	 * zero.
795 	 */
796 	if (!percpu_ref_tryget(&q->q_usage_counter))
797 		return;
798 
799 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
800 
801 	if (data.next_set) {
802 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
803 		mod_timer(&q->timeout, data.next);
804 	} else {
805 		struct blk_mq_hw_ctx *hctx;
806 
807 		queue_for_each_hw_ctx(q, hctx, i) {
808 			/* the hctx may be unmapped, so check it here */
809 			if (blk_mq_hw_queue_mapped(hctx))
810 				blk_mq_tag_idle(hctx);
811 		}
812 	}
813 	blk_queue_exit(q);
814 }
815 
816 struct flush_busy_ctx_data {
817 	struct blk_mq_hw_ctx *hctx;
818 	struct list_head *list;
819 };
820 
821 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
822 {
823 	struct flush_busy_ctx_data *flush_data = data;
824 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
825 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
826 
827 	sbitmap_clear_bit(sb, bitnr);
828 	spin_lock(&ctx->lock);
829 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
830 	spin_unlock(&ctx->lock);
831 	return true;
832 }
833 
834 /*
835  * Process software queues that have been marked busy, splicing them
836  * to the for-dispatch list.
837  */
838 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
839 {
840 	struct flush_busy_ctx_data data = {
841 		.hctx = hctx,
842 		.list = list,
843 	};
844 
845 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
846 }
847 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
848 
849 static inline unsigned int queued_to_index(unsigned int queued)
850 {
851 	if (!queued)
852 		return 0;
853 
854 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
855 }
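
/*
 * Worked example for queued_to_index() (illustrative): a batch of five
 * dispatched requests gives ilog2(5) = 2, so the counter at index 3 is
 * bumped; dispatch batch sizes are thus histogrammed by powers of two,
 * capped at BLK_MQ_MAX_DISPATCH_ORDER - 1.
 */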
856 
857 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
858 			   bool wait)
859 {
860 	struct blk_mq_alloc_data data = {
861 		.q = rq->q,
862 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
863 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
864 	};
865 
866 	might_sleep_if(wait);
867 
868 	if (rq->tag != -1)
869 		goto done;
870 
871 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
872 		data.flags |= BLK_MQ_REQ_RESERVED;
873 
874 	rq->tag = blk_mq_get_tag(&data);
875 	if (rq->tag >= 0) {
876 		if (blk_mq_tag_busy(data.hctx)) {
877 			rq->rq_flags |= RQF_MQ_INFLIGHT;
878 			atomic_inc(&data.hctx->nr_active);
879 		}
880 		data.hctx->tags->rqs[rq->tag] = rq;
881 	}
882 
883 done:
884 	if (hctx)
885 		*hctx = data.hctx;
886 	return rq->tag != -1;
887 }
888 
889 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
890 				    struct request *rq)
891 {
892 	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
893 	rq->tag = -1;
894 
895 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
896 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
897 		atomic_dec(&hctx->nr_active);
898 	}
899 }
900 
901 static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
902 				       struct request *rq)
903 {
904 	if (rq->tag == -1 || rq->internal_tag == -1)
905 		return;
906 
907 	__blk_mq_put_driver_tag(hctx, rq);
908 }
909 
910 static void blk_mq_put_driver_tag(struct request *rq)
911 {
912 	struct blk_mq_hw_ctx *hctx;
913 
914 	if (rq->tag == -1 || rq->internal_tag == -1)
915 		return;
916 
917 	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
918 	__blk_mq_put_driver_tag(hctx, rq);
919 }
920 
921 /*
922  * If we fail getting a driver tag because all the driver tags are already
923  * assigned and on the dispatch list, BUT the first entry does not have a
924  * tag, then we could deadlock. For that case, move entries with assigned
925  * driver tags to the front, leaving the set of tagged requests in the
926  * same order, and the untagged set in the same order.
927  */
928 static bool reorder_tags_to_front(struct list_head *list)
929 {
930 	struct request *rq, *tmp, *first = NULL;
931 
932 	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
933 		if (rq == first)
934 			break;
935 		if (rq->tag != -1) {
936 			list_move(&rq->queuelist, list);
937 			if (!first)
938 				first = rq;
939 		}
940 	}
941 
942 	return first != NULL;
943 }
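
/*
 * Worked example (illustrative): given a dispatch list
 * [A(no tag), B(tag), C(no tag), D(tag)], the reverse walk moves D and
 * then B to the front, yielding [B, D, A, C]: tagged requests keep their
 * relative order, untagged requests keep theirs, and the head of the
 * list now holds a request that can actually be issued.
 */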
944 
945 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
946 				void *key)
947 {
948 	struct blk_mq_hw_ctx *hctx;
949 
950 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
951 
952 	list_del(&wait->entry);
953 	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
954 	blk_mq_run_hw_queue(hctx, true);
955 	return 1;
956 }
957 
958 static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
959 {
960 	struct sbq_wait_state *ws;
961 
962 	/*
963 	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
964 	 * The thread which wins the race to grab this bit adds the hardware
965 	 * queue to the wait queue.
966 	 */
967 	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
968 	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
969 		return false;
970 
971 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
972 	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
973 
974 	/*
975 	 * As soon as this returns, it's no longer safe to fiddle with
976 	 * hctx->dispatch_wait, since a completion can wake up the wait queue
977 	 * and unlock the bit.
978 	 */
979 	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
980 	return true;
981 }
982 
983 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
984 {
985 	struct blk_mq_hw_ctx *hctx;
986 	struct request *rq;
987 	int errors, queued;
988 
989 	if (list_empty(list))
990 		return false;
991 
992 	/*
993 	 * Now process all the entries, sending them to the driver.
994 	 */
995 	errors = queued = 0;
996 	do {
997 		struct blk_mq_queue_data bd;
998 		blk_status_t ret;
999 
1000 		rq = list_first_entry(list, struct request, queuelist);
1001 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1002 			if (!queued && reorder_tags_to_front(list))
1003 				continue;
1004 
1005 			/*
1006 			 * The initial allocation attempt failed, so we need to
1007 			 * rerun the hardware queue when a tag is freed.
1008 			 */
1009 			if (!blk_mq_dispatch_wait_add(hctx))
1010 				break;
1011 
1012 			/*
1013 			 * It's possible that a tag was freed in the window
1014 			 * between the allocation failure and adding the
1015 			 * hardware queue to the wait queue.
1016 			 */
1017 			if (!blk_mq_get_driver_tag(rq, &hctx, false))
1018 				break;
1019 		}
1020 
1021 		list_del_init(&rq->queuelist);
1022 
1023 		bd.rq = rq;
1024 
1025 		/*
1026 		 * Flag last if we have no more requests, or if we have more
1027 		 * but can't assign a driver tag to it.
1028 		 */
1029 		if (list_empty(list))
1030 			bd.last = true;
1031 		else {
1032 			struct request *nxt;
1033 
1034 			nxt = list_first_entry(list, struct request, queuelist);
1035 			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1036 		}
1037 
1038 		ret = q->mq_ops->queue_rq(hctx, &bd);
1039 		if (ret == BLK_STS_RESOURCE) {
1040 			blk_mq_put_driver_tag_hctx(hctx, rq);
1041 			list_add(&rq->queuelist, list);
1042 			__blk_mq_requeue_request(rq);
1043 			break;
1044 		}
1045 
1046 		if (unlikely(ret != BLK_STS_OK)) {
1047 			errors++;
1048 			blk_mq_end_request(rq, BLK_STS_IOERR);
1049 			continue;
1050 		}
1051 
1052 		queued++;
1053 	} while (!list_empty(list));
1054 
1055 	hctx->dispatched[queued_to_index(queued)]++;
1056 
1057 	/*
1058 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1059 	 * that is where we will continue on the next queue run.
1060 	 */
1061 	if (!list_empty(list)) {
1062 		/*
1063 		 * If an I/O scheduler has been configured and we got a driver
1064 		 * tag for the next request already, free it again.
1065 		 */
1066 		rq = list_first_entry(list, struct request, queuelist);
1067 		blk_mq_put_driver_tag(rq);
1068 
1069 		spin_lock(&hctx->lock);
1070 		list_splice_init(list, &hctx->dispatch);
1071 		spin_unlock(&hctx->lock);
1072 
1073 		/*
1074 		 * If SCHED_RESTART was set by the caller of this function and
1075 		 * it is no longer set that means that it was cleared by another
1076 		 * thread and hence that a queue rerun is needed.
1077 		 *
1078 		 * If TAG_WAITING is set that means that an I/O scheduler has
1079 		 * been configured and another thread is waiting for a driver
1080 		 * tag. To guarantee fairness, do not rerun this hardware queue
1081 		 * but let the other thread grab the driver tag.
1082 		 *
1083 		 * If no I/O scheduler has been configured it is possible that
1084 		 * the hardware queue got stopped and restarted before requests
1085 		 * were pushed back onto the dispatch list. Rerun the queue to
1086 		 * avoid starvation. Notes:
1087 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
1088 		 *   been stopped before rerunning a queue.
1089 		 * - Some but not all block drivers stop a queue before
1090 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1091 		 *   and dm-rq.
1092 		 */
1093 		if (!blk_mq_sched_needs_restart(hctx) &&
1094 		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
1095 			blk_mq_run_hw_queue(hctx, true);
1096 	}
1097 
1098 	return (queued + errors) != 0;
1099 }
1100 
1101 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1102 {
1103 	int srcu_idx;
1104 
1105 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1106 		cpu_online(hctx->next_cpu));
1107 
1108 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1109 		rcu_read_lock();
1110 		blk_mq_sched_dispatch_requests(hctx);
1111 		rcu_read_unlock();
1112 	} else {
1113 		might_sleep();
1114 
1115 		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1116 		blk_mq_sched_dispatch_requests(hctx);
1117 		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1118 	}
1119 }
1120 
1121 /*
1122  * It'd be great if the workqueue API had a way to pass
1123  * in a mask and had some smarts for more clever placement.
1124  * For now we just round-robin here, switching for every
1125  * BLK_MQ_CPU_WORK_BATCH queued items.
1126  */
1127 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1128 {
1129 	if (hctx->queue->nr_hw_queues == 1)
1130 		return WORK_CPU_UNBOUND;
1131 
1132 	if (--hctx->next_cpu_batch <= 0) {
1133 		int next_cpu;
1134 
1135 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1136 		if (next_cpu >= nr_cpu_ids)
1137 			next_cpu = cpumask_first(hctx->cpumask);
1138 
1139 		hctx->next_cpu = next_cpu;
1140 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1141 	}
1142 
1143 	return hctx->next_cpu;
1144 }
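
/*
 * Example (illustrative): with hctx->cpumask = {2,5,7}, the run work is
 * scheduled on CPU 2 for BLK_MQ_CPU_WORK_BATCH consecutive queue runs,
 * then on CPU 5, then on CPU 7, wrapping back to CPU 2. Single-hw-queue
 * devices skip this and use WORK_CPU_UNBOUND instead.
 */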
1145 
1146 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1147 					unsigned long msecs)
1148 {
1149 	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1150 		return;
1151 
1152 	if (unlikely(blk_mq_hctx_stopped(hctx)))
1153 		return;
1154 
1155 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1156 		int cpu = get_cpu();
1157 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1158 			__blk_mq_run_hw_queue(hctx);
1159 			put_cpu();
1160 			return;
1161 		}
1162 
1163 		put_cpu();
1164 	}
1165 
1166 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1167 					 &hctx->run_work,
1168 					 msecs_to_jiffies(msecs));
1169 }
1170 
1171 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1172 {
1173 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
1174 }
1175 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1176 
1177 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1178 {
1179 	__blk_mq_delay_run_hw_queue(hctx, async, 0);
1180 }
1181 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1182 
1183 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1184 {
1185 	struct blk_mq_hw_ctx *hctx;
1186 	int i;
1187 
1188 	queue_for_each_hw_ctx(q, hctx, i) {
1189 		if (!blk_mq_hctx_has_pending(hctx) ||
1190 		    blk_mq_hctx_stopped(hctx))
1191 			continue;
1192 
1193 		blk_mq_run_hw_queue(hctx, async);
1194 	}
1195 }
1196 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1197 
1198 /**
1199  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1200  * @q: request queue.
1201  *
1202  * The caller is responsible for serializing this function against
1203  * blk_mq_{start,stop}_hw_queue().
1204  */
1205 bool blk_mq_queue_stopped(struct request_queue *q)
1206 {
1207 	struct blk_mq_hw_ctx *hctx;
1208 	int i;
1209 
1210 	queue_for_each_hw_ctx(q, hctx, i)
1211 		if (blk_mq_hctx_stopped(hctx))
1212 			return true;
1213 
1214 	return false;
1215 }
1216 EXPORT_SYMBOL(blk_mq_queue_stopped);
1217 
1218 /*
1219  * This function is often used by a driver to pause .queue_rq() when
1220  * there aren't enough resources or some condition isn't satisfied, and
1221  * BLK_STS_RESOURCE is usually returned.
1222  *
1223  * We do not guarantee that dispatch can be drained or blocked
1224  * after blk_mq_stop_hw_queue() returns. Please use
1225  * blk_mq_quiesce_queue() for that requirement.
1226  */
1227 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1228 {
1229 	cancel_delayed_work(&hctx->run_work);
1230 
1231 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1232 }
1233 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1234 
1235 /*
1236  * This function is often used by a driver to pause .queue_rq() when
1237  * there aren't enough resources or some condition isn't satisfied, and
1238  * BLK_STS_RESOURCE is usually returned.
1239  *
1240  * We do not guarantee that dispatch can be drained or blocked
1241  * after blk_mq_stop_hw_queues() returns. Please use
1242  * blk_mq_quiesce_queue() for that requirement.
1243  */
1244 void blk_mq_stop_hw_queues(struct request_queue *q)
1245 {
1246 	struct blk_mq_hw_ctx *hctx;
1247 	int i;
1248 
1249 	queue_for_each_hw_ctx(q, hctx, i)
1250 		blk_mq_stop_hw_queue(hctx);
1251 }
1252 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1253 
1254 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1255 {
1256 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1257 
1258 	blk_mq_run_hw_queue(hctx, false);
1259 }
1260 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1261 
1262 void blk_mq_start_hw_queues(struct request_queue *q)
1263 {
1264 	struct blk_mq_hw_ctx *hctx;
1265 	int i;
1266 
1267 	queue_for_each_hw_ctx(q, hctx, i)
1268 		blk_mq_start_hw_queue(hctx);
1269 }
1270 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1271 
1272 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1273 {
1274 	if (!blk_mq_hctx_stopped(hctx))
1275 		return;
1276 
1277 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1278 	blk_mq_run_hw_queue(hctx, async);
1279 }
1280 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1281 
1282 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1283 {
1284 	struct blk_mq_hw_ctx *hctx;
1285 	int i;
1286 
1287 	queue_for_each_hw_ctx(q, hctx, i)
1288 		blk_mq_start_stopped_hw_queue(hctx, async);
1289 }
1290 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
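
/*
 * Usage sketch (illustrative): a driver that runs out of device resources
 * in ->queue_rq() commonly pairs these helpers:
 *
 *	if (out_of_resources) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 * and later, e.g. from an interrupt handler once resources free up:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);
 *
 * out_of_resources stands for whatever driver-private condition applies.
 */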
1291 
1292 static void blk_mq_run_work_fn(struct work_struct *work)
1293 {
1294 	struct blk_mq_hw_ctx *hctx;
1295 
1296 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1297 
1298 	/*
1299 	 * If we are stopped, don't run the queue. The exception is if
1300 	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1301 	 * the STOPPED bit and run it.
1302 	 */
1303 	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1304 		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1305 			return;
1306 
1307 		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1308 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1309 	}
1310 
1311 	__blk_mq_run_hw_queue(hctx);
1312 }
1313 
1314 
1315 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1316 {
1317 	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1318 		return;
1319 
1320 	/*
1321 	 * Stop the hw queue, then modify currently delayed work.
1322 	 * This should prevent us from running the queue prematurely.
1323 	 * Mark the queue as auto-clearing STOPPED when it runs.
1324 	 */
1325 	blk_mq_stop_hw_queue(hctx);
1326 	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1327 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1328 					&hctx->run_work,
1329 					msecs_to_jiffies(msecs));
1330 }
1331 EXPORT_SYMBOL(blk_mq_delay_queue);
1332 
1333 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1334 					    struct request *rq,
1335 					    bool at_head)
1336 {
1337 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1338 
1339 	lockdep_assert_held(&ctx->lock);
1340 
1341 	trace_block_rq_insert(hctx->queue, rq);
1342 
1343 	if (at_head)
1344 		list_add(&rq->queuelist, &ctx->rq_list);
1345 	else
1346 		list_add_tail(&rq->queuelist, &ctx->rq_list);
1347 }
1348 
1349 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1350 			     bool at_head)
1351 {
1352 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1353 
1354 	lockdep_assert_held(&ctx->lock);
1355 
1356 	__blk_mq_insert_req_list(hctx, rq, at_head);
1357 	blk_mq_hctx_mark_pending(hctx, ctx);
1358 }
1359 
1360 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1361 			    struct list_head *list)
1362 
1363 {
1364 	/*
1365 	 * Preemption doesn't flush the plug list, so it's possible that
1366 	 * ctx->cpu is offline now.
1367 	 */
1368 	spin_lock(&ctx->lock);
1369 	while (!list_empty(list)) {
1370 		struct request *rq;
1371 
1372 		rq = list_first_entry(list, struct request, queuelist);
1373 		BUG_ON(rq->mq_ctx != ctx);
1374 		list_del_init(&rq->queuelist);
1375 		__blk_mq_insert_req_list(hctx, rq, false);
1376 	}
1377 	blk_mq_hctx_mark_pending(hctx, ctx);
1378 	spin_unlock(&ctx->lock);
1379 }
1380 
1381 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1382 {
1383 	struct request *rqa = container_of(a, struct request, queuelist);
1384 	struct request *rqb = container_of(b, struct request, queuelist);
1385 
1386 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1387 		 (rqa->mq_ctx == rqb->mq_ctx &&
1388 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1389 }
1390 
1391 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1392 {
1393 	struct blk_mq_ctx *this_ctx;
1394 	struct request_queue *this_q;
1395 	struct request *rq;
1396 	LIST_HEAD(list);
1397 	LIST_HEAD(ctx_list);
1398 	unsigned int depth;
1399 
1400 	list_splice_init(&plug->mq_list, &list);
1401 
1402 	list_sort(NULL, &list, plug_ctx_cmp);
1403 
1404 	this_q = NULL;
1405 	this_ctx = NULL;
1406 	depth = 0;
1407 
1408 	while (!list_empty(&list)) {
1409 		rq = list_entry_rq(list.next);
1410 		list_del_init(&rq->queuelist);
1411 		BUG_ON(!rq->q);
1412 		if (rq->mq_ctx != this_ctx) {
1413 			if (this_ctx) {
1414 				trace_block_unplug(this_q, depth, from_schedule);
1415 				blk_mq_sched_insert_requests(this_q, this_ctx,
1416 								&ctx_list,
1417 								from_schedule);
1418 			}
1419 
1420 			this_ctx = rq->mq_ctx;
1421 			this_q = rq->q;
1422 			depth = 0;
1423 		}
1424 
1425 		depth++;
1426 		list_add_tail(&rq->queuelist, &ctx_list);
1427 	}
1428 
1429 	/*
1430 	 * If 'this_ctx' is set, we know we have entries to complete
1431 	 * on 'ctx_list'. Do those.
1432 	 */
1433 	if (this_ctx) {
1434 		trace_block_unplug(this_q, depth, from_schedule);
1435 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1436 						from_schedule);
1437 	}
1438 }
1439 
1440 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1441 {
1442 	blk_init_request_from_bio(rq, bio);
1443 
1444 	blk_account_io_start(rq, true);
1445 }
1446 
1447 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1448 {
1449 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1450 		!blk_queue_nomerges(hctx->queue);
1451 }
1452 
1453 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1454 				   struct blk_mq_ctx *ctx,
1455 				   struct request *rq)
1456 {
1457 	spin_lock(&ctx->lock);
1458 	__blk_mq_insert_request(hctx, rq, false);
1459 	spin_unlock(&ctx->lock);
1460 }
1461 
1462 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1463 {
1464 	if (rq->tag != -1)
1465 		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1466 
1467 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1468 }
1469 
1470 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1471 					struct request *rq,
1472 					blk_qc_t *cookie, bool may_sleep)
1473 {
1474 	struct request_queue *q = rq->q;
1475 	struct blk_mq_queue_data bd = {
1476 		.rq = rq,
1477 		.last = true,
1478 	};
1479 	blk_qc_t new_cookie;
1480 	blk_status_t ret;
1481 	bool run_queue = true;
1482 
1483 	/* RCU or SRCU read lock is needed before checking quiesced flag */
1484 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1485 		run_queue = false;
1486 		goto insert;
1487 	}
1488 
1489 	if (q->elevator)
1490 		goto insert;
1491 
1492 	if (!blk_mq_get_driver_tag(rq, NULL, false))
1493 		goto insert;
1494 
1495 	new_cookie = request_to_qc_t(hctx, rq);
1496 
1497 	/*
1498 	 * If queueing succeeds, we are done. On a hard error, end the
1499 	 * request. For any other status (busy), just add it to our list
1500 	 * as we previously would have done.
1501 	 */
1502 	ret = q->mq_ops->queue_rq(hctx, &bd);
1503 	switch (ret) {
1504 	case BLK_STS_OK:
1505 		*cookie = new_cookie;
1506 		return;
1507 	case BLK_STS_RESOURCE:
1508 		__blk_mq_requeue_request(rq);
1509 		goto insert;
1510 	default:
1511 		*cookie = BLK_QC_T_NONE;
1512 		blk_mq_end_request(rq, ret);
1513 		return;
1514 	}
1515 
1516 insert:
1517 	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
1518 }
1519 
1520 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1521 		struct request *rq, blk_qc_t *cookie)
1522 {
1523 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1524 		rcu_read_lock();
1525 		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
1526 		rcu_read_unlock();
1527 	} else {
1528 		unsigned int srcu_idx;
1529 
1530 		might_sleep();
1531 
1532 		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1533 		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
1534 		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1535 	}
1536 }
1537 
1538 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1539 {
1540 	const int is_sync = op_is_sync(bio->bi_opf);
1541 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1542 	struct blk_mq_alloc_data data = { .flags = 0 };
1543 	struct request *rq;
1544 	unsigned int request_count = 0;
1545 	struct blk_plug *plug;
1546 	struct request *same_queue_rq = NULL;
1547 	blk_qc_t cookie;
1548 	unsigned int wb_acct;
1549 
1550 	blk_queue_bounce(q, &bio);
1551 
1552 	blk_queue_split(q, &bio);
1553 
1554 	if (!bio_integrity_prep(bio))
1555 		return BLK_QC_T_NONE;
1556 
1557 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1558 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1559 		return BLK_QC_T_NONE;
1560 
1561 	if (blk_mq_sched_bio_merge(q, bio))
1562 		return BLK_QC_T_NONE;
1563 
1564 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1565 
1566 	trace_block_getrq(q, bio, bio->bi_opf);
1567 
1568 	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1569 	if (unlikely(!rq)) {
1570 		__wbt_done(q->rq_wb, wb_acct);
1571 		if (bio->bi_opf & REQ_NOWAIT)
1572 			bio_wouldblock_error(bio);
1573 		return BLK_QC_T_NONE;
1574 	}
1575 
1576 	wbt_track(&rq->issue_stat, wb_acct);
1577 
1578 	cookie = request_to_qc_t(data.hctx, rq);
1579 
1580 	plug = current->plug;
1581 	if (unlikely(is_flush_fua)) {
1582 		blk_mq_put_ctx(data.ctx);
1583 		blk_mq_bio_to_request(rq, bio);
1584 		if (q->elevator) {
1585 			blk_mq_sched_insert_request(rq, false, true, true,
1586 					true);
1587 		} else {
1588 			blk_insert_flush(rq);
1589 			blk_mq_run_hw_queue(data.hctx, true);
1590 		}
1591 	} else if (plug && q->nr_hw_queues == 1) {
1592 		struct request *last = NULL;
1593 
1594 		blk_mq_put_ctx(data.ctx);
1595 		blk_mq_bio_to_request(rq, bio);
1596 
1597 		/*
1598 		 * @request_count may have become stale because we may have
1599 		 * been scheduled out, so check the list again.
1600 		 */
1601 		if (list_empty(&plug->mq_list))
1602 			request_count = 0;
1603 		else if (blk_queue_nomerges(q))
1604 			request_count = blk_plug_queued_count(q);
1605 
1606 		if (!request_count)
1607 			trace_block_plug(q);
1608 		else
1609 			last = list_entry_rq(plug->mq_list.prev);
1610 
1611 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1612 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1613 			blk_flush_plug_list(plug, false);
1614 			trace_block_plug(q);
1615 		}
1616 
1617 		list_add_tail(&rq->queuelist, &plug->mq_list);
1618 	} else if (plug && !blk_queue_nomerges(q)) {
1619 		blk_mq_bio_to_request(rq, bio);
1620 
1621 		/*
1622 		 * We do limited plugging. If the bio can be merged, do that.
1623 		 * Otherwise the existing request in the plug list will be
1624 		 * issued. So the plug list will have one request at most.
1625 		 * The plug list might get flushed before this. If that happens,
1626 		 * the plug list is empty, and same_queue_rq is invalid.
1627 		 */
1628 		if (list_empty(&plug->mq_list))
1629 			same_queue_rq = NULL;
1630 		if (same_queue_rq)
1631 			list_del_init(&same_queue_rq->queuelist);
1632 		list_add_tail(&rq->queuelist, &plug->mq_list);
1633 
1634 		blk_mq_put_ctx(data.ctx);
1635 
1636 		if (same_queue_rq) {
1637 			data.hctx = blk_mq_map_queue(q,
1638 					same_queue_rq->mq_ctx->cpu);
1639 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1640 					&cookie);
1641 		}
1642 	} else if (q->nr_hw_queues > 1 && is_sync) {
1643 		blk_mq_put_ctx(data.ctx);
1644 		blk_mq_bio_to_request(rq, bio);
1645 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1646 	} else if (q->elevator) {
1647 		blk_mq_put_ctx(data.ctx);
1648 		blk_mq_bio_to_request(rq, bio);
1649 		blk_mq_sched_insert_request(rq, false, true, true, true);
1650 	} else {
1651 		blk_mq_put_ctx(data.ctx);
1652 		blk_mq_bio_to_request(rq, bio);
1653 		blk_mq_queue_io(data.hctx, data.ctx, rq);
1654 		blk_mq_run_hw_queue(data.hctx, true);
1655 	}
1656 
1657 	return cookie;
1658 }
1659 
1660 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1661 		     unsigned int hctx_idx)
1662 {
1663 	struct page *page;
1664 
1665 	if (tags->rqs && set->ops->exit_request) {
1666 		int i;
1667 
1668 		for (i = 0; i < tags->nr_tags; i++) {
1669 			struct request *rq = tags->static_rqs[i];
1670 
1671 			if (!rq)
1672 				continue;
1673 			set->ops->exit_request(set, rq, hctx_idx);
1674 			tags->static_rqs[i] = NULL;
1675 		}
1676 	}
1677 
1678 	while (!list_empty(&tags->page_list)) {
1679 		page = list_first_entry(&tags->page_list, struct page, lru);
1680 		list_del_init(&page->lru);
1681 		/*
1682 		 * Remove kmemleak object previously allocated in
1683 		 * blk_mq_init_rq_map().
1684 		 */
1685 		kmemleak_free(page_address(page));
1686 		__free_pages(page, page->private);
1687 	}
1688 }
1689 
1690 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1691 {
1692 	kfree(tags->rqs);
1693 	tags->rqs = NULL;
1694 	kfree(tags->static_rqs);
1695 	tags->static_rqs = NULL;
1696 
1697 	blk_mq_free_tags(tags);
1698 }
1699 
1700 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1701 					unsigned int hctx_idx,
1702 					unsigned int nr_tags,
1703 					unsigned int reserved_tags)
1704 {
1705 	struct blk_mq_tags *tags;
1706 	int node;
1707 
1708 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1709 	if (node == NUMA_NO_NODE)
1710 		node = set->numa_node;
1711 
1712 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1713 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1714 	if (!tags)
1715 		return NULL;
1716 
1717 	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1718 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1719 				 node);
1720 	if (!tags->rqs) {
1721 		blk_mq_free_tags(tags);
1722 		return NULL;
1723 	}
1724 
1725 	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1726 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1727 				 node);
1728 	if (!tags->static_rqs) {
1729 		kfree(tags->rqs);
1730 		blk_mq_free_tags(tags);
1731 		return NULL;
1732 	}
1733 
1734 	return tags;
1735 }
1736 
1737 static size_t order_to_size(unsigned int order)
1738 {
1739 	return (size_t)PAGE_SIZE << order;
1740 }
1741 
1742 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1743 		     unsigned int hctx_idx, unsigned int depth)
1744 {
1745 	unsigned int i, j, entries_per_page, max_order = 4;
1746 	size_t rq_size, left;
1747 	int node;
1748 
1749 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1750 	if (node == NUMA_NO_NODE)
1751 		node = set->numa_node;
1752 
1753 	INIT_LIST_HEAD(&tags->page_list);
1754 
1755 	/*
1756 	 * rq_size is the size of the request plus driver payload, rounded
1757 	 * to the cacheline size
1758 	 */
1759 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1760 				cache_line_size());
1761 	left = rq_size * depth;
1762 
1763 	for (i = 0; i < depth; ) {
1764 		int this_order = max_order;
1765 		struct page *page;
1766 		int to_do;
1767 		void *p;
1768 
1769 		while (this_order && left < order_to_size(this_order - 1))
1770 			this_order--;
1771 
1772 		do {
1773 			page = alloc_pages_node(node,
1774 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1775 				this_order);
1776 			if (page)
1777 				break;
1778 			if (!this_order--)
1779 				break;
1780 			if (order_to_size(this_order) < rq_size)
1781 				break;
1782 		} while (1);
1783 
1784 		if (!page)
1785 			goto fail;
1786 
1787 		page->private = this_order;
1788 		list_add_tail(&page->lru, &tags->page_list);
1789 
1790 		p = page_address(page);
1791 		/*
1792 		 * Allow kmemleak to scan these pages as they contain pointers
1793 		 * to additional allocations made via ops->init_request().
1794 		 */
1795 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1796 		entries_per_page = order_to_size(this_order) / rq_size;
1797 		to_do = min(entries_per_page, depth - i);
1798 		left -= to_do * rq_size;
1799 		for (j = 0; j < to_do; j++) {
1800 			struct request *rq = p;
1801 
1802 			tags->static_rqs[i] = rq;
1803 			if (set->ops->init_request) {
1804 				if (set->ops->init_request(set, rq, hctx_idx,
1805 						node)) {
1806 					tags->static_rqs[i] = NULL;
1807 					goto fail;
1808 				}
1809 			}
1810 
1811 			p += rq_size;
1812 			i++;
1813 		}
1814 	}
1815 	return 0;
1816 
1817 fail:
1818 	blk_mq_free_rqs(set, tags, hctx_idx);
1819 	return -ENOMEM;
1820 }
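
/*
 * Sizing example for blk_mq_alloc_rqs() (illustrative, with assumed
 * numbers): if sizeof(struct request) plus set->cmd_size rounds up to
 * 512 bytes and depth is 256, then left = 128 KiB. Assuming 4 KiB pages
 * and max_order = 4, the first chunk is a 64 KiB allocation holding
 * entries_per_page = 128 requests, so two such chunks satisfy the map;
 * under memory pressure the inner loop falls back to smaller orders,
 * down to single pages.
 */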
1821 
1822 /*
1823  * 'cpu' is going away. Splice any existing rq_list entries from this
1824  * software queue to the hw queue dispatch list, and ensure that it
1825  * gets run.
1826  */
1827 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1828 {
1829 	struct blk_mq_hw_ctx *hctx;
1830 	struct blk_mq_ctx *ctx;
1831 	LIST_HEAD(tmp);
1832 
1833 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1834 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1835 
1836 	spin_lock(&ctx->lock);
1837 	if (!list_empty(&ctx->rq_list)) {
1838 		list_splice_init(&ctx->rq_list, &tmp);
1839 		blk_mq_hctx_clear_pending(hctx, ctx);
1840 	}
1841 	spin_unlock(&ctx->lock);
1842 
1843 	if (list_empty(&tmp))
1844 		return 0;
1845 
1846 	spin_lock(&hctx->lock);
1847 	list_splice_tail_init(&tmp, &hctx->dispatch);
1848 	spin_unlock(&hctx->lock);
1849 
1850 	blk_mq_run_hw_queue(hctx, true);
1851 	return 0;
1852 }
1853 
1854 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1855 {
1856 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1857 					    &hctx->cpuhp_dead);
1858 }
1859 
1860 /* hctx->ctxs will be freed in queue's release handler */
1861 static void blk_mq_exit_hctx(struct request_queue *q,
1862 		struct blk_mq_tag_set *set,
1863 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1864 {
1865 	blk_mq_debugfs_unregister_hctx(hctx);
1866 
1867 	blk_mq_tag_idle(hctx);
1868 
1869 	if (set->ops->exit_request)
1870 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
1871 
1872 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1873 
1874 	if (set->ops->exit_hctx)
1875 		set->ops->exit_hctx(hctx, hctx_idx);
1876 
1877 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1878 		cleanup_srcu_struct(hctx->queue_rq_srcu);
1879 
1880 	blk_mq_remove_cpuhp(hctx);
1881 	blk_free_flush_queue(hctx->fq);
1882 	sbitmap_free(&hctx->ctx_map);
1883 }
1884 
1885 static void blk_mq_exit_hw_queues(struct request_queue *q,
1886 		struct blk_mq_tag_set *set, int nr_queue)
1887 {
1888 	struct blk_mq_hw_ctx *hctx;
1889 	unsigned int i;
1890 
1891 	queue_for_each_hw_ctx(q, hctx, i) {
1892 		if (i == nr_queue)
1893 			break;
1894 		blk_mq_exit_hctx(q, set, hctx, i);
1895 	}
1896 }
1897 
1898 static int blk_mq_init_hctx(struct request_queue *q,
1899 		struct blk_mq_tag_set *set,
1900 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1901 {
1902 	int node;
1903 
1904 	node = hctx->numa_node;
1905 	if (node == NUMA_NO_NODE)
1906 		node = hctx->numa_node = set->numa_node;
1907 
1908 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1909 	spin_lock_init(&hctx->lock);
1910 	INIT_LIST_HEAD(&hctx->dispatch);
1911 	hctx->queue = q;
1912 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1913 
1914 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1915 
1916 	hctx->tags = set->tags[hctx_idx];
1917 
1918 	/*
1919 	 * Allocate space for all possible CPUs to avoid allocation at
1920 	 * runtime.
1921 	 */
1922 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1923 					GFP_KERNEL, node);
1924 	if (!hctx->ctxs)
1925 		goto unregister_cpu_notifier;
1926 
1927 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1928 			      node))
1929 		goto free_ctxs;
1930 
1931 	hctx->nr_ctx = 0;
1932 
1933 	if (set->ops->init_hctx &&
1934 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1935 		goto free_bitmap;
1936 
1937 	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
1938 		goto exit_hctx;
1939 
1940 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1941 	if (!hctx->fq)
1942 		goto sched_exit_hctx;
1943 
1944 	if (set->ops->init_request &&
1945 	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
1946 				   node))
1947 		goto free_fq;
1948 
1949 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1950 		init_srcu_struct(hctx->queue_rq_srcu);
1951 
1952 	blk_mq_debugfs_register_hctx(q, hctx);
1953 
1954 	return 0;
1955 
1956  free_fq:
1957 	blk_free_flush_queue(hctx->fq);	/* also frees fq->flush_rq; a bare kfree() would leak it */
1958  sched_exit_hctx:
1959 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1960  exit_hctx:
1961 	if (set->ops->exit_hctx)
1962 		set->ops->exit_hctx(hctx, hctx_idx);
1963  free_bitmap:
1964 	sbitmap_free(&hctx->ctx_map);
1965  free_ctxs:
1966 	kfree(hctx->ctxs);
1967  unregister_cpu_notifier:
1968 	blk_mq_remove_cpuhp(hctx);
1969 	return -1;
1970 }
1971 
1972 static void blk_mq_init_cpu_queues(struct request_queue *q,
1973 				   unsigned int nr_hw_queues)
1974 {
1975 	unsigned int i;
1976 
1977 	for_each_possible_cpu(i) {
1978 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1979 		struct blk_mq_hw_ctx *hctx;
1980 
1981 		__ctx->cpu = i;
1982 		spin_lock_init(&__ctx->lock);
1983 		INIT_LIST_HEAD(&__ctx->rq_list);
1984 		__ctx->queue = q;
1985 
1986 		/* If the cpu isn't present, the cpu is mapped to the first hctx */
1987 		if (!cpu_present(i))
1988 			continue;
1989 
1990 		hctx = blk_mq_map_queue(q, i);
1991 
1992 		/*
1993 		 * Set the local node, but only if we have more than one hw
1994 		 * queue. If not, we remain on the home node of the device.
1995 		 */
1996 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1997 			hctx->numa_node = local_memory_node(cpu_to_node(i));
1998 	}
1999 }
2000 
2001 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2002 {
2003 	int ret = 0;
2004 
2005 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2006 					set->queue_depth, set->reserved_tags);
2007 	if (!set->tags[hctx_idx])
2008 		return false;
2009 
2010 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2011 				set->queue_depth);
2012 	if (!ret)
2013 		return true;
2014 
2015 	blk_mq_free_rq_map(set->tags[hctx_idx]);
2016 	set->tags[hctx_idx] = NULL;
2017 	return false;
2018 }
2019 
2020 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2021 					 unsigned int hctx_idx)
2022 {
2023 	if (set->tags[hctx_idx]) {
2024 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2025 		blk_mq_free_rq_map(set->tags[hctx_idx]);
2026 		set->tags[hctx_idx] = NULL;
2027 	}
2028 }
2029 
2030 static void blk_mq_map_swqueue(struct request_queue *q)
2031 {
2032 	unsigned int i, hctx_idx;
2033 	struct blk_mq_hw_ctx *hctx;
2034 	struct blk_mq_ctx *ctx;
2035 	struct blk_mq_tag_set *set = q->tag_set;
2036 
2037 	/*
2038 	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2039 	 */
2040 	mutex_lock(&q->sysfs_lock);
2041 
2042 	queue_for_each_hw_ctx(q, hctx, i) {
2043 		cpumask_clear(hctx->cpumask);
2044 		hctx->nr_ctx = 0;
2045 	}
2046 
2047 	/*
2048 	 * Map software to hardware queues.
2049 	 *
2050 	 * If the cpu isn't present, the cpu is mapped to the first hctx.
2051 	 */
2052 	for_each_present_cpu(i) {
2053 		hctx_idx = q->mq_map[i];
2054 		/* an unmapped hw queue can be remapped after the CPU topology changes */
2055 		if (!set->tags[hctx_idx] &&
2056 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2057 			/*
2058 			 * If tag initialization fails for some hctx,
2059 			 * that hctx won't be brought online.  In this
2060 			 * case, remap the current ctx to hctx[0], which
2061 			 * is guaranteed to always have tags allocated.
2062 			 */
2063 			q->mq_map[i] = 0;
2064 		}
2065 
2066 		ctx = per_cpu_ptr(q->queue_ctx, i);
2067 		hctx = blk_mq_map_queue(q, i);
2068 
2069 		cpumask_set_cpu(i, hctx->cpumask);
2070 		ctx->index_hw = hctx->nr_ctx;
2071 		hctx->ctxs[hctx->nr_ctx++] = ctx;
2072 	}
2073 
2074 	mutex_unlock(&q->sysfs_lock);
2075 
2076 	queue_for_each_hw_ctx(q, hctx, i) {
2077 		/*
2078 		 * If no software queues are mapped to this hardware queue,
2079 		 * disable it and free the request entries.
2080 		 */
2081 		if (!hctx->nr_ctx) {
2082 			/*
2083 			 * Never unmap queue 0. We need it as a fallback
2084 			 * in case tag allocation fails during a remap.
2085 			 */
2086 			if (i && set->tags[i])
2087 				blk_mq_free_map_and_requests(set, i);
2088 
2089 			hctx->tags = NULL;
2090 			continue;
2091 		}
2092 
2093 		hctx->tags = set->tags[i];
2094 		WARN_ON(!hctx->tags);
2095 
2096 		/*
2097 		 * Set the map size to the number of mapped software queues.
2098 		 * This is more accurate and more efficient than looping
2099 		 * over all possibly mapped software queues.
2100 		 */
2101 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2102 
2103 		/*
2104 		 * Initialize batch roundrobin counts
2105 		 * Initialize batch round-robin counts
2106 		hctx->next_cpu = cpumask_first(hctx->cpumask);
2107 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2108 	}
2109 }
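
/*
 * Illustrative example of the mapping built above (hypothetical
 * topology): on a 4-CPU machine with two hardware queues and the
 * default blk_mq_map_queues() spread, q->mq_map might be {0, 0, 1, 1}.
 * CPUs 0-1 then land in hctx0's cpumask with ctx->index_hw 0 and 1,
 * CPUs 2-3 in hctx1's likewise, each hctx ends up with nr_ctx == 2,
 * and its ctx_map bitmap is resized to two bits.
 */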
2110 
2111 /*
2112  * Caller needs to ensure that we're either frozen/quiesced, or that
2113  * the queue isn't live yet.
2114  */
2115 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2116 {
2117 	struct blk_mq_hw_ctx *hctx;
2118 	int i;
2119 
2120 	queue_for_each_hw_ctx(q, hctx, i) {
2121 		if (shared) {
2122 			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2123 				atomic_inc(&q->shared_hctx_restart);
2124 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
2125 		} else {
2126 			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2127 				atomic_dec(&q->shared_hctx_restart);
2128 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2129 		}
2130 	}
2131 }
2132 
2133 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2134 					bool shared)
2135 {
2136 	struct request_queue *q;
2137 
2138 	lockdep_assert_held(&set->tag_list_lock);
2139 
2140 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2141 		blk_mq_freeze_queue(q);
2142 		queue_set_hctx_shared(q, shared);
2143 		blk_mq_unfreeze_queue(q);
2144 	}
2145 }
2146 
2147 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2148 {
2149 	struct blk_mq_tag_set *set = q->tag_set;
2150 
2151 	mutex_lock(&set->tag_list_lock);
2152 	list_del_rcu(&q->tag_set_list);
2153 	INIT_LIST_HEAD(&q->tag_set_list);
2154 	if (list_is_singular(&set->tag_list)) {
2155 		/* just transitioned to unshared */
2156 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
2157 		/* update existing queue */
2158 		blk_mq_update_tag_set_depth(set, false);
2159 	}
2160 	mutex_unlock(&set->tag_list_lock);
2161 
2162 	synchronize_rcu();
2163 }
2164 
2165 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2166 				     struct request_queue *q)
2167 {
2168 	q->tag_set = set;
2169 
2170 	mutex_lock(&set->tag_list_lock);
2171 
2172 	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2173 	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2174 		set->flags |= BLK_MQ_F_TAG_SHARED;
2175 		/* update existing queue */
2176 		blk_mq_update_tag_set_depth(set, true);
2177 	}
2178 	if (set->flags & BLK_MQ_F_TAG_SHARED)
2179 		queue_set_hctx_shared(q, true);
2180 	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2181 
2182 	mutex_unlock(&set->tag_list_lock);
2183 }
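
/*
 * Example of the shared-tag transitions handled above: the first queue
 * added to a set leaves BLK_MQ_F_TAG_SHARED clear; adding a second one
 * sets the flag and re-marks every existing queue's hctxs (under a
 * freeze) via blk_mq_update_tag_set_depth(set, true).
 * blk_mq_del_queue_tag_set() reverses this once only a single queue
 * remains on the list.
 */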
2184 
2185 /*
2186  * This is the actual release handler for blk-mq, but we invoke it from
2187  * the request queue's release handler to avoid use-after-free issues.
2188  * Ideally q->mq_kobj would never have been introduced, but we cannot
2189  * group the ctx/hctx kobjects without it.
2190  */
2191 void blk_mq_release(struct request_queue *q)
2192 {
2193 	struct blk_mq_hw_ctx *hctx;
2194 	unsigned int i;
2195 
2196 	/* hctx kobj stays in hctx */
2197 	queue_for_each_hw_ctx(q, hctx, i) {
2198 		if (!hctx)
2199 			continue;
2200 		kobject_put(&hctx->kobj);
2201 	}
2202 
2203 	q->mq_map = NULL;
2204 
2205 	kfree(q->queue_hw_ctx);
2206 
2207 	/*
2208 	 * Release .mq_kobj and the sw queues' kobjects now, because
2209 	 * both share their lifetime with the request queue.
2210 	 */
2211 	blk_mq_sysfs_deinit(q);
2212 
2213 	free_percpu(q->queue_ctx);
2214 }
2215 
2216 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2217 {
2218 	struct request_queue *uninit_q, *q;
2219 
2220 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2221 	if (!uninit_q)
2222 		return ERR_PTR(-ENOMEM);
2223 
2224 	q = blk_mq_init_allocated_queue(set, uninit_q);
2225 	if (IS_ERR(q))
2226 		blk_cleanup_queue(uninit_q);
2227 
2228 	return q;
2229 }
2230 EXPORT_SYMBOL(blk_mq_init_queue);
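
/*
 * Minimal usage sketch (not part of this file; my_ops, struct my_cmd
 * and the field values are hypothetical): a driver fills in a tag set,
 * allocates it, then creates a queue from it. my_ops must provide at
 * least .queue_rq, and cmd_size reserves a per-request driver payload.
 *
 *	static struct blk_mq_tag_set my_set;
 *	struct request_queue *q;
 *	int ret;
 *
 *	my_set.ops = &my_ops;
 *	my_set.nr_hw_queues = 1;
 *	my_set.queue_depth = 64;
 *	my_set.numa_node = NUMA_NO_NODE;
 *	my_set.cmd_size = sizeof(struct my_cmd);
 *	my_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&my_set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&my_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_set);
 *		return PTR_ERR(q);
 *	}
 */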
2231 
2232 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2233 {
2234 	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2235 
2236 	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2237 			   __alignof__(struct blk_mq_hw_ctx)) !=
2238 		     sizeof(struct blk_mq_hw_ctx));
2239 
2240 	if (tag_set->flags & BLK_MQ_F_BLOCKING)
2241 		hw_ctx_size += sizeof(struct srcu_struct);
2242 
2243 	return hw_ctx_size;
2244 }
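
/*
 * The sizing above works because queue_rq_srcu is a zero-length array
 * at the very end of struct blk_mq_hw_ctx: the BUILD_BUG_ON checks
 * that its aligned offset equals sizeof(struct blk_mq_hw_ctx), so for
 * BLK_MQ_F_BLOCKING drivers the extra sizeof(struct srcu_struct) bytes
 * begin exactly where the base structure ends.
 */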
2245 
2246 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2247 						struct request_queue *q)
2248 {
2249 	int i, j;
2250 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2251 
2252 	blk_mq_sysfs_unregister(q);
2253 	for (i = 0; i < set->nr_hw_queues; i++) {
2254 		int node;
2255 
2256 		if (hctxs[i])
2257 			continue;
2258 
2259 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
2260 		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2261 					GFP_KERNEL, node);
2262 		if (!hctxs[i])
2263 			break;
2264 
2265 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2266 						node)) {
2267 			kfree(hctxs[i]);
2268 			hctxs[i] = NULL;
2269 			break;
2270 		}
2271 
2272 		atomic_set(&hctxs[i]->nr_active, 0);
2273 		hctxs[i]->numa_node = node;
2274 		hctxs[i]->queue_num = i;
2275 
2276 		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2277 			free_cpumask_var(hctxs[i]->cpumask);
2278 			kfree(hctxs[i]);
2279 			hctxs[i] = NULL;
2280 			break;
2281 		}
2282 		blk_mq_hctx_kobj_init(hctxs[i]);
2283 	}
2284 	for (j = i; j < q->nr_hw_queues; j++) {
2285 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2286 
2287 		if (hctx) {
2288 			if (hctx->tags)
2289 				blk_mq_free_map_and_requests(set, j);
2290 			blk_mq_exit_hctx(q, set, hctx, j);
2291 			kobject_put(&hctx->kobj);
2292 			hctxs[j] = NULL;
2293 
2294 		}
2295 	}
2296 	q->nr_hw_queues = i;
2297 	blk_mq_sysfs_register(q);
2298 }
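
/*
 * Note on the two loops above: the first only allocates hctxs that do
 * not already exist (growing the queue, or retrying after a partial
 * failure), while the second tears down everything past the new count.
 * Shrinking from 4 to 2 hardware queues, say, frees hctx 2 and 3 along
 * with their maps and requests; q->nr_hw_queues then records how far
 * allocation actually got.
 */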
2299 
2300 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2301 						  struct request_queue *q)
2302 {
2303 	/* mark the queue as mq asap */
2304 	q->mq_ops = set->ops;
2305 
2306 	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2307 					     blk_mq_poll_stats_bkt,
2308 					     BLK_MQ_POLL_STATS_BKTS, q);
2309 	if (!q->poll_cb)
2310 		goto err_exit;
2311 
2312 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2313 	if (!q->queue_ctx)
2314 		goto err_exit;
2315 
2316 	/* init q->mq_kobj and sw queues' kobjects */
2317 	blk_mq_sysfs_init(q);
2318 
2319 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2320 						GFP_KERNEL, set->numa_node);
2321 	if (!q->queue_hw_ctx)
2322 		goto err_percpu;
2323 
2324 	q->mq_map = set->mq_map;
2325 
2326 	blk_mq_realloc_hw_ctxs(set, q);
2327 	if (!q->nr_hw_queues)
2328 		goto err_hctxs;
2329 
2330 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2331 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2332 
2333 	q->nr_queues = nr_cpu_ids;
2334 
2335 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2336 
2337 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
2338 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2339 
2340 	q->sg_reserved_size = INT_MAX;
2341 
2342 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2343 	INIT_LIST_HEAD(&q->requeue_list);
2344 	spin_lock_init(&q->requeue_lock);
2345 
2346 	blk_queue_make_request(q, blk_mq_make_request);
2347 
2348 	/*
2349 	 * Do this after blk_queue_make_request() overrides it...
2350 	 */
2351 	q->nr_requests = set->queue_depth;
2352 
2353 	/*
2354 	 * Default to classic polling
2355 	 */
2356 	q->poll_nsec = -1;
2357 
2358 	if (set->ops->complete)
2359 		blk_queue_softirq_done(q, set->ops->complete);
2360 
2361 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2362 	blk_mq_add_queue_tag_set(set, q);
2363 	blk_mq_map_swqueue(q);
2364 
2365 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2366 		int ret;
2367 
2368 		ret = blk_mq_sched_init(q);
2369 		if (ret)
2370 			return ERR_PTR(ret);
2371 	}
2372 
2373 	return q;
2374 
2375 err_hctxs:
2376 	kfree(q->queue_hw_ctx);
2377 err_percpu:
2378 	free_percpu(q->queue_ctx);
2379 err_exit:
2380 	q->mq_ops = NULL;
2381 	return ERR_PTR(-ENOMEM);
2382 }
2383 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2384 
2385 void blk_mq_free_queue(struct request_queue *q)
2386 {
2387 	struct blk_mq_tag_set	*set = q->tag_set;
2388 
2389 	blk_mq_del_queue_tag_set(q);
2390 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2391 }
2392 
2393 /* Basically redo blk_mq_init_queue() with the queue frozen */
2394 static void blk_mq_queue_reinit(struct request_queue *q)
2395 {
2396 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2397 
2398 	blk_mq_debugfs_unregister_hctxs(q);
2399 	blk_mq_sysfs_unregister(q);
2400 
2401 	/*
2402 	 * Redo blk_mq_init_cpu_queues() and blk_mq_init_hw_queues(). FIXME:
2403 	 * maybe we should change each hctx's numa_node to match the new
2404 	 * topology (this involves freeing and re-allocating memory; worth doing?)
2405 	 */
2406 
2407 	blk_mq_map_swqueue(q);
2408 
2409 	blk_mq_sysfs_register(q);
2410 	blk_mq_debugfs_register_hctxs(q);
2411 }
2412 
2413 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2414 {
2415 	int i;
2416 
2417 	for (i = 0; i < set->nr_hw_queues; i++)
2418 		if (!__blk_mq_alloc_rq_map(set, i))
2419 			goto out_unwind;
2420 
2421 	return 0;
2422 
2423 out_unwind:
2424 	while (--i >= 0)
2425 		blk_mq_free_rq_map(set->tags[i]);
2426 
2427 	return -ENOMEM;
2428 }
2429 
2430 /*
2431  * Allocate the request maps associated with this tag_set. Note that this
2432  * may reduce the depth asked for, if memory is tight. set->queue_depth
2433  * will be updated to reflect the allocated depth.
2434  */
2435 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2436 {
2437 	unsigned int depth;
2438 	int err;
2439 
2440 	depth = set->queue_depth;
2441 	do {
2442 		err = __blk_mq_alloc_rq_maps(set);
2443 		if (!err)
2444 			break;
2445 
2446 		set->queue_depth >>= 1;
2447 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2448 			err = -ENOMEM;
2449 			break;
2450 		}
2451 	} while (set->queue_depth);
2452 
2453 	if (!set->queue_depth || err) {
2454 		pr_err("blk-mq: failed to allocate request map\n");
2455 		return -ENOMEM;
2456 	}
2457 
2458 	if (depth != set->queue_depth)
2459 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2460 						depth, set->queue_depth);
2461 
2462 	return 0;
2463 }
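
/*
 * Worked example of the fallback above (illustrative numbers): asked
 * for queue_depth 1024 with no reserved tags, a memory-tight system
 * might fail at 1024 and 512 but succeed at 256; set->queue_depth is
 * left at 256 and "reduced tag depth (1024 -> 256)" is logged. Only
 * when the depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN
 * does the whole call fail with -ENOMEM.
 */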
2464 
2465 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2466 {
2467 	if (set->ops->map_queues)
2468 		return set->ops->map_queues(set);
2469 	else
2470 		return blk_mq_map_queues(set);
2471 }
2472 
2473 /*
2474  * Alloc a tag set to be associated with one or more request queues.
2475  * May fail with EINVAL for various error conditions. May adjust the
2476  * requested depth down, if it is too large. In that case, the adjusted
2477  * value will be stored in set->queue_depth.
2478  */
2479 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2480 {
2481 	int ret;
2482 
2483 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2484 
2485 	if (!set->nr_hw_queues)
2486 		return -EINVAL;
2487 	if (!set->queue_depth)
2488 		return -EINVAL;
2489 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2490 		return -EINVAL;
2491 
2492 	if (!set->ops->queue_rq)
2493 		return -EINVAL;
2494 
2495 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2496 		pr_info("blk-mq: reduced tag depth to %u\n",
2497 			BLK_MQ_MAX_DEPTH);
2498 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2499 	}
2500 
2501 	/*
2502 	 * If a crashdump is active, then we are potentially in a very
2503 	 * memory constrained environment. Limit us to 1 queue and
2504 	 * 64 tags to prevent using too much memory.
2505 	 */
2506 	if (is_kdump_kernel()) {
2507 		set->nr_hw_queues = 1;
2508 		set->queue_depth = min(64U, set->queue_depth);
2509 	}
2510 	/*
2511 	 * There is no use for more h/w queues than cpus.
2512 	 */
2513 	if (set->nr_hw_queues > nr_cpu_ids)
2514 		set->nr_hw_queues = nr_cpu_ids;
2515 
2516 	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2517 				 GFP_KERNEL, set->numa_node);
2518 	if (!set->tags)
2519 		return -ENOMEM;
2520 
2521 	ret = -ENOMEM;
2522 	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2523 			GFP_KERNEL, set->numa_node);
2524 	if (!set->mq_map)
2525 		goto out_free_tags;
2526 
2527 	ret = blk_mq_update_queue_map(set);
2528 	if (ret)
2529 		goto out_free_mq_map;
2530 
2531 	ret = blk_mq_alloc_rq_maps(set);
2532 	if (ret)
2533 		goto out_free_mq_map;
2534 
2535 	mutex_init(&set->tag_list_lock);
2536 	INIT_LIST_HEAD(&set->tag_list);
2537 
2538 	return 0;
2539 
2540 out_free_mq_map:
2541 	kfree(set->mq_map);
2542 	set->mq_map = NULL;
2543 out_free_tags:
2544 	kfree(set->tags);
2545 	set->tags = NULL;
2546 	return ret;
2547 }
2548 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
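
/*
 * Example of the validation above: a set with nr_hw_queues == 0 or no
 * .queue_rq handler is rejected with -EINVAL outright, while an
 * oversized queue_depth is not an error - it is clamped to
 * BLK_MQ_MAX_DEPTH with a message, and may later be halved further by
 * blk_mq_alloc_rq_maps() if memory is tight.
 */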
2549 
2550 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2551 {
2552 	int i;
2553 
2554 	for (i = 0; i < nr_cpu_ids; i++)
2555 		blk_mq_free_map_and_requests(set, i);
2556 
2557 	kfree(set->mq_map);
2558 	set->mq_map = NULL;
2559 
2560 	kfree(set->tags);
2561 	set->tags = NULL;
2562 }
2563 EXPORT_SYMBOL(blk_mq_free_tag_set);
2564 
2565 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2566 {
2567 	struct blk_mq_tag_set *set = q->tag_set;
2568 	struct blk_mq_hw_ctx *hctx;
2569 	int i, ret;
2570 
2571 	if (!set)
2572 		return -EINVAL;
2573 
2574 	blk_mq_freeze_queue(q);
2575 
2576 	ret = 0;
2577 	queue_for_each_hw_ctx(q, hctx, i) {
2578 		if (!hctx->tags)
2579 			continue;
2580 		/*
2581 		 * If we're using an MQ scheduler, just update the scheduler
2582 		 * queue depth. This is similar to what the old code would do.
2583 		 */
2584 		if (!hctx->sched_tags) {
2585 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2586 							min(nr, set->queue_depth),
2587 							false);
2588 		} else {
2589 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2590 							nr, true);
2591 		}
2592 		if (ret)
2593 			break;
2594 	}
2595 
2596 	if (!ret)
2597 		q->nr_requests = nr;
2598 
2599 	blk_mq_unfreeze_queue(q);
2600 
2601 	return ret;
2602 }
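
/*
 * This is what ultimately services a write to a queue's nr_requests
 * attribute on blk-mq devices, e.g. (path illustrative):
 *
 *	echo 128 > /sys/block/<dev>/queue/nr_requests
 *
 * The queue is frozen across the update, so no request is in flight
 * while tag depths change.
 */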
2603 
2604 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2605 							int nr_hw_queues)
2606 {
2607 	struct request_queue *q;
2608 
2609 	lockdep_assert_held(&set->tag_list_lock);
2610 
2611 	if (nr_hw_queues > nr_cpu_ids)
2612 		nr_hw_queues = nr_cpu_ids;
2613 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2614 		return;
2615 
2616 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2617 		blk_mq_freeze_queue(q);
2618 
2619 	set->nr_hw_queues = nr_hw_queues;
2620 	blk_mq_update_queue_map(set);
2621 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2622 		blk_mq_realloc_hw_ctxs(set, q);
2623 		blk_mq_queue_reinit(q);
2624 	}
2625 
2626 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2627 		blk_mq_unfreeze_queue(q);
2628 }
2629 
2630 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2631 {
2632 	mutex_lock(&set->tag_list_lock);
2633 	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2634 	mutex_unlock(&set->tag_list_lock);
2635 }
2636 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
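
/*
 * Usage sketch (hypothetical driver; my_set and new_nr_vecs are
 * assumptions): a driver that discovers a new interrupt-vector count
 * after a controller reset can resize its set in place; every queue
 * sharing the set is frozen, remapped and unfrozen by the helper above.
 *
 *	blk_mq_update_nr_hw_queues(&my_set, new_nr_vecs);
 */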
2637 
2638 /* Enable polling stats and return whether they were already enabled. */
2639 static bool blk_poll_stats_enable(struct request_queue *q)
2640 {
2641 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2642 	    test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2643 		return true;
2644 	blk_stat_add_callback(q, q->poll_cb);
2645 	return false;
2646 }
2647 
2648 static void blk_mq_poll_stats_start(struct request_queue *q)
2649 {
2650 	/*
2651 	 * We don't arm the callback if polling stats are not enabled or the
2652 	 * callback is already active.
2653 	 */
2654 	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2655 	    blk_stat_is_active(q->poll_cb))
2656 		return;
2657 
2658 	blk_stat_activate_msecs(q->poll_cb, 100);
2659 }
2660 
2661 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2662 {
2663 	struct request_queue *q = cb->data;
2664 	int bucket;
2665 
2666 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2667 		if (cb->stat[bucket].nr_samples)
2668 			q->poll_stat[bucket] = cb->stat[bucket];
2669 	}
2670 }
2671 
2672 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2673 				       struct blk_mq_hw_ctx *hctx,
2674 				       struct request *rq)
2675 {
2676 	unsigned long ret = 0;
2677 	int bucket;
2678 
2679 	/*
2680 	 * If stats collection isn't on, don't sleep, but turn it on for
2681 	 * future users.
2682 	 */
2683 	if (!blk_poll_stats_enable(q))
2684 		return 0;
2685 
2686 	/*
2687 	 * As an optimistic guess, use half of the mean service time
2688 	 * for this type of request. We can (and should) make this smarter.
2689 	 * For instance, if the completion latencies are tight, we can
2690 	 * get closer than just half the mean. This is especially
2691 	 * important on devices where the completion latencies are longer
2692 	 * than ~10 usec. We do use the stats for the relevant IO size,
2693 	 * if available, which does lead to better estimates.
2694 	 */
2695 	bucket = blk_mq_poll_stats_bkt(rq);
2696 	if (bucket < 0)
2697 		return ret;
2698 
2699 	if (q->poll_stat[bucket].nr_samples)
2700 		ret = (q->poll_stat[bucket].mean + 1) / 2;
2701 
2702 	return ret;
2703 }
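
/*
 * Worked example of the half-mean heuristic above (illustrative
 * numbers): if the stats bucket for this request's direction and size
 * reports a mean completion time of 20000ns, the hybrid poller will
 * sleep for (20000 + 1) / 2 = 10000ns before it starts spinning,
 * aiming to wake up shortly before the expected completion.
 */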
2704 
2705 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2706 				     struct blk_mq_hw_ctx *hctx,
2707 				     struct request *rq)
2708 {
2709 	struct hrtimer_sleeper hs;
2710 	enum hrtimer_mode mode;
2711 	unsigned int nsecs;
2712 	ktime_t kt;
2713 
2714 	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2715 		return false;
2716 
2717 	/*
2718 	 * poll_nsec can be:
2719 	 *
2720 	 * -1:	don't ever hybrid sleep
2721 	 *  0:	use half of prev avg
2722 	 * >0:	use this specific value
2723 	 */
2724 	if (q->poll_nsec == -1)
2725 		return false;
2726 	else if (q->poll_nsec > 0)
2727 		nsecs = q->poll_nsec;
2728 	else
2729 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2730 
2731 	if (!nsecs)
2732 		return false;
2733 
2734 	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2735 
2736 	/*
2737 	 * This will be replaced with the stats tracking code, using
2738 	 * 'avg_completion_time / 2' as the pre-sleep target.
2739 	 */
2740 	kt = nsecs;
2741 
2742 	mode = HRTIMER_MODE_REL;
2743 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2744 	hrtimer_set_expires(&hs.timer, kt);
2745 
2746 	hrtimer_init_sleeper(&hs, current);
2747 	do {
2748 		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2749 			break;
2750 		set_current_state(TASK_UNINTERRUPTIBLE);
2751 		hrtimer_start_expires(&hs.timer, mode);
2752 		if (hs.task)
2753 			io_schedule();
2754 		hrtimer_cancel(&hs.timer);
2755 		mode = HRTIMER_MODE_ABS;
2756 	} while (hs.task && !signal_pending(current));
2757 
2758 	__set_current_state(TASK_RUNNING);
2759 	destroy_hrtimer_on_stack(&hs.timer);
2760 	return true;
2761 }
2762 
2763 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2764 {
2765 	struct request_queue *q = hctx->queue;
2766 	long state;
2767 
2768 	/*
2769 	 * If we sleep, have the caller restart the poll loop to reset
2770 	 * the state. Like for the other success return cases, the
2771 	 * caller is responsible for checking if the IO completed. If
2772 	 * the IO isn't complete, we'll get called again and will go
2773 	 * straight to the busy poll loop.
2774 	 */
2775 	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2776 		return true;
2777 
2778 	hctx->poll_considered++;
2779 
2780 	state = current->state;
2781 	while (!need_resched()) {
2782 		int ret;
2783 
2784 		hctx->poll_invoked++;
2785 
2786 		ret = q->mq_ops->poll(hctx, rq->tag);
2787 		if (ret > 0) {
2788 			hctx->poll_success++;
2789 			set_current_state(TASK_RUNNING);
2790 			return true;
2791 		}
2792 
2793 		if (signal_pending_state(state, current))
2794 			set_current_state(TASK_RUNNING);
2795 
2796 		if (current->state == TASK_RUNNING)
2797 			return true;
2798 		if (ret < 0)
2799 			break;
2800 		cpu_relax();
2801 	}
2802 
2803 	return false;
2804 }
2805 
2806 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2807 {
2808 	struct blk_mq_hw_ctx *hctx;
2809 	struct blk_plug *plug;
2810 	struct request *rq;
2811 
2812 	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2813 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2814 		return false;
2815 
2816 	plug = current->plug;
2817 	if (plug)
2818 		blk_flush_plug_list(plug, false);
2819 
2820 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2821 	if (!blk_qc_t_is_internal(cookie))
2822 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2823 	else {
2824 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2825 		/*
2826 		 * With scheduling, if the request has completed, we'll
2827 		 * get a NULL return here, as we clear the sched tag when
2828 		 * that happens. The request still remains valid, like always,
2829 		 * so we should be safe with just the NULL check.
2830 		 */
2831 		if (!rq)
2832 			return false;
2833 	}
2834 
2835 	return __blk_mq_poll(hctx, rq);
2836 }
2837 EXPORT_SYMBOL_GPL(blk_mq_poll);
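
/*
 * Simplified usage sketch (modeled on what direct-I/O callers do;
 * bdev, bio and done are assumed context): submit a bio, keep the
 * returned cookie, and poll on it while waiting instead of sleeping.
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		if (!blk_mq_poll(bdev_get_queue(bdev), cookie))
 *			io_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */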
2838 
2839 static int __init blk_mq_init(void)
2840 {
2841 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2842 				blk_mq_hctx_notify_dead);
2843 	return 0;
2844 }
2845 subsys_initcall(blk_mq_init);
2846