xref: /openbmc/linux/block/blk-mq.c (revision d8bcaabe)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28 
29 #include <trace/events/block.h>
30 
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-stat.h"
37 #include "blk-wbt.h"
38 #include "blk-mq-sched.h"
39 
40 static void blk_mq_poll_stats_start(struct request_queue *q);
41 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
42 
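/*
 * Poll statistics are binned per data direction and per power-of-two
 * request size: bucket 2*n holds reads and bucket 2*n+1 holds writes of
 * roughly 2^(n+9) bytes (i.e. starting at 512 bytes). Oversized requests
 * fall into the last read/write pair of the BLK_MQ_POLL_STATS_BKTS buckets.
 */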
43 static int blk_mq_poll_stats_bkt(const struct request *rq)
44 {
45 	int ddir, bytes, bucket;
46 
47 	ddir = rq_data_dir(rq);
48 	bytes = blk_rq_bytes(rq);
49 
50 	bucket = ddir + 2*(ilog2(bytes) - 9);
51 
52 	if (bucket < 0)
53 		return -1;
54 	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
55 		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
56 
57 	return bucket;
58 }
59 
60 /*
61  * Check if any of the ctx's have pending work in this hardware queue
62  */
63 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
64 {
65 	return sbitmap_any_bit_set(&hctx->ctx_map) ||
66 			!list_empty_careful(&hctx->dispatch) ||
67 			blk_mq_sched_has_work(hctx);
68 }
69 
70 /*
71  * Mark this ctx as having pending work in this hardware queue
72  */
73 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
74 				     struct blk_mq_ctx *ctx)
75 {
76 	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
77 		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
78 }
79 
80 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
81 				      struct blk_mq_ctx *ctx)
82 {
83 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
84 }
85 
86 struct mq_inflight {
87 	struct hd_struct *part;
88 	unsigned int *inflight;
89 };
90 
91 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
92 				  struct request *rq, void *priv,
93 				  bool reserved)
94 {
95 	struct mq_inflight *mi = priv;
96 
97 	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
98 	    !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
99 		/*
100 		 * index[0] counts the specific partition that was asked
101 		 * for. index[1] counts the ones that are active on the
102 		 * whole device, so increment that if mi->part is indeed
103 		 * a partition, and not a whole device.
104 		 */
105 		if (rq->part == mi->part)
106 			mi->inflight[0]++;
107 		if (mi->part->partno)
108 			mi->inflight[1]++;
109 	}
110 }
111 
112 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
113 		      unsigned int inflight[2])
114 {
115 	struct mq_inflight mi = { .part = part, .inflight = inflight, };
116 
117 	inflight[0] = inflight[1] = 0;
118 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119 }
120 
121 void blk_freeze_queue_start(struct request_queue *q)
122 {
123 	int freeze_depth;
124 
125 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
126 	if (freeze_depth == 1) {
127 		percpu_ref_kill(&q->q_usage_counter);
128 		blk_mq_run_hw_queues(q, false);
129 	}
130 }
131 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
132 
133 void blk_mq_freeze_queue_wait(struct request_queue *q)
134 {
135 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
136 }
137 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
138 
139 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
140 				     unsigned long timeout)
141 {
142 	return wait_event_timeout(q->mq_freeze_wq,
143 					percpu_ref_is_zero(&q->q_usage_counter),
144 					timeout);
145 }
146 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
147 
148 /*
149  * Guarantee no request is in use, so we can change any data structure of
150  * the queue afterward.
151  */
152 void blk_freeze_queue(struct request_queue *q)
153 {
154 	/*
155 	 * In the !blk_mq case we are only calling this to kill the
156 	 * q_usage_counter, otherwise this increases the freeze depth
157 	 * and waits for q_usage_counter to drop to zero.  For this reason there is
158 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
159 	 * exported to drivers as the only user for unfreeze is blk_mq.
160 	 */
161 	blk_freeze_queue_start(q);
162 	blk_mq_freeze_queue_wait(q);
163 }
164 
165 void blk_mq_freeze_queue(struct request_queue *q)
166 {
167 	/*
168 	 * ...just an alias to keep freeze and unfreeze actions balanced
169 	 * in the blk_mq_* namespace
170 	 */
171 	blk_freeze_queue(q);
172 }
173 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
174 
175 void blk_mq_unfreeze_queue(struct request_queue *q)
176 {
177 	int freeze_depth;
178 
179 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
180 	WARN_ON_ONCE(freeze_depth < 0);
181 	if (!freeze_depth) {
182 		percpu_ref_reinit(&q->q_usage_counter);
183 		wake_up_all(&q->mq_freeze_wq);
184 	}
185 }
186 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
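
/*
 * Typical freeze/unfreeze pairing, as used when a queue's data structures
 * must be changed with no request in flight (illustrative sketch, not a
 * call site in this file):
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue/tag-set data structures ...
 *	blk_mq_unfreeze_queue(q);
 */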
187 
188 /*
189  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
190  * mpt3sas driver such that this function can be removed.
191  */
192 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
193 {
194 	unsigned long flags;
195 
196 	spin_lock_irqsave(q->queue_lock, flags);
197 	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
198 	spin_unlock_irqrestore(q->queue_lock, flags);
199 }
200 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
201 
202 /**
203  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
204  * @q: request queue.
205  *
206  * Note: this function does not prevent the struct request end_io()
207  * callback from being invoked. Once this function returns, we guarantee
208  * that no dispatch can happen until the queue is unquiesced via
209  * blk_mq_unquiesce_queue().
210  */
211 void blk_mq_quiesce_queue(struct request_queue *q)
212 {
213 	struct blk_mq_hw_ctx *hctx;
214 	unsigned int i;
215 	bool rcu = false;
216 
217 	blk_mq_quiesce_queue_nowait(q);
218 
219 	queue_for_each_hw_ctx(q, hctx, i) {
220 		if (hctx->flags & BLK_MQ_F_BLOCKING)
221 			synchronize_srcu(hctx->queue_rq_srcu);
222 		else
223 			rcu = true;
224 	}
225 	if (rcu)
226 		synchronize_rcu();
227 }
228 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
229 
230 /*
231  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
232  * @q: request queue.
233  *
234  * This function restores the queue to the state it was in before
235  * blk_mq_quiesce_queue() was called.
236  */
237 void blk_mq_unquiesce_queue(struct request_queue *q)
238 {
239 	unsigned long flags;
240 
241 	spin_lock_irqsave(q->queue_lock, flags);
242 	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
243 	spin_unlock_irqrestore(q->queue_lock, flags);
244 
245 	/* dispatch requests which are inserted during quiescing */
246 	blk_mq_run_hw_queues(q, true);
247 }
248 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
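
/*
 * Illustrative driver-side pairing (not a call site in this file): a driver
 * that must guarantee no ->queue_rq() invocation is in progress, e.g. while
 * reconfiguring hardware, can bracket that work with:
 *
 *	blk_mq_quiesce_queue(q);
 *	... no dispatch can happen here ...
 *	blk_mq_unquiesce_queue(q);
 */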
249 
250 void blk_mq_wake_waiters(struct request_queue *q)
251 {
252 	struct blk_mq_hw_ctx *hctx;
253 	unsigned int i;
254 
255 	queue_for_each_hw_ctx(q, hctx, i)
256 		if (blk_mq_hw_queue_mapped(hctx))
257 			blk_mq_tag_wakeup_all(hctx->tags, true);
258 
259 	/*
260 	 * If we are called because the queue has now been marked as
261 	 * dying, we need to ensure that processes currently waiting on
262 	 * the queue are notified as well.
263 	 */
264 	wake_up_all(&q->mq_freeze_wq);
265 }
266 
267 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
268 {
269 	return blk_mq_has_free_tags(hctx->tags);
270 }
271 EXPORT_SYMBOL(blk_mq_can_queue);
272 
273 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
274 		unsigned int tag, unsigned int op)
275 {
276 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
277 	struct request *rq = tags->static_rqs[tag];
278 
279 	rq->rq_flags = 0;
280 
281 	if (data->flags & BLK_MQ_REQ_INTERNAL) {
282 		rq->tag = -1;
283 		rq->internal_tag = tag;
284 	} else {
285 		if (blk_mq_tag_busy(data->hctx)) {
286 			rq->rq_flags = RQF_MQ_INFLIGHT;
287 			atomic_inc(&data->hctx->nr_active);
288 		}
289 		rq->tag = tag;
290 		rq->internal_tag = -1;
291 		data->hctx->tags->rqs[rq->tag] = rq;
292 	}
293 
294 	INIT_LIST_HEAD(&rq->queuelist);
295 	/* csd/requeue_work/fifo_time is initialized before use */
296 	rq->q = data->q;
297 	rq->mq_ctx = data->ctx;
298 	rq->cmd_flags = op;
299 	if (blk_queue_io_stat(data->q))
300 		rq->rq_flags |= RQF_IO_STAT;
301 	/* do not touch atomic flags, it needs atomic ops against the timer */
302 	rq->cpu = -1;
303 	INIT_HLIST_NODE(&rq->hash);
304 	RB_CLEAR_NODE(&rq->rb_node);
305 	rq->rq_disk = NULL;
306 	rq->part = NULL;
307 	rq->start_time = jiffies;
308 #ifdef CONFIG_BLK_CGROUP
309 	rq->rl = NULL;
310 	set_start_time_ns(rq);
311 	rq->io_start_time_ns = 0;
312 #endif
313 	rq->nr_phys_segments = 0;
314 #if defined(CONFIG_BLK_DEV_INTEGRITY)
315 	rq->nr_integrity_segments = 0;
316 #endif
317 	rq->special = NULL;
318 	/* tag was already set */
319 	rq->extra_len = 0;
320 
321 	INIT_LIST_HEAD(&rq->timeout_list);
322 	rq->timeout = 0;
323 
324 	rq->end_io = NULL;
325 	rq->end_io_data = NULL;
326 	rq->next_rq = NULL;
327 
328 	data->ctx->rq_dispatched[op_is_sync(op)]++;
329 	return rq;
330 }
331 
332 static struct request *blk_mq_get_request(struct request_queue *q,
333 		struct bio *bio, unsigned int op,
334 		struct blk_mq_alloc_data *data)
335 {
336 	struct elevator_queue *e = q->elevator;
337 	struct request *rq;
338 	unsigned int tag;
339 	struct blk_mq_ctx *local_ctx = NULL;
340 
341 	blk_queue_enter_live(q);
342 	data->q = q;
343 	if (likely(!data->ctx))
344 		data->ctx = local_ctx = blk_mq_get_ctx(q);
345 	if (likely(!data->hctx))
346 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
347 	if (op & REQ_NOWAIT)
348 		data->flags |= BLK_MQ_REQ_NOWAIT;
349 
350 	if (e) {
351 		data->flags |= BLK_MQ_REQ_INTERNAL;
352 
353 		/*
354 		 * Flush requests are special and go directly to the
355 		 * dispatch list.
356 		 */
357 		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
358 			e->type->ops.mq.limit_depth(op, data);
359 	}
360 
361 	tag = blk_mq_get_tag(data);
362 	if (tag == BLK_MQ_TAG_FAIL) {
363 		if (local_ctx) {
364 			blk_mq_put_ctx(local_ctx);
365 			data->ctx = NULL;
366 		}
367 		blk_queue_exit(q);
368 		return NULL;
369 	}
370 
371 	rq = blk_mq_rq_ctx_init(data, tag, op);
372 	if (!op_is_flush(op)) {
373 		rq->elv.icq = NULL;
374 		if (e && e->type->ops.mq.prepare_request) {
375 			if (e->type->icq_cache && rq_ioc(bio))
376 				blk_mq_sched_assign_ioc(rq, bio);
377 
378 			e->type->ops.mq.prepare_request(rq, bio);
379 			rq->rq_flags |= RQF_ELVPRIV;
380 		}
381 	}
382 	data->hctx->queued++;
383 	return rq;
384 }
385 
386 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
387 		unsigned int flags)
388 {
389 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
390 	struct request *rq;
391 	int ret;
392 
393 	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
394 	if (ret)
395 		return ERR_PTR(ret);
396 
397 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
398 	blk_queue_exit(q);
399 
400 	if (!rq)
401 		return ERR_PTR(-EWOULDBLOCK);
402 
403 	blk_mq_put_ctx(alloc_data.ctx);
404 
405 	rq->__data_len = 0;
406 	rq->__sector = (sector_t) -1;
407 	rq->bio = rq->biotail = NULL;
408 	return rq;
409 }
410 EXPORT_SYMBOL(blk_mq_alloc_request);
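
/*
 * Minimal usage sketch for the allocator above (illustrative only; the op
 * and flags shown are just an example, error handling is elided):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the payload, issue or execute the request ...
 *	blk_mq_free_request(rq);
 */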
411 
412 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
413 		unsigned int op, unsigned int flags, unsigned int hctx_idx)
414 {
415 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
416 	struct request *rq;
417 	unsigned int cpu;
418 	int ret;
419 
420 	/*
421 	 * If the tag allocator sleeps we could get an allocation for a
422 	 * different hardware context.  No need to complicate the low level
423 	 * allocator for the rare use case of a command tied to
424 	 * a specific queue.
425 	 */
426 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
427 		return ERR_PTR(-EINVAL);
428 
429 	if (hctx_idx >= q->nr_hw_queues)
430 		return ERR_PTR(-EIO);
431 
432 	ret = blk_queue_enter(q, true);
433 	if (ret)
434 		return ERR_PTR(ret);
435 
436 	/*
437 	 * Check if the hardware context is actually mapped to anything.
438 	 * If not, tell the caller that it should skip this queue.
439 	 */
440 	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
441 	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
442 		blk_queue_exit(q);
443 		return ERR_PTR(-EXDEV);
444 	}
445 	cpu = cpumask_first(alloc_data.hctx->cpumask);
446 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
447 
448 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
449 	blk_queue_exit(q);
450 
451 	if (!rq)
452 		return ERR_PTR(-EWOULDBLOCK);
453 
454 	return rq;
455 }
456 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
457 
458 void blk_mq_free_request(struct request *rq)
459 {
460 	struct request_queue *q = rq->q;
461 	struct elevator_queue *e = q->elevator;
462 	struct blk_mq_ctx *ctx = rq->mq_ctx;
463 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
464 	const int sched_tag = rq->internal_tag;
465 
466 	if (rq->rq_flags & RQF_ELVPRIV) {
467 		if (e && e->type->ops.mq.finish_request)
468 			e->type->ops.mq.finish_request(rq);
469 		if (rq->elv.icq) {
470 			put_io_context(rq->elv.icq->ioc);
471 			rq->elv.icq = NULL;
472 		}
473 	}
474 
475 	ctx->rq_completed[rq_is_sync(rq)]++;
476 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
477 		atomic_dec(&hctx->nr_active);
478 
479 	wbt_done(q->rq_wb, &rq->issue_stat);
480 
481 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
482 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
483 	if (rq->tag != -1)
484 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
485 	if (sched_tag != -1)
486 		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
487 	blk_mq_sched_restart(hctx);
488 	blk_queue_exit(q);
489 }
490 EXPORT_SYMBOL_GPL(blk_mq_free_request);
491 
492 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
493 {
494 	blk_account_io_done(rq);
495 
496 	if (rq->end_io) {
497 		wbt_done(rq->q->rq_wb, &rq->issue_stat);
498 		rq->end_io(rq, error);
499 	} else {
500 		if (unlikely(blk_bidi_rq(rq)))
501 			blk_mq_free_request(rq->next_rq);
502 		blk_mq_free_request(rq);
503 	}
504 }
505 EXPORT_SYMBOL(__blk_mq_end_request);
506 
507 void blk_mq_end_request(struct request *rq, blk_status_t error)
508 {
509 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
510 		BUG();
511 	__blk_mq_end_request(rq, error);
512 }
513 EXPORT_SYMBOL(blk_mq_end_request);
514 
515 static void __blk_mq_complete_request_remote(void *data)
516 {
517 	struct request *rq = data;
518 
519 	rq->q->softirq_done_fn(rq);
520 }
521 
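/*
 * Complete @rq. With QUEUE_FLAG_SAME_COMP set, completion is bounced via an
 * IPI to the CPU that submitted the request when that CPU is online and does
 * not share a cache with the completing CPU (QUEUE_FLAG_SAME_FORCE forces
 * the IPI even for cache-sharing CPUs); otherwise the softirq done handler
 * is invoked directly on the local CPU.
 */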
522 static void __blk_mq_complete_request(struct request *rq)
523 {
524 	struct blk_mq_ctx *ctx = rq->mq_ctx;
525 	bool shared = false;
526 	int cpu;
527 
528 	if (rq->internal_tag != -1)
529 		blk_mq_sched_completed_request(rq);
530 	if (rq->rq_flags & RQF_STATS) {
531 		blk_mq_poll_stats_start(rq->q);
532 		blk_stat_add(rq);
533 	}
534 
535 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
536 		rq->q->softirq_done_fn(rq);
537 		return;
538 	}
539 
540 	cpu = get_cpu();
541 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
542 		shared = cpus_share_cache(cpu, ctx->cpu);
543 
544 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
545 		rq->csd.func = __blk_mq_complete_request_remote;
546 		rq->csd.info = rq;
547 		rq->csd.flags = 0;
548 		smp_call_function_single_async(ctx->cpu, &rq->csd);
549 	} else {
550 		rq->q->softirq_done_fn(rq);
551 	}
552 	put_cpu();
553 }
554 
555 /**
556  * blk_mq_complete_request - end I/O on a request
557  * @rq:		the request being processed
558  *
559  * Description:
560  *	Ends all I/O on a request. It does not handle partial completions.
561  *	The actual completion happens out-of-order, through a IPI handler.
562  *	The actual completion happens out-of-order, through an IPI handler.
563 void blk_mq_complete_request(struct request *rq)
564 {
565 	struct request_queue *q = rq->q;
566 
567 	if (unlikely(blk_should_fake_timeout(q)))
568 		return;
569 	if (!blk_mark_rq_complete(rq))
570 		__blk_mq_complete_request(rq);
571 }
572 EXPORT_SYMBOL(blk_mq_complete_request);
573 
574 int blk_mq_request_started(struct request *rq)
575 {
576 	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
577 }
578 EXPORT_SYMBOL_GPL(blk_mq_request_started);
579 
580 void blk_mq_start_request(struct request *rq)
581 {
582 	struct request_queue *q = rq->q;
583 
584 	blk_mq_sched_started_request(rq);
585 
586 	trace_block_rq_issue(q, rq);
587 
588 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
589 		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
590 		rq->rq_flags |= RQF_STATS;
591 		wbt_issue(q->rq_wb, &rq->issue_stat);
592 	}
593 
594 	blk_add_timer(rq);
595 
596 	/*
597 	 * Ensure that ->deadline is visible before we set the started
598 	 * flag and clear the completed flag.
599 	 */
600 	smp_mb__before_atomic();
601 
602 	/*
603 	 * Mark us as started and clear complete. Complete might have been
604 	 * set if requeue raced with timeout, which then marked it as
605 	 * complete. So be sure to clear complete again when we start
606 	 * the request, otherwise we'll ignore the completion event.
607 	 */
608 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
609 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
610 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
611 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
612 
613 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
614 		/*
615 		 * Make sure space for the drain appears.  We know we can do
616 		 * this because max_hw_segments has been adjusted to be one
617 		 * fewer than the device can handle.
618 		 */
619 		rq->nr_phys_segments++;
620 	}
621 }
622 EXPORT_SYMBOL(blk_mq_start_request);
623 
624 /*
625  * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
626  * flag isn't set yet, so there may be a race with the timeout handler.
627  * But since rq->deadline has just been set in .queue_rq() in this
628  * situation, the race won't happen in practice because rq->timeout
629  * should be large enough to cover the window between
630  * blk_mq_start_request() being called from .queue_rq() and
631  * REQ_ATOM_STARTED being cleared here.
632  */
633 static void __blk_mq_requeue_request(struct request *rq)
634 {
635 	struct request_queue *q = rq->q;
636 
637 	trace_block_rq_requeue(q, rq);
638 	wbt_requeue(q->rq_wb, &rq->issue_stat);
639 	blk_mq_sched_requeue_request(rq);
640 
641 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
642 		if (q->dma_drain_size && blk_rq_bytes(rq))
643 			rq->nr_phys_segments--;
644 	}
645 }
646 
647 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
648 {
649 	__blk_mq_requeue_request(rq);
650 
651 	BUG_ON(blk_queued_rq(rq));
652 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
653 }
654 EXPORT_SYMBOL(blk_mq_requeue_request);
655 
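/*
 * Drain q->requeue_list: requests flagged RQF_SOFTBARRIER are reinserted at
 * the head of their scheduler/dispatch queue, everything else at the tail,
 * and the hardware queues are then kicked to dispatch them.
 */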
656 static void blk_mq_requeue_work(struct work_struct *work)
657 {
658 	struct request_queue *q =
659 		container_of(work, struct request_queue, requeue_work.work);
660 	LIST_HEAD(rq_list);
661 	struct request *rq, *next;
662 
663 	spin_lock_irq(&q->requeue_lock);
664 	list_splice_init(&q->requeue_list, &rq_list);
665 	spin_unlock_irq(&q->requeue_lock);
666 
667 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
668 		if (!(rq->rq_flags & RQF_SOFTBARRIER))
669 			continue;
670 
671 		rq->rq_flags &= ~RQF_SOFTBARRIER;
672 		list_del_init(&rq->queuelist);
673 		blk_mq_sched_insert_request(rq, true, false, false, true);
674 	}
675 
676 	while (!list_empty(&rq_list)) {
677 		rq = list_entry(rq_list.next, struct request, queuelist);
678 		list_del_init(&rq->queuelist);
679 		blk_mq_sched_insert_request(rq, false, false, false, true);
680 	}
681 
682 	blk_mq_run_hw_queues(q, false);
683 }
684 
685 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
686 				bool kick_requeue_list)
687 {
688 	struct request_queue *q = rq->q;
689 	unsigned long flags;
690 
691 	/*
692 	 * We abuse this flag that is otherwise used by the I/O scheduler to
693 	 * request head insertion from the workqueue.
694 	 */
695 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
696 
697 	spin_lock_irqsave(&q->requeue_lock, flags);
698 	if (at_head) {
699 		rq->rq_flags |= RQF_SOFTBARRIER;
700 		list_add(&rq->queuelist, &q->requeue_list);
701 	} else {
702 		list_add_tail(&rq->queuelist, &q->requeue_list);
703 	}
704 	spin_unlock_irqrestore(&q->requeue_lock, flags);
705 
706 	if (kick_requeue_list)
707 		blk_mq_kick_requeue_list(q);
708 }
709 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
710 
711 void blk_mq_kick_requeue_list(struct request_queue *q)
712 {
713 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
714 }
715 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
716 
717 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
718 				    unsigned long msecs)
719 {
720 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
721 				    msecs_to_jiffies(msecs));
722 }
723 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
724 
725 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
726 {
727 	if (tag < tags->nr_tags) {
728 		prefetch(tags->rqs[tag]);
729 		return tags->rqs[tag];
730 	}
731 
732 	return NULL;
733 }
734 EXPORT_SYMBOL(blk_mq_tag_to_rq);
735 
736 struct blk_mq_timeout_data {
737 	unsigned long next;
738 	unsigned int next_set;
739 };
740 
741 void blk_mq_rq_timed_out(struct request *req, bool reserved)
742 {
743 	const struct blk_mq_ops *ops = req->q->mq_ops;
744 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
745 
746 	/*
747 	 * We know that complete is set at this point. If STARTED isn't set
748 	 * anymore, then the request isn't active and the "timeout" should
749 	 * just be ignored. This can happen due to the bitflag ordering.
750 	 * Timeout first checks if STARTED is set, and if it is, assumes
751 	 * the request is active. But if we race with completion, then
752 	 * both flags will get cleared. So check here again, and ignore
753 	 * a timeout event with a request that isn't active.
754 	 */
755 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
756 		return;
757 
758 	if (ops->timeout)
759 		ret = ops->timeout(req, reserved);
760 
761 	switch (ret) {
762 	case BLK_EH_HANDLED:
763 		__blk_mq_complete_request(req);
764 		break;
765 	case BLK_EH_RESET_TIMER:
766 		blk_add_timer(req);
767 		blk_clear_rq_complete(req);
768 		break;
769 	case BLK_EH_NOT_HANDLED:
770 		break;
771 	default:
772 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
773 		break;
774 	}
775 }
776 
777 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
778 		struct request *rq, void *priv, bool reserved)
779 {
780 	struct blk_mq_timeout_data *data = priv;
781 
782 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
783 		return;
784 
785 	/*
786 	 * The rq being checked may already have been freed and reallocated
787 	 * here; we avoid this race by checking rq->deadline and the
788 	 * REQ_ATOM_COMPLETE flag together:
789 	 *
790 	 * - if rq->deadline is observed as the new value because of reuse,
791 	 *   the rq won't be timed out since the new deadline lies in the future.
792 	 * - if rq->deadline is observed as the previous value, the
793 	 *   REQ_ATOM_COMPLETE flag won't be cleared in the reuse path,
794 	 *   because we put a barrier between setting rq->deadline and
795 	 *   clearing the flag in blk_mq_start_request(), so this rq
796 	 *   won't be timed out either.
797 	 */
798 	if (time_after_eq(jiffies, rq->deadline)) {
799 		if (!blk_mark_rq_complete(rq))
800 			blk_mq_rq_timed_out(rq, reserved);
801 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
802 		data->next = rq->deadline;
803 		data->next_set = 1;
804 	}
805 }
806 
807 static void blk_mq_timeout_work(struct work_struct *work)
808 {
809 	struct request_queue *q =
810 		container_of(work, struct request_queue, timeout_work);
811 	struct blk_mq_timeout_data data = {
812 		.next		= 0,
813 		.next_set	= 0,
814 	};
815 	int i;
816 
817 	/* A deadlock might occur if a request is stuck requiring a
818 	 * timeout at the same time a queue freeze is waiting for
819 	 * completion, since the timeout code would not be able to
820 	 * acquire the queue reference here.
821 	 *
822 	 * That's why we don't use blk_queue_enter here; instead, we use
823 	 * percpu_ref_tryget directly, because we need to be able to
824 	 * obtain a reference even in the short window between the queue
825 	 * starting to freeze, by dropping the first reference in
826 	 * blk_freeze_queue_start, and the moment the last request is
827 	 * consumed, marked by the instant q_usage_counter reaches
828 	 * zero.
829 	 */
830 	if (!percpu_ref_tryget(&q->q_usage_counter))
831 		return;
832 
833 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
834 
835 	if (data.next_set) {
836 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
837 		mod_timer(&q->timeout, data.next);
838 	} else {
839 		struct blk_mq_hw_ctx *hctx;
840 
841 		queue_for_each_hw_ctx(q, hctx, i) {
842 			/* the hctx may be unmapped, so check it here */
843 			if (blk_mq_hw_queue_mapped(hctx))
844 				blk_mq_tag_idle(hctx);
845 		}
846 	}
847 	blk_queue_exit(q);
848 }
849 
850 struct flush_busy_ctx_data {
851 	struct blk_mq_hw_ctx *hctx;
852 	struct list_head *list;
853 };
854 
855 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
856 {
857 	struct flush_busy_ctx_data *flush_data = data;
858 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
859 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
860 
861 	sbitmap_clear_bit(sb, bitnr);
862 	spin_lock(&ctx->lock);
863 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
864 	spin_unlock(&ctx->lock);
865 	return true;
866 }
867 
868 /*
869  * Process software queues that have been marked busy, splicing them
870  * to the for-dispatch list.
871  */
872 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
873 {
874 	struct flush_busy_ctx_data data = {
875 		.hctx = hctx,
876 		.list = list,
877 	};
878 
879 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
880 }
881 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
882 
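/*
 * Map the number of requests dispatched in one batch to a log2-scaled slot
 * of the hctx->dispatched[] histogram (slot 0 means nothing was dispatched).
 */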
883 static inline unsigned int queued_to_index(unsigned int queued)
884 {
885 	if (!queued)
886 		return 0;
887 
888 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
889 }
890 
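/*
 * Try to assign a driver (device-visible) tag to a request that currently
 * only holds a scheduler-internal tag. Returns true if rq->tag is valid on
 * return; with @wait set the tag allocation may sleep.
 */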
891 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
892 			   bool wait)
893 {
894 	struct blk_mq_alloc_data data = {
895 		.q = rq->q,
896 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
897 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
898 	};
899 
900 	might_sleep_if(wait);
901 
902 	if (rq->tag != -1)
903 		goto done;
904 
905 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
906 		data.flags |= BLK_MQ_REQ_RESERVED;
907 
908 	rq->tag = blk_mq_get_tag(&data);
909 	if (rq->tag >= 0) {
910 		if (blk_mq_tag_busy(data.hctx)) {
911 			rq->rq_flags |= RQF_MQ_INFLIGHT;
912 			atomic_inc(&data.hctx->nr_active);
913 		}
914 		data.hctx->tags->rqs[rq->tag] = rq;
915 	}
916 
917 done:
918 	if (hctx)
919 		*hctx = data.hctx;
920 	return rq->tag != -1;
921 }
922 
923 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
924 				    struct request *rq)
925 {
926 	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
927 	rq->tag = -1;
928 
929 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
930 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
931 		atomic_dec(&hctx->nr_active);
932 	}
933 }
934 
935 static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
936 				       struct request *rq)
937 {
938 	if (rq->tag == -1 || rq->internal_tag == -1)
939 		return;
940 
941 	__blk_mq_put_driver_tag(hctx, rq);
942 }
943 
944 static void blk_mq_put_driver_tag(struct request *rq)
945 {
946 	struct blk_mq_hw_ctx *hctx;
947 
948 	if (rq->tag == -1 || rq->internal_tag == -1)
949 		return;
950 
951 	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
952 	__blk_mq_put_driver_tag(hctx, rq);
953 }
954 
955 /*
956  * If we fail getting a driver tag because all the driver tags are already
957  * assigned and on the dispatch list, BUT the first entry does not have a
958  * tag, then we could deadlock. For that case, move entries with assigned
959  * driver tags to the front, leaving the set of tagged requests in the
960  * same order, and the untagged set in the same order.
961  */
962 static bool reorder_tags_to_front(struct list_head *list)
963 {
964 	struct request *rq, *tmp, *first = NULL;
965 
966 	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
967 		if (rq == first)
968 			break;
969 		if (rq->tag != -1) {
970 			list_move(&rq->queuelist, list);
971 			if (!first)
972 				first = rq;
973 		}
974 	}
975 
976 	return first != NULL;
977 }
978 
979 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
980 				void *key)
981 {
982 	struct blk_mq_hw_ctx *hctx;
983 
984 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
985 
986 	list_del(&wait->entry);
987 	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
988 	blk_mq_run_hw_queue(hctx, true);
989 	return 1;
990 }
991 
992 static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
993 {
994 	struct sbq_wait_state *ws;
995 
996 	/*
997 	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
998 	 * The thread which wins the race to grab this bit adds the hardware
999 	 * queue to the wait queue.
1000 	 */
1001 	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
1002 	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
1003 		return false;
1004 
1005 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
1006 	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
1007 
1008 	/*
1009 	 * As soon as this returns, it's no longer safe to fiddle with
1010 	 * hctx->dispatch_wait, since a completion can wake up the wait queue
1011 	 * and unlock the bit.
1012 	 */
1013 	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
1014 	return true;
1015 }
1016 
1017 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
1018 {
1019 	struct blk_mq_hw_ctx *hctx;
1020 	struct request *rq;
1021 	int errors, queued;
1022 
1023 	if (list_empty(list))
1024 		return false;
1025 
1026 	/*
1027 	 * Now process all the entries, sending them to the driver.
1028 	 */
1029 	errors = queued = 0;
1030 	do {
1031 		struct blk_mq_queue_data bd;
1032 		blk_status_t ret;
1033 
1034 		rq = list_first_entry(list, struct request, queuelist);
1035 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1036 			if (!queued && reorder_tags_to_front(list))
1037 				continue;
1038 
1039 			/*
1040 			 * The initial allocation attempt failed, so we need to
1041 			 * rerun the hardware queue when a tag is freed.
1042 			 */
1043 			if (!blk_mq_dispatch_wait_add(hctx))
1044 				break;
1045 
1046 			/*
1047 			 * It's possible that a tag was freed in the window
1048 			 * between the allocation failure and adding the
1049 			 * hardware queue to the wait queue.
1050 			 */
1051 			if (!blk_mq_get_driver_tag(rq, &hctx, false))
1052 				break;
1053 		}
1054 
1055 		list_del_init(&rq->queuelist);
1056 
1057 		bd.rq = rq;
1058 
1059 		/*
1060 		 * Flag last if we have no more requests, or if we have more
1061 		 * but can't assign a driver tag to the next one.
1062 		 */
1063 		if (list_empty(list))
1064 			bd.last = true;
1065 		else {
1066 			struct request *nxt;
1067 
1068 			nxt = list_first_entry(list, struct request, queuelist);
1069 			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1070 		}
1071 
1072 		ret = q->mq_ops->queue_rq(hctx, &bd);
1073 		if (ret == BLK_STS_RESOURCE) {
1074 			blk_mq_put_driver_tag_hctx(hctx, rq);
1075 			list_add(&rq->queuelist, list);
1076 			__blk_mq_requeue_request(rq);
1077 			break;
1078 		}
1079 
1080 		if (unlikely(ret != BLK_STS_OK)) {
1081 			errors++;
1082 			blk_mq_end_request(rq, BLK_STS_IOERR);
1083 			continue;
1084 		}
1085 
1086 		queued++;
1087 	} while (!list_empty(list));
1088 
1089 	hctx->dispatched[queued_to_index(queued)]++;
1090 
1091 	/*
1092 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1093 	 * that is where we will continue on next queue run.
1094 	 */
1095 	if (!list_empty(list)) {
1096 		/*
1097 		 * If an I/O scheduler has been configured and we got a driver
1098 		 * tag for the next request already, free it again.
1099 		 */
1100 		rq = list_first_entry(list, struct request, queuelist);
1101 		blk_mq_put_driver_tag(rq);
1102 
1103 		spin_lock(&hctx->lock);
1104 		list_splice_init(list, &hctx->dispatch);
1105 		spin_unlock(&hctx->lock);
1106 
1107 		/*
1108 		 * If SCHED_RESTART was set by the caller of this function and
1109 		 * it is no longer set that means that it was cleared by another
1110 		 * thread and hence that a queue rerun is needed.
1111 		 *
1112 		 * If TAG_WAITING is set that means that an I/O scheduler has
1113 		 * been configured and another thread is waiting for a driver
1114 		 * tag. To guarantee fairness, do not rerun this hardware queue
1115 		 * but let the other thread grab the driver tag.
1116 		 *
1117 		 * If no I/O scheduler has been configured it is possible that
1118 		 * the hardware queue got stopped and restarted before requests
1119 		 * were pushed back onto the dispatch list. Rerun the queue to
1120 		 * avoid starvation. Notes:
1121 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
1122 		 *   been stopped before rerunning a queue.
1123 		 * - Some but not all block drivers stop a queue before
1124 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1125 		 *   and dm-rq.
1126 		 */
1127 		if (!blk_mq_sched_needs_restart(hctx) &&
1128 		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
1129 			blk_mq_run_hw_queue(hctx, true);
1130 	}
1131 
1132 	return (queued + errors) != 0;
1133 }
1134 
1135 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1136 {
1137 	int srcu_idx;
1138 
1139 	/*
1140 	 * We should be running this queue from one of the CPUs that
1141 	 * are mapped to it.
1142 	 */
1143 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1144 		cpu_online(hctx->next_cpu));
1145 
1146 	/*
1147 	 * We can't run the queue inline with ints disabled. Ensure that
1148 	 * we catch bad users of this early.
1149 	 */
1150 	WARN_ON_ONCE(in_interrupt());
1151 
1152 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1153 		rcu_read_lock();
1154 		blk_mq_sched_dispatch_requests(hctx);
1155 		rcu_read_unlock();
1156 	} else {
1157 		might_sleep();
1158 
1159 		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1160 		blk_mq_sched_dispatch_requests(hctx);
1161 		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1162 	}
1163 }
1164 
1165 /*
1166  * It'd be great if the workqueue API had a way to pass
1167  * in a mask and had some smarts for more clever placement.
1168  * For now we just round-robin here, switching for every
1169  * BLK_MQ_CPU_WORK_BATCH queued items.
1170  */
1171 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1172 {
1173 	if (hctx->queue->nr_hw_queues == 1)
1174 		return WORK_CPU_UNBOUND;
1175 
1176 	if (--hctx->next_cpu_batch <= 0) {
1177 		int next_cpu;
1178 
1179 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1180 		if (next_cpu >= nr_cpu_ids)
1181 			next_cpu = cpumask_first(hctx->cpumask);
1182 
1183 		hctx->next_cpu = next_cpu;
1184 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1185 	}
1186 
1187 	return hctx->next_cpu;
1188 }
1189 
1190 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1191 					unsigned long msecs)
1192 {
1193 	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1194 		return;
1195 
1196 	if (unlikely(blk_mq_hctx_stopped(hctx)))
1197 		return;
1198 
1199 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1200 		int cpu = get_cpu();
1201 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1202 			__blk_mq_run_hw_queue(hctx);
1203 			put_cpu();
1204 			return;
1205 		}
1206 
1207 		put_cpu();
1208 	}
1209 
1210 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1211 					 &hctx->run_work,
1212 					 msecs_to_jiffies(msecs));
1213 }
1214 
1215 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1216 {
1217 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
1218 }
1219 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1220 
1221 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1222 {
1223 	__blk_mq_delay_run_hw_queue(hctx, async, 0);
1224 }
1225 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1226 
1227 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1228 {
1229 	struct blk_mq_hw_ctx *hctx;
1230 	int i;
1231 
1232 	queue_for_each_hw_ctx(q, hctx, i) {
1233 		if (!blk_mq_hctx_has_pending(hctx) ||
1234 		    blk_mq_hctx_stopped(hctx))
1235 			continue;
1236 
1237 		blk_mq_run_hw_queue(hctx, async);
1238 	}
1239 }
1240 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1241 
1242 /**
1243  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1244  * @q: request queue.
1245  *
1246  * The caller is responsible for serializing this function against
1247  * blk_mq_{start,stop}_hw_queue().
1248  */
1249 bool blk_mq_queue_stopped(struct request_queue *q)
1250 {
1251 	struct blk_mq_hw_ctx *hctx;
1252 	int i;
1253 
1254 	queue_for_each_hw_ctx(q, hctx, i)
1255 		if (blk_mq_hctx_stopped(hctx))
1256 			return true;
1257 
1258 	return false;
1259 }
1260 EXPORT_SYMBOL(blk_mq_queue_stopped);
1261 
1262 /*
1263  * This function is often used for pausing .queue_rq() by driver when
1264  * there isn't enough resource or some conditions aren't satisfied, and
1265  * BLK_STS_RESOURCE is usually returned.
1266  *
1267  * We do not guarantee that dispatch can be drained or blocked
1268  * after blk_mq_stop_hw_queue() returns. Please use
1269  * blk_mq_quiesce_queue() for that requirement.
1270  */
1271 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1272 {
1273 	cancel_delayed_work(&hctx->run_work);
1274 
1275 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1276 }
1277 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1278 
1279 /*
1280  * This function is often used for pausing .queue_rq() by driver when
1281  * there isn't enough resource or some conditions aren't satisfied, and
1282  * BLK_STS_RESOURCE is usually returned.
1283  *
1284  * We do not guarantee that dispatch can be drained or blocked
1285  * after blk_mq_stop_hw_queues() returns. Please use
1286  * blk_mq_quiesce_queue() for that requirement.
1287  */
1288 void blk_mq_stop_hw_queues(struct request_queue *q)
1289 {
1290 	struct blk_mq_hw_ctx *hctx;
1291 	int i;
1292 
1293 	queue_for_each_hw_ctx(q, hctx, i)
1294 		blk_mq_stop_hw_queue(hctx);
1295 }
1296 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1297 
1298 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1299 {
1300 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1301 
1302 	blk_mq_run_hw_queue(hctx, false);
1303 }
1304 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1305 
1306 void blk_mq_start_hw_queues(struct request_queue *q)
1307 {
1308 	struct blk_mq_hw_ctx *hctx;
1309 	int i;
1310 
1311 	queue_for_each_hw_ctx(q, hctx, i)
1312 		blk_mq_start_hw_queue(hctx);
1313 }
1314 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1315 
1316 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1317 {
1318 	if (!blk_mq_hctx_stopped(hctx))
1319 		return;
1320 
1321 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1322 	blk_mq_run_hw_queue(hctx, async);
1323 }
1324 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1325 
1326 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1327 {
1328 	struct blk_mq_hw_ctx *hctx;
1329 	int i;
1330 
1331 	queue_for_each_hw_ctx(q, hctx, i)
1332 		blk_mq_start_stopped_hw_queue(hctx, async);
1333 }
1334 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1335 
1336 static void blk_mq_run_work_fn(struct work_struct *work)
1337 {
1338 	struct blk_mq_hw_ctx *hctx;
1339 
1340 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1341 
1342 	/*
1343 	 * If we are stopped, don't run the queue. The exception is if
1344 	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1345 	 * the STOPPED bit and run it.
1346 	 */
1347 	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1348 		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1349 			return;
1350 
1351 		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1352 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1353 	}
1354 
1355 	__blk_mq_run_hw_queue(hctx);
1356 }
1357 
1358 
1359 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1360 {
1361 	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1362 		return;
1363 
1364 	/*
1365 	 * Stop the hw queue, then modify currently delayed work.
1366 	 * This should prevent us from running the queue prematurely.
1367 	 * Mark the queue as auto-clearing STOPPED when it runs.
1368 	 */
1369 	blk_mq_stop_hw_queue(hctx);
1370 	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1371 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1372 					&hctx->run_work,
1373 					msecs_to_jiffies(msecs));
1374 }
1375 EXPORT_SYMBOL(blk_mq_delay_queue);
1376 
1377 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1378 					    struct request *rq,
1379 					    bool at_head)
1380 {
1381 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1382 
1383 	lockdep_assert_held(&ctx->lock);
1384 
1385 	trace_block_rq_insert(hctx->queue, rq);
1386 
1387 	if (at_head)
1388 		list_add(&rq->queuelist, &ctx->rq_list);
1389 	else
1390 		list_add_tail(&rq->queuelist, &ctx->rq_list);
1391 }
1392 
1393 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1394 			     bool at_head)
1395 {
1396 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1397 
1398 	lockdep_assert_held(&ctx->lock);
1399 
1400 	__blk_mq_insert_req_list(hctx, rq, at_head);
1401 	blk_mq_hctx_mark_pending(hctx, ctx);
1402 }
1403 
1404 /*
1405  * Should only be used carefully, when the caller knows we want to
1406  * bypass a potential IO scheduler on the target device.
1407  */
1408 void blk_mq_request_bypass_insert(struct request *rq)
1409 {
1410 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1411 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1412 
1413 	spin_lock(&hctx->lock);
1414 	list_add_tail(&rq->queuelist, &hctx->dispatch);
1415 	spin_unlock(&hctx->lock);
1416 
1417 	blk_mq_run_hw_queue(hctx, false);
1418 }
1419 
1420 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1421 			    struct list_head *list)
1422 
1423 {
1424 	/*
1425 	 * Preemption doesn't flush the plug list, so it's possible that
1426 	 * ctx->cpu is offline now.
1427 	 */
1428 	spin_lock(&ctx->lock);
1429 	while (!list_empty(list)) {
1430 		struct request *rq;
1431 
1432 		rq = list_first_entry(list, struct request, queuelist);
1433 		BUG_ON(rq->mq_ctx != ctx);
1434 		list_del_init(&rq->queuelist);
1435 		__blk_mq_insert_req_list(hctx, rq, false);
1436 	}
1437 	blk_mq_hctx_mark_pending(hctx, ctx);
1438 	spin_unlock(&ctx->lock);
1439 }
1440 
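/*
 * list_sort() comparator for plugged requests: order by software queue
 * (mq_ctx) first and by sector within a queue, so that requests destined
 * for the same ctx end up adjacent and can be inserted as one batch below.
 */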
1441 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1442 {
1443 	struct request *rqa = container_of(a, struct request, queuelist);
1444 	struct request *rqb = container_of(b, struct request, queuelist);
1445 
1446 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1447 		 (rqa->mq_ctx == rqb->mq_ctx &&
1448 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1449 }
1450 
1451 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1452 {
1453 	struct blk_mq_ctx *this_ctx;
1454 	struct request_queue *this_q;
1455 	struct request *rq;
1456 	LIST_HEAD(list);
1457 	LIST_HEAD(ctx_list);
1458 	unsigned int depth;
1459 
1460 	list_splice_init(&plug->mq_list, &list);
1461 
1462 	list_sort(NULL, &list, plug_ctx_cmp);
1463 
1464 	this_q = NULL;
1465 	this_ctx = NULL;
1466 	depth = 0;
1467 
1468 	while (!list_empty(&list)) {
1469 		rq = list_entry_rq(list.next);
1470 		list_del_init(&rq->queuelist);
1471 		BUG_ON(!rq->q);
1472 		if (rq->mq_ctx != this_ctx) {
1473 			if (this_ctx) {
1474 				trace_block_unplug(this_q, depth, from_schedule);
1475 				blk_mq_sched_insert_requests(this_q, this_ctx,
1476 								&ctx_list,
1477 								from_schedule);
1478 			}
1479 
1480 			this_ctx = rq->mq_ctx;
1481 			this_q = rq->q;
1482 			depth = 0;
1483 		}
1484 
1485 		depth++;
1486 		list_add_tail(&rq->queuelist, &ctx_list);
1487 	}
1488 
1489 	/*
1490 	 * If 'this_ctx' is set, we know we have entries to complete
1491 	 * on 'ctx_list'. Do those.
1492 	 */
1493 	if (this_ctx) {
1494 		trace_block_unplug(this_q, depth, from_schedule);
1495 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1496 						from_schedule);
1497 	}
1498 }
1499 
1500 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1501 {
1502 	blk_init_request_from_bio(rq, bio);
1503 
1504 	blk_account_io_start(rq, true);
1505 }
1506 
1507 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1508 {
1509 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1510 		!blk_queue_nomerges(hctx->queue);
1511 }
1512 
1513 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1514 				   struct blk_mq_ctx *ctx,
1515 				   struct request *rq)
1516 {
1517 	spin_lock(&ctx->lock);
1518 	__blk_mq_insert_request(hctx, rq, false);
1519 	spin_unlock(&ctx->lock);
1520 }
1521 
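/*
 * Encode a polling cookie from the hardware queue number and the request's
 * tag, using the scheduler-internal tag when no driver tag has been
 * assigned yet.
 */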
1522 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1523 {
1524 	if (rq->tag != -1)
1525 		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1526 
1527 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1528 }
1529 
1530 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1531 					struct request *rq,
1532 					blk_qc_t *cookie, bool may_sleep)
1533 {
1534 	struct request_queue *q = rq->q;
1535 	struct blk_mq_queue_data bd = {
1536 		.rq = rq,
1537 		.last = true,
1538 	};
1539 	blk_qc_t new_cookie;
1540 	blk_status_t ret;
1541 	bool run_queue = true;
1542 
1543 	/* RCU or SRCU read lock is needed before checking quiesced flag */
1544 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1545 		run_queue = false;
1546 		goto insert;
1547 	}
1548 
1549 	if (q->elevator)
1550 		goto insert;
1551 
1552 	if (!blk_mq_get_driver_tag(rq, NULL, false))
1553 		goto insert;
1554 
1555 	new_cookie = request_to_qc_t(hctx, rq);
1556 
1557 	/*
1558 	 * If queueing succeeded, we are done. On an error, end the request.
1559 	 * For any other status (busy), just add it to our list as we
1560 	 * previously would have done.
1561 	 */
1562 	ret = q->mq_ops->queue_rq(hctx, &bd);
1563 	switch (ret) {
1564 	case BLK_STS_OK:
1565 		*cookie = new_cookie;
1566 		return;
1567 	case BLK_STS_RESOURCE:
1568 		__blk_mq_requeue_request(rq);
1569 		goto insert;
1570 	default:
1571 		*cookie = BLK_QC_T_NONE;
1572 		blk_mq_end_request(rq, ret);
1573 		return;
1574 	}
1575 
1576 insert:
1577 	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
1578 }
1579 
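/*
 * Issue a request directly to the driver, holding the same RCU/SRCU
 * protection that blk_mq_quiesce_queue() synchronizes against: plain RCU
 * for non-blocking ->queue_rq() implementations, SRCU when
 * BLK_MQ_F_BLOCKING is set.
 */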
1580 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1581 		struct request *rq, blk_qc_t *cookie)
1582 {
1583 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1584 		rcu_read_lock();
1585 		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
1586 		rcu_read_unlock();
1587 	} else {
1588 		unsigned int srcu_idx;
1589 
1590 		might_sleep();
1591 
1592 		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1593 		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
1594 		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1595 	}
1596 }
1597 
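/*
 * Entry point for bios submitted to a blk-mq queue. After bounce/split and
 * merge attempts, the allocated request takes one of several paths: the
 * flush machinery for flush/FUA bios, the per-task plug list for
 * single-hw-queue devices (or limited plugging for multi-queue ones),
 * direct issue for sync requests on multi-queue devices, the I/O scheduler
 * if one is attached, or plain insertion into the software queue followed
 * by a queue run.
 */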
1598 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1599 {
1600 	const int is_sync = op_is_sync(bio->bi_opf);
1601 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1602 	struct blk_mq_alloc_data data = { .flags = 0 };
1603 	struct request *rq;
1604 	unsigned int request_count = 0;
1605 	struct blk_plug *plug;
1606 	struct request *same_queue_rq = NULL;
1607 	blk_qc_t cookie;
1608 	unsigned int wb_acct;
1609 
1610 	blk_queue_bounce(q, &bio);
1611 
1612 	blk_queue_split(q, &bio);
1613 
1614 	if (!bio_integrity_prep(bio))
1615 		return BLK_QC_T_NONE;
1616 
1617 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1618 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1619 		return BLK_QC_T_NONE;
1620 
1621 	if (blk_mq_sched_bio_merge(q, bio))
1622 		return BLK_QC_T_NONE;
1623 
1624 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1625 
1626 	trace_block_getrq(q, bio, bio->bi_opf);
1627 
1628 	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1629 	if (unlikely(!rq)) {
1630 		__wbt_done(q->rq_wb, wb_acct);
1631 		if (bio->bi_opf & REQ_NOWAIT)
1632 			bio_wouldblock_error(bio);
1633 		return BLK_QC_T_NONE;
1634 	}
1635 
1636 	wbt_track(&rq->issue_stat, wb_acct);
1637 
1638 	cookie = request_to_qc_t(data.hctx, rq);
1639 
1640 	plug = current->plug;
1641 	if (unlikely(is_flush_fua)) {
1642 		blk_mq_put_ctx(data.ctx);
1643 		blk_mq_bio_to_request(rq, bio);
1644 		if (q->elevator) {
1645 			blk_mq_sched_insert_request(rq, false, true, true,
1646 					true);
1647 		} else {
1648 			blk_insert_flush(rq);
1649 			blk_mq_run_hw_queue(data.hctx, true);
1650 		}
1651 	} else if (plug && q->nr_hw_queues == 1) {
1652 		struct request *last = NULL;
1653 
1654 		blk_mq_put_ctx(data.ctx);
1655 		blk_mq_bio_to_request(rq, bio);
1656 
1657 		/*
1658 		 * @request_count may become stale because we may have been
1659 		 * scheduled out, so check the list again.
1660 		 */
1661 		if (list_empty(&plug->mq_list))
1662 			request_count = 0;
1663 		else if (blk_queue_nomerges(q))
1664 			request_count = blk_plug_queued_count(q);
1665 
1666 		if (!request_count)
1667 			trace_block_plug(q);
1668 		else
1669 			last = list_entry_rq(plug->mq_list.prev);
1670 
1671 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1672 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1673 			blk_flush_plug_list(plug, false);
1674 			trace_block_plug(q);
1675 		}
1676 
1677 		list_add_tail(&rq->queuelist, &plug->mq_list);
1678 	} else if (plug && !blk_queue_nomerges(q)) {
1679 		blk_mq_bio_to_request(rq, bio);
1680 
1681 		/*
1682 		 * We do limited plugging. If the bio can be merged, do that.
1683 		 * Otherwise the existing request in the plug list will be
1684 		 * issued, so the plug list will have one request at most.
1685 		 * The plug list might get flushed before this; if that happens,
1686 		 * the plug list is empty and same_queue_rq is invalid.
1687 		 */
1688 		if (list_empty(&plug->mq_list))
1689 			same_queue_rq = NULL;
1690 		if (same_queue_rq)
1691 			list_del_init(&same_queue_rq->queuelist);
1692 		list_add_tail(&rq->queuelist, &plug->mq_list);
1693 
1694 		blk_mq_put_ctx(data.ctx);
1695 
1696 		if (same_queue_rq) {
1697 			data.hctx = blk_mq_map_queue(q,
1698 					same_queue_rq->mq_ctx->cpu);
1699 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1700 					&cookie);
1701 		}
1702 	} else if (q->nr_hw_queues > 1 && is_sync) {
1703 		blk_mq_put_ctx(data.ctx);
1704 		blk_mq_bio_to_request(rq, bio);
1705 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1706 	} else if (q->elevator) {
1707 		blk_mq_put_ctx(data.ctx);
1708 		blk_mq_bio_to_request(rq, bio);
1709 		blk_mq_sched_insert_request(rq, false, true, true, true);
1710 	} else {
1711 		blk_mq_put_ctx(data.ctx);
1712 		blk_mq_bio_to_request(rq, bio);
1713 		blk_mq_queue_io(data.hctx, data.ctx, rq);
1714 		blk_mq_run_hw_queue(data.hctx, true);
1715 	}
1716 
1717 	return cookie;
1718 }
1719 
1720 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1721 		     unsigned int hctx_idx)
1722 {
1723 	struct page *page;
1724 
1725 	if (tags->rqs && set->ops->exit_request) {
1726 		int i;
1727 
1728 		for (i = 0; i < tags->nr_tags; i++) {
1729 			struct request *rq = tags->static_rqs[i];
1730 
1731 			if (!rq)
1732 				continue;
1733 			set->ops->exit_request(set, rq, hctx_idx);
1734 			tags->static_rqs[i] = NULL;
1735 		}
1736 	}
1737 
1738 	while (!list_empty(&tags->page_list)) {
1739 		page = list_first_entry(&tags->page_list, struct page, lru);
1740 		list_del_init(&page->lru);
1741 		/*
1742 		 * Remove kmemleak object previously allocated in
1743 		 * blk_mq_alloc_rqs().
1744 		 */
1745 		kmemleak_free(page_address(page));
1746 		__free_pages(page, page->private);
1747 	}
1748 }
1749 
1750 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1751 {
1752 	kfree(tags->rqs);
1753 	tags->rqs = NULL;
1754 	kfree(tags->static_rqs);
1755 	tags->static_rqs = NULL;
1756 
1757 	blk_mq_free_tags(tags);
1758 }
1759 
1760 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1761 					unsigned int hctx_idx,
1762 					unsigned int nr_tags,
1763 					unsigned int reserved_tags)
1764 {
1765 	struct blk_mq_tags *tags;
1766 	int node;
1767 
1768 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1769 	if (node == NUMA_NO_NODE)
1770 		node = set->numa_node;
1771 
1772 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1773 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1774 	if (!tags)
1775 		return NULL;
1776 
1777 	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1778 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1779 				 node);
1780 	if (!tags->rqs) {
1781 		blk_mq_free_tags(tags);
1782 		return NULL;
1783 	}
1784 
1785 	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1786 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1787 				 node);
1788 	if (!tags->static_rqs) {
1789 		kfree(tags->rqs);
1790 		blk_mq_free_tags(tags);
1791 		return NULL;
1792 	}
1793 
1794 	return tags;
1795 }
1796 
1797 static size_t order_to_size(unsigned int order)
1798 {
1799 	return (size_t)PAGE_SIZE << order;
1800 }
1801 
1802 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1803 		     unsigned int hctx_idx, unsigned int depth)
1804 {
1805 	unsigned int i, j, entries_per_page, max_order = 4;
1806 	size_t rq_size, left;
1807 	int node;
1808 
1809 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1810 	if (node == NUMA_NO_NODE)
1811 		node = set->numa_node;
1812 
1813 	INIT_LIST_HEAD(&tags->page_list);
1814 
1815 	/*
1816 	 * rq_size is the size of the request plus driver payload, rounded
1817 	 * to the cacheline size
1818 	 */
1819 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1820 				cache_line_size());
1821 	left = rq_size * depth;
1822 
1823 	for (i = 0; i < depth; ) {
1824 		int this_order = max_order;
1825 		struct page *page;
1826 		int to_do;
1827 		void *p;
1828 
1829 		while (this_order && left < order_to_size(this_order - 1))
1830 			this_order--;
1831 
1832 		do {
1833 			page = alloc_pages_node(node,
1834 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1835 				this_order);
1836 			if (page)
1837 				break;
1838 			if (!this_order--)
1839 				break;
1840 			if (order_to_size(this_order) < rq_size)
1841 				break;
1842 		} while (1);
1843 
1844 		if (!page)
1845 			goto fail;
1846 
1847 		page->private = this_order;
1848 		list_add_tail(&page->lru, &tags->page_list);
1849 
1850 		p = page_address(page);
1851 		/*
1852 		 * Allow kmemleak to scan these pages as they contain pointers
1853 		 * to additional allocations made via ops->init_request().
1854 		 */
1855 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1856 		entries_per_page = order_to_size(this_order) / rq_size;
1857 		to_do = min(entries_per_page, depth - i);
1858 		left -= to_do * rq_size;
1859 		for (j = 0; j < to_do; j++) {
1860 			struct request *rq = p;
1861 
1862 			tags->static_rqs[i] = rq;
1863 			if (set->ops->init_request) {
1864 				if (set->ops->init_request(set, rq, hctx_idx,
1865 						node)) {
1866 					tags->static_rqs[i] = NULL;
1867 					goto fail;
1868 				}
1869 			}
1870 
1871 			p += rq_size;
1872 			i++;
1873 		}
1874 	}
1875 	return 0;
1876 
1877 fail:
1878 	blk_mq_free_rqs(set, tags, hctx_idx);
1879 	return -ENOMEM;
1880 }
1881 
1882 /*
1883  * 'cpu' is going away. Splice any existing rq_list entries from this
1884  * software queue to the hw queue dispatch list, and ensure that it
1885  * gets run.
1886  */
1887 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1888 {
1889 	struct blk_mq_hw_ctx *hctx;
1890 	struct blk_mq_ctx *ctx;
1891 	LIST_HEAD(tmp);
1892 
1893 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1894 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1895 
1896 	spin_lock(&ctx->lock);
1897 	if (!list_empty(&ctx->rq_list)) {
1898 		list_splice_init(&ctx->rq_list, &tmp);
1899 		blk_mq_hctx_clear_pending(hctx, ctx);
1900 	}
1901 	spin_unlock(&ctx->lock);
1902 
1903 	if (list_empty(&tmp))
1904 		return 0;
1905 
1906 	spin_lock(&hctx->lock);
1907 	list_splice_tail_init(&tmp, &hctx->dispatch);
1908 	spin_unlock(&hctx->lock);
1909 
1910 	blk_mq_run_hw_queue(hctx, true);
1911 	return 0;
1912 }
1913 
1914 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1915 {
1916 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1917 					    &hctx->cpuhp_dead);
1918 }
1919 
1920 /* hctx->ctxs will be freed in queue's release handler */
1921 static void blk_mq_exit_hctx(struct request_queue *q,
1922 		struct blk_mq_tag_set *set,
1923 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1924 {
1925 	blk_mq_debugfs_unregister_hctx(hctx);
1926 
1927 	blk_mq_tag_idle(hctx);
1928 
1929 	if (set->ops->exit_request)
1930 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
1931 
1932 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1933 
1934 	if (set->ops->exit_hctx)
1935 		set->ops->exit_hctx(hctx, hctx_idx);
1936 
1937 	if (hctx->flags & BLK_MQ_F_BLOCKING)
1938 		cleanup_srcu_struct(hctx->queue_rq_srcu);
1939 
1940 	blk_mq_remove_cpuhp(hctx);
1941 	blk_free_flush_queue(hctx->fq);
1942 	sbitmap_free(&hctx->ctx_map);
1943 }
1944 
1945 static void blk_mq_exit_hw_queues(struct request_queue *q,
1946 		struct blk_mq_tag_set *set, int nr_queue)
1947 {
1948 	struct blk_mq_hw_ctx *hctx;
1949 	unsigned int i;
1950 
1951 	queue_for_each_hw_ctx(q, hctx, i) {
1952 		if (i == nr_queue)
1953 			break;
1954 		blk_mq_exit_hctx(q, set, hctx, i);
1955 	}
1956 }
1957 
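/*
 * Set up a single hardware context: per-hctx lock and dispatch list, the
 * CPU hotplug notifier, the sw queue map, optional driver and scheduler
 * per-hctx state, and the flush queue.  On failure the labels below unwind
 * whatever steps already completed, in reverse order.
 */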
1958 static int blk_mq_init_hctx(struct request_queue *q,
1959 		struct blk_mq_tag_set *set,
1960 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1961 {
1962 	int node;
1963 
1964 	node = hctx->numa_node;
1965 	if (node == NUMA_NO_NODE)
1966 		node = hctx->numa_node = set->numa_node;
1967 
1968 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1969 	spin_lock_init(&hctx->lock);
1970 	INIT_LIST_HEAD(&hctx->dispatch);
1971 	hctx->queue = q;
1972 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1973 
1974 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1975 
1976 	hctx->tags = set->tags[hctx_idx];
1977 
1978 	/*
1979 	 * Allocate space for all possible cpus to avoid allocation at
1980 	 * runtime
1981 	 */
1982 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1983 					GFP_KERNEL, node);
1984 	if (!hctx->ctxs)
1985 		goto unregister_cpu_notifier;
1986 
1987 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1988 			      node))
1989 		goto free_ctxs;
1990 
1991 	hctx->nr_ctx = 0;
1992 
1993 	if (set->ops->init_hctx &&
1994 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1995 		goto free_bitmap;
1996 
1997 	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
1998 		goto exit_hctx;
1999 
2000 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2001 	if (!hctx->fq)
2002 		goto sched_exit_hctx;
2003 
2004 	if (set->ops->init_request &&
2005 	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
2006 				   node))
2007 		goto free_fq;
2008 
2009 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2010 		init_srcu_struct(hctx->queue_rq_srcu);
2011 
2012 	blk_mq_debugfs_register_hctx(q, hctx);
2013 
2014 	return 0;
2015 
2016  free_fq:
2017 	kfree(hctx->fq);
2018  sched_exit_hctx:
2019 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2020  exit_hctx:
2021 	if (set->ops->exit_hctx)
2022 		set->ops->exit_hctx(hctx, hctx_idx);
2023  free_bitmap:
2024 	sbitmap_free(&hctx->ctx_map);
2025  free_ctxs:
2026 	kfree(hctx->ctxs);
2027  unregister_cpu_notifier:
2028 	blk_mq_remove_cpuhp(hctx);
2029 	return -1;
2030 }
2031 
2032 static void blk_mq_init_cpu_queues(struct request_queue *q,
2033 				   unsigned int nr_hw_queues)
2034 {
2035 	unsigned int i;
2036 
2037 	for_each_possible_cpu(i) {
2038 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2039 		struct blk_mq_hw_ctx *hctx;
2040 
2041 		__ctx->cpu = i;
2042 		spin_lock_init(&__ctx->lock);
2043 		INIT_LIST_HEAD(&__ctx->rq_list);
2044 		__ctx->queue = q;
2045 
2046 		/* If the cpu isn't present, the cpu is mapped to the first hctx */
2047 		if (!cpu_present(i))
2048 			continue;
2049 
2050 		hctx = blk_mq_map_queue(q, i);
2051 
2052 		/*
2053 		 * Set local node, IFF we have more than one hw queue. If
2054 		 * not, we remain on the home node of the device
2055 		 */
2056 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2057 			hctx->numa_node = local_memory_node(cpu_to_node(i));
2058 	}
2059 }
2060 
2061 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2062 {
2063 	int ret = 0;
2064 
2065 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2066 					set->queue_depth, set->reserved_tags);
2067 	if (!set->tags[hctx_idx])
2068 		return false;
2069 
2070 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2071 				set->queue_depth);
2072 	if (!ret)
2073 		return true;
2074 
2075 	blk_mq_free_rq_map(set->tags[hctx_idx]);
2076 	set->tags[hctx_idx] = NULL;
2077 	return false;
2078 }
2079 
2080 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2081 					 unsigned int hctx_idx)
2082 {
2083 	if (set->tags[hctx_idx]) {
2084 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2085 		blk_mq_free_rq_map(set->tags[hctx_idx]);
2086 		set->tags[hctx_idx] = NULL;
2087 	}
2088 }
2089 
2090 static void blk_mq_map_swqueue(struct request_queue *q)
2091 {
2092 	unsigned int i, hctx_idx;
2093 	struct blk_mq_hw_ctx *hctx;
2094 	struct blk_mq_ctx *ctx;
2095 	struct blk_mq_tag_set *set = q->tag_set;
2096 
2097 	/*
2098 	 * Avoid others reading incomplete hctx->cpumask through sysfs
2099 	 */
2100 	mutex_lock(&q->sysfs_lock);
2101 
2102 	queue_for_each_hw_ctx(q, hctx, i) {
2103 		cpumask_clear(hctx->cpumask);
2104 		hctx->nr_ctx = 0;
2105 	}
2106 
2107 	/*
2108 	 * Map software to hardware queues.
2109 	 *
2110 	 * If the cpu isn't present, the cpu is mapped to the first hctx.
2111 	 */
2112 	for_each_present_cpu(i) {
2113 		hctx_idx = q->mq_map[i];
2114 		/* unmapped hw queue can be remapped after CPU topology changes */
2115 		if (!set->tags[hctx_idx] &&
2116 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2117 			/*
2118 			 * If tags initialization fails for some hctx,
2119 			 * that hctx won't be brought online.  In this
2120 			 * case, remap the current ctx to hctx[0] which
2121 			 * is guaranteed to always have tags allocated
2122 			 */
2123 			q->mq_map[i] = 0;
2124 		}
2125 
2126 		ctx = per_cpu_ptr(q->queue_ctx, i);
2127 		hctx = blk_mq_map_queue(q, i);
2128 
2129 		cpumask_set_cpu(i, hctx->cpumask);
2130 		ctx->index_hw = hctx->nr_ctx;
2131 		hctx->ctxs[hctx->nr_ctx++] = ctx;
2132 	}
2133 
2134 	mutex_unlock(&q->sysfs_lock);
2135 
2136 	queue_for_each_hw_ctx(q, hctx, i) {
2137 		/*
2138 		 * If no software queues are mapped to this hardware queue,
2139 		 * disable it and free the request entries.
2140 		 */
2141 		if (!hctx->nr_ctx) {
2142 			/* Never unmap queue 0.  We need it as a
2143 			 * fallback in case a new remap fails
2144 			 * allocation.
2145 			 */
2146 			if (i && set->tags[i])
2147 				blk_mq_free_map_and_requests(set, i);
2148 
2149 			hctx->tags = NULL;
2150 			continue;
2151 		}
2152 
2153 		hctx->tags = set->tags[i];
2154 		WARN_ON(!hctx->tags);
2155 
2156 		/*
2157 		 * Set the map size to the number of mapped software queues.
2158 		 * This is more accurate and more efficient than looping
2159 		 * over all possibly mapped software queues.
2160 		 */
2161 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2162 
2163 		/*
2164 		 * Initialize batch round-robin counts
2165 		 */
2166 		hctx->next_cpu = cpumask_first(hctx->cpumask);
2167 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2168 	}
2169 }
2170 
2171 /*
2172  * Caller needs to ensure that we're either frozen/quiesced, or that
2173  * the queue isn't live yet.
2174  */
2175 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2176 {
2177 	struct blk_mq_hw_ctx *hctx;
2178 	int i;
2179 
2180 	queue_for_each_hw_ctx(q, hctx, i) {
2181 		if (shared) {
2182 			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2183 				atomic_inc(&q->shared_hctx_restart);
2184 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
2185 		} else {
2186 			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2187 				atomic_dec(&q->shared_hctx_restart);
2188 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2189 		}
2190 	}
2191 }
2192 
2193 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2194 					bool shared)
2195 {
2196 	struct request_queue *q;
2197 
2198 	lockdep_assert_held(&set->tag_list_lock);
2199 
2200 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2201 		blk_mq_freeze_queue(q);
2202 		queue_set_hctx_shared(q, shared);
2203 		blk_mq_unfreeze_queue(q);
2204 	}
2205 }
2206 
2207 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2208 {
2209 	struct blk_mq_tag_set *set = q->tag_set;
2210 
2211 	mutex_lock(&set->tag_list_lock);
2212 	list_del_rcu(&q->tag_set_list);
2213 	INIT_LIST_HEAD(&q->tag_set_list);
2214 	if (list_is_singular(&set->tag_list)) {
2215 		/* just transitioned to unshared */
2216 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
2217 		/* update existing queue */
2218 		blk_mq_update_tag_set_depth(set, false);
2219 	}
2220 	mutex_unlock(&set->tag_list_lock);
2221 
2222 	synchronize_rcu();
2223 }
2224 
2225 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2226 				     struct request_queue *q)
2227 {
2228 	q->tag_set = set;
2229 
2230 	mutex_lock(&set->tag_list_lock);
2231 
2232 	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2233 	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2234 		set->flags |= BLK_MQ_F_TAG_SHARED;
2235 		/* update existing queue */
2236 		blk_mq_update_tag_set_depth(set, true);
2237 	}
2238 	if (set->flags & BLK_MQ_F_TAG_SHARED)
2239 		queue_set_hctx_shared(q, true);
2240 	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2241 
2242 	mutex_unlock(&set->tag_list_lock);
2243 }
2244 
2245 /*
2246  * This is the actual release handler for mq, but we do it from the
2247  * request queue's release handler to avoid use-after-free headaches:
2248  * q->mq_kobj shouldn't have been introduced, but we can't group the
2249  * ctx/kctx kobjects without it.
2250  */
2251 void blk_mq_release(struct request_queue *q)
2252 {
2253 	struct blk_mq_hw_ctx *hctx;
2254 	unsigned int i;
2255 
2256 	/* hctx kobj stays in hctx */
2257 	queue_for_each_hw_ctx(q, hctx, i) {
2258 		if (!hctx)
2259 			continue;
2260 		kobject_put(&hctx->kobj);
2261 	}
2262 
2263 	q->mq_map = NULL;
2264 
2265 	kfree(q->queue_hw_ctx);
2266 
2267 	/*
2268 	 * release .mq_kobj and the sw queues' kobjects now because
2269 	 * both share their lifetime with the request queue.
2270 	 */
2271 	blk_mq_sysfs_deinit(q);
2272 
2273 	free_percpu(q->queue_ctx);
2274 }
2275 
2276 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2277 {
2278 	struct request_queue *uninit_q, *q;
2279 
2280 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2281 	if (!uninit_q)
2282 		return ERR_PTR(-ENOMEM);
2283 
2284 	q = blk_mq_init_allocated_queue(set, uninit_q);
2285 	if (IS_ERR(q))
2286 		blk_cleanup_queue(uninit_q);
2287 
2288 	return q;
2289 }
2290 EXPORT_SYMBOL(blk_mq_init_queue);
2291 
2292 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2293 {
2294 	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2295 
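	/*
	 * queue_rq_srcu must be the trailing member of struct blk_mq_hw_ctx,
	 * so that the extra srcu_struct allocated for BLK_MQ_F_BLOCKING sits
	 * directly behind the hctx.
	 */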
2296 	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2297 			   __alignof__(struct blk_mq_hw_ctx)) !=
2298 		     sizeof(struct blk_mq_hw_ctx));
2299 
2300 	if (tag_set->flags & BLK_MQ_F_BLOCKING)
2301 		hw_ctx_size += sizeof(struct srcu_struct);
2302 
2303 	return hw_ctx_size;
2304 }
2305 
2306 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2307 						struct request_queue *q)
2308 {
2309 	int i, j;
2310 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2311 
2312 	blk_mq_sysfs_unregister(q);
2313 	for (i = 0; i < set->nr_hw_queues; i++) {
2314 		int node;
2315 
2316 		if (hctxs[i])
2317 			continue;
2318 
2319 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
2320 		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2321 					GFP_KERNEL, node);
2322 		if (!hctxs[i])
2323 			break;
2324 
2325 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2326 						node)) {
2327 			kfree(hctxs[i]);
2328 			hctxs[i] = NULL;
2329 			break;
2330 		}
2331 
2332 		atomic_set(&hctxs[i]->nr_active, 0);
2333 		hctxs[i]->numa_node = node;
2334 		hctxs[i]->queue_num = i;
2335 
2336 		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2337 			free_cpumask_var(hctxs[i]->cpumask);
2338 			kfree(hctxs[i]);
2339 			hctxs[i] = NULL;
2340 			break;
2341 		}
2342 		blk_mq_hctx_kobj_init(hctxs[i]);
2343 	}
2344 	for (j = i; j < q->nr_hw_queues; j++) {
2345 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2346 
2347 		if (hctx) {
2348 			if (hctx->tags)
2349 				blk_mq_free_map_and_requests(set, j);
2350 			blk_mq_exit_hctx(q, set, hctx, j);
2351 			kobject_put(&hctx->kobj);
2352 			hctxs[j] = NULL;
2353 
2354 		}
2355 	}
2356 	q->nr_hw_queues = i;
2357 	blk_mq_sysfs_register(q);
2358 }
2359 
2360 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2361 						  struct request_queue *q)
2362 {
2363 	/* mark the queue as mq asap */
2364 	q->mq_ops = set->ops;
2365 
2366 	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2367 					     blk_mq_poll_stats_bkt,
2368 					     BLK_MQ_POLL_STATS_BKTS, q);
2369 	if (!q->poll_cb)
2370 		goto err_exit;
2371 
2372 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2373 	if (!q->queue_ctx)
2374 		goto err_exit;
2375 
2376 	/* init q->mq_kobj and sw queues' kobjects */
2377 	blk_mq_sysfs_init(q);
2378 
2379 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2380 						GFP_KERNEL, set->numa_node);
2381 	if (!q->queue_hw_ctx)
2382 		goto err_percpu;
2383 
2384 	q->mq_map = set->mq_map;
2385 
2386 	blk_mq_realloc_hw_ctxs(set, q);
2387 	if (!q->nr_hw_queues)
2388 		goto err_hctxs;
2389 
2390 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2391 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2392 
2393 	q->nr_queues = nr_cpu_ids;
2394 
2395 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2396 
2397 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
2398 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2399 
2400 	q->sg_reserved_size = INT_MAX;
2401 
2402 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2403 	INIT_LIST_HEAD(&q->requeue_list);
2404 	spin_lock_init(&q->requeue_lock);
2405 
2406 	blk_queue_make_request(q, blk_mq_make_request);
2407 
2408 	/*
2409 	 * Do this after blk_queue_make_request() overrides it...
2410 	 */
2411 	q->nr_requests = set->queue_depth;
2412 
2413 	/*
2414 	 * Default to classic polling
2415 	 */
2416 	q->poll_nsec = -1;
2417 
2418 	if (set->ops->complete)
2419 		blk_queue_softirq_done(q, set->ops->complete);
2420 
2421 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2422 	blk_mq_add_queue_tag_set(set, q);
2423 	blk_mq_map_swqueue(q);
2424 
2425 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2426 		int ret;
2427 
2428 		ret = blk_mq_sched_init(q);
2429 		if (ret)
2430 			return ERR_PTR(ret);
2431 	}
2432 
2433 	return q;
2434 
2435 err_hctxs:
2436 	kfree(q->queue_hw_ctx);
2437 err_percpu:
2438 	free_percpu(q->queue_ctx);
2439 err_exit:
2440 	q->mq_ops = NULL;
2441 	return ERR_PTR(-ENOMEM);
2442 }
2443 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2444 
2445 void blk_mq_free_queue(struct request_queue *q)
2446 {
2447 	struct blk_mq_tag_set	*set = q->tag_set;
2448 
2449 	blk_mq_del_queue_tag_set(q);
2450 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2451 }
2452 
2453 /* Basically redo blk_mq_init_queue with the queue frozen */
2454 static void blk_mq_queue_reinit(struct request_queue *q)
2455 {
2456 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2457 
2458 	blk_mq_debugfs_unregister_hctxs(q);
2459 	blk_mq_sysfs_unregister(q);
2460 
2461 	/*
2462 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2463 	 * we should change hctx numa_node according to new topology (this
2464 	 * involves freeing and re-allocating memory; worth doing?)
2465 	 */
2466 
2467 	blk_mq_map_swqueue(q);
2468 
2469 	blk_mq_sysfs_register(q);
2470 	blk_mq_debugfs_register_hctxs(q);
2471 }
2472 
2473 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2474 {
2475 	int i;
2476 
2477 	for (i = 0; i < set->nr_hw_queues; i++)
2478 		if (!__blk_mq_alloc_rq_map(set, i))
2479 			goto out_unwind;
2480 
2481 	return 0;
2482 
2483 out_unwind:
2484 	while (--i >= 0)
2485 		blk_mq_free_rq_map(set->tags[i]);
2486 
2487 	return -ENOMEM;
2488 }
2489 
2490 /*
2491  * Allocate the request maps associated with this tag_set. Note that this
2492  * may reduce the depth asked for, if memory is tight. set->queue_depth
2493  * will be updated to reflect the allocated depth.
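 * For example, a requested depth of 256 that cannot be satisfied may be
 * retried at 128, 64, ... until the allocation succeeds or the depth
 * would drop below set->reserved_tags + BLK_MQ_TAG_MIN.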
2494  */
2495 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2496 {
2497 	unsigned int depth;
2498 	int err;
2499 
2500 	depth = set->queue_depth;
2501 	do {
2502 		err = __blk_mq_alloc_rq_maps(set);
2503 		if (!err)
2504 			break;
2505 
2506 		set->queue_depth >>= 1;
2507 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2508 			err = -ENOMEM;
2509 			break;
2510 		}
2511 	} while (set->queue_depth);
2512 
2513 	if (!set->queue_depth || err) {
2514 		pr_err("blk-mq: failed to allocate request map\n");
2515 		return -ENOMEM;
2516 	}
2517 
2518 	if (depth != set->queue_depth)
2519 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2520 						depth, set->queue_depth);
2521 
2522 	return 0;
2523 }
2524 
2525 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2526 {
2527 	if (set->ops->map_queues)
2528 		return set->ops->map_queues(set);
2529 	else
2530 		return blk_mq_map_queues(set);
2531 }
2532 
2533 /*
2534  * Alloc a tag set to be associated with one or more request queues.
2535  * May fail with EINVAL for various error conditions. May adjust the
2536  * requested depth down, if it is too large. In that case, the adjusted
2537  * value will be stored in set->queue_depth.
2538  */
2539 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2540 {
2541 	int ret;
2542 
2543 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2544 
2545 	if (!set->nr_hw_queues)
2546 		return -EINVAL;
2547 	if (!set->queue_depth)
2548 		return -EINVAL;
2549 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2550 		return -EINVAL;
2551 
2552 	if (!set->ops->queue_rq)
2553 		return -EINVAL;
2554 
2555 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2556 		pr_info("blk-mq: reduced tag depth to %u\n",
2557 			BLK_MQ_MAX_DEPTH);
2558 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2559 	}
2560 
2561 	/*
2562 	 * If a crashdump is active, then we are potentially in a very
2563 	 * memory-constrained environment. Limit us to 1 queue and
2564 	 * 64 tags to prevent using too much memory.
2565 	 */
2566 	if (is_kdump_kernel()) {
2567 		set->nr_hw_queues = 1;
2568 		set->queue_depth = min(64U, set->queue_depth);
2569 	}
2570 	/*
2571 	 * There is no use for more h/w queues than cpus.
2572 	 */
2573 	if (set->nr_hw_queues > nr_cpu_ids)
2574 		set->nr_hw_queues = nr_cpu_ids;
2575 
2576 	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2577 				 GFP_KERNEL, set->numa_node);
2578 	if (!set->tags)
2579 		return -ENOMEM;
2580 
2581 	ret = -ENOMEM;
2582 	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2583 			GFP_KERNEL, set->numa_node);
2584 	if (!set->mq_map)
2585 		goto out_free_tags;
2586 
2587 	ret = blk_mq_update_queue_map(set);
2588 	if (ret)
2589 		goto out_free_mq_map;
2590 
2591 	ret = blk_mq_alloc_rq_maps(set);
2592 	if (ret)
2593 		goto out_free_mq_map;
2594 
2595 	mutex_init(&set->tag_list_lock);
2596 	INIT_LIST_HEAD(&set->tag_list);
2597 
2598 	return 0;
2599 
2600 out_free_mq_map:
2601 	kfree(set->mq_map);
2602 	set->mq_map = NULL;
2603 out_free_tags:
2604 	kfree(set->tags);
2605 	set->tags = NULL;
2606 	return ret;
2607 }
2608 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
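/*
 * Typical call sequence from a driver, shown as a minimal sketch.  The
 * my_* names and the numbers are hypothetical; only the blk_mq_* calls,
 * the blk_mq_tag_set fields and BLK_MQ_F_SHOULD_MERGE come from the
 * public API:
 *
 *	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
 *	dev->tag_set.ops = &my_mq_ops;			(provides .queue_rq)
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);	(per-request payload)
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	dev->queue = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(dev->queue)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		return PTR_ERR(dev->queue);
 *	}
 *
 * Teardown reverses the order: blk_cleanup_queue() on the request queue,
 * then blk_mq_free_tag_set().
 */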
2609 
2610 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2611 {
2612 	int i;
2613 
2614 	for (i = 0; i < nr_cpu_ids; i++)
2615 		blk_mq_free_map_and_requests(set, i);
2616 
2617 	kfree(set->mq_map);
2618 	set->mq_map = NULL;
2619 
2620 	kfree(set->tags);
2621 	set->tags = NULL;
2622 }
2623 EXPORT_SYMBOL(blk_mq_free_tag_set);
2624 
2625 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2626 {
2627 	struct blk_mq_tag_set *set = q->tag_set;
2628 	struct blk_mq_hw_ctx *hctx;
2629 	int i, ret;
2630 
2631 	if (!set)
2632 		return -EINVAL;
2633 
2634 	blk_mq_freeze_queue(q);
2635 
2636 	ret = 0;
2637 	queue_for_each_hw_ctx(q, hctx, i) {
2638 		if (!hctx->tags)
2639 			continue;
2640 		/*
2641 		 * If we're using an MQ scheduler, just update the scheduler
2642 		 * queue depth. This is similar to what the old code would do.
2643 		 */
2644 		if (!hctx->sched_tags) {
2645 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2646 							min(nr, set->queue_depth),
2647 							false);
2648 		} else {
2649 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2650 							nr, true);
2651 		}
2652 		if (ret)
2653 			break;
2654 	}
2655 
2656 	if (!ret)
2657 		q->nr_requests = nr;
2658 
2659 	blk_mq_unfreeze_queue(q);
2660 
2661 	return ret;
2662 }
2663 
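/*
 * Re-size a tag set to a new number of hardware queues: the request is
 * capped at nr_cpu_ids and ignored if it is < 1 or unchanged.  Every queue
 * sharing the set is frozen, the cpu-to-queue map is rebuilt, hardware
 * contexts are (re)allocated and the software queues are remapped, then
 * the queues are unfrozen.
 */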
2664 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2665 							int nr_hw_queues)
2666 {
2667 	struct request_queue *q;
2668 
2669 	lockdep_assert_held(&set->tag_list_lock);
2670 
2671 	if (nr_hw_queues > nr_cpu_ids)
2672 		nr_hw_queues = nr_cpu_ids;
2673 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2674 		return;
2675 
2676 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2677 		blk_mq_freeze_queue(q);
2678 
2679 	set->nr_hw_queues = nr_hw_queues;
2680 	blk_mq_update_queue_map(set);
2681 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2682 		blk_mq_realloc_hw_ctxs(set, q);
2683 		blk_mq_queue_reinit(q);
2684 	}
2685 
2686 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2687 		blk_mq_unfreeze_queue(q);
2688 }
2689 
2690 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2691 {
2692 	mutex_lock(&set->tag_list_lock);
2693 	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2694 	mutex_unlock(&set->tag_list_lock);
2695 }
2696 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2697 
2698 /* Enable polling stats and return whether they were already enabled. */
2699 static bool blk_poll_stats_enable(struct request_queue *q)
2700 {
2701 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2702 	    test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2703 		return true;
2704 	blk_stat_add_callback(q, q->poll_cb);
2705 	return false;
2706 }
2707 
2708 static void blk_mq_poll_stats_start(struct request_queue *q)
2709 {
2710 	/*
2711 	 * We don't arm the callback if polling stats are not enabled or the
2712 	 * callback is already active.
2713 	 */
2714 	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2715 	    blk_stat_is_active(q->poll_cb))
2716 		return;
2717 
2718 	blk_stat_activate_msecs(q->poll_cb, 100);
2719 }
2720 
2721 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2722 {
2723 	struct request_queue *q = cb->data;
2724 	int bucket;
2725 
2726 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2727 		if (cb->stat[bucket].nr_samples)
2728 			q->poll_stat[bucket] = cb->stat[bucket];
2729 	}
2730 }
2731 
2732 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2733 				       struct blk_mq_hw_ctx *hctx,
2734 				       struct request *rq)
2735 {
2736 	unsigned long ret = 0;
2737 	int bucket;
2738 
2739 	/*
2740 	 * If stats collection isn't on, don't sleep but turn it on for
2741 	 * future users
2742 	 */
2743 	if (!blk_poll_stats_enable(q))
2744 		return 0;
2745 
2746 	/*
2747 	 * As an optimistic guess, use half of the mean service time
2748 	 * for this type of request. We can (and should) make this smarter.
2749 	 * For instance, if the completion latencies are tight, we can
2750 	 * get closer than just half the mean. This is especially
2751 	 * important on devices where the completion latencies are longer
2752 	 * than ~10 usec. We do use the stats for the relevant IO size
2753 	 * if available, which does lead to better estimates.
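	 * For example, a bucket whose mean completion time is 8 usec yields
	 * a pre-poll sleep of roughly 4 usec ((mean + 1) / 2 below).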
2754 	 */
2755 	bucket = blk_mq_poll_stats_bkt(rq);
2756 	if (bucket < 0)
2757 		return ret;
2758 
2759 	if (q->poll_stat[bucket].nr_samples)
2760 		ret = (q->poll_stat[bucket].mean + 1) / 2;
2761 
2762 	return ret;
2763 }
2764 
2765 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2766 				     struct blk_mq_hw_ctx *hctx,
2767 				     struct request *rq)
2768 {
2769 	struct hrtimer_sleeper hs;
2770 	enum hrtimer_mode mode;
2771 	unsigned int nsecs;
2772 	ktime_t kt;
2773 
2774 	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2775 		return false;
2776 
2777 	/*
2778 	 * poll_nsec can be:
2779 	 *
2780 	 * -1:	don't ever hybrid sleep
2781 	 *  0:	use half of prev avg
2782 	 * >0:	use this specific value
2783 	 */
2784 	if (q->poll_nsec == -1)
2785 		return false;
2786 	else if (q->poll_nsec > 0)
2787 		nsecs = q->poll_nsec;
2788 	else
2789 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2790 
2791 	if (!nsecs)
2792 		return false;
2793 
2794 	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2795 
2796 	/*
2797 	 * This will be replaced with the stats tracking code, using
2798 	 * 'avg_completion_time / 2' as the pre-sleep target.
2799 	 */
2800 	kt = nsecs;
2801 
2802 	mode = HRTIMER_MODE_REL;
2803 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2804 	hrtimer_set_expires(&hs.timer, kt);
2805 
2806 	hrtimer_init_sleeper(&hs, current);
2807 	do {
2808 		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2809 			break;
2810 		set_current_state(TASK_UNINTERRUPTIBLE);
2811 		hrtimer_start_expires(&hs.timer, mode);
2812 		if (hs.task)
2813 			io_schedule();
2814 		hrtimer_cancel(&hs.timer);
2815 		mode = HRTIMER_MODE_ABS;
2816 	} while (hs.task && !signal_pending(current));
2817 
2818 	__set_current_state(TASK_RUNNING);
2819 	destroy_hrtimer_on_stack(&hs.timer);
2820 	return true;
2821 }
2822 
2823 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2824 {
2825 	struct request_queue *q = hctx->queue;
2826 	long state;
2827 
2828 	/*
2829 	 * If we sleep, have the caller restart the poll loop to reset
2830 	 * the state. Like for the other success return cases, the
2831 	 * caller is responsible for checking if the IO completed. If
2832 	 * the IO isn't complete, we'll get called again and will go
2833 	 * straight to the busy poll loop.
2834 	 */
2835 	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2836 		return true;
2837 
2838 	hctx->poll_considered++;
2839 
2840 	state = current->state;
2841 	while (!need_resched()) {
2842 		int ret;
2843 
2844 		hctx->poll_invoked++;
2845 
2846 		ret = q->mq_ops->poll(hctx, rq->tag);
2847 		if (ret > 0) {
2848 			hctx->poll_success++;
2849 			set_current_state(TASK_RUNNING);
2850 			return true;
2851 		}
2852 
2853 		if (signal_pending_state(state, current))
2854 			set_current_state(TASK_RUNNING);
2855 
2856 		if (current->state == TASK_RUNNING)
2857 			return true;
2858 		if (ret < 0)
2859 			break;
2860 		cpu_relax();
2861 	}
2862 
2863 	return false;
2864 }
2865 
2866 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2867 {
2868 	struct blk_mq_hw_ctx *hctx;
2869 	struct blk_plug *plug;
2870 	struct request *rq;
2871 
2872 	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2873 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2874 		return false;
2875 
2876 	plug = current->plug;
2877 	if (plug)
2878 		blk_flush_plug_list(plug, false);
2879 
2880 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2881 	if (!blk_qc_t_is_internal(cookie))
2882 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2883 	else {
2884 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2885 		/*
2886 		 * With scheduling, if the request has completed, we'll
2887 		 * get a NULL return here, as we clear the sched tag when
2888 		 * that happens. The request is still valid, as always,
2889 		 * so we should be safe with just the NULL check.
2890 		 */
2891 		if (!rq)
2892 			return false;
2893 	}
2894 
2895 	return __blk_mq_poll(hctx, rq);
2896 }
2897 EXPORT_SYMBOL_GPL(blk_mq_poll);
2898 
2899 static int __init blk_mq_init(void)
2900 {
2901 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2902 				blk_mq_hctx_notify_dead);
2903 	return 0;
2904 }
2905 subsys_initcall(blk_mq_init);
2906