xref: /openbmc/linux/block/blk-mq.c (revision c3e23538)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block multiqueue core code
4  *
5  * Copyright (C) 2013-2014 Jens Axboe
6  * Copyright (C) 2013-2014 Christoph Hellwig
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/kmemleak.h>
14 #include <linux/mm.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/workqueue.h>
18 #include <linux/smp.h>
19 #include <linux/llist.h>
20 #include <linux/list_sort.h>
21 #include <linux/cpu.h>
22 #include <linux/cache.h>
23 #include <linux/sched/sysctl.h>
24 #include <linux/sched/topology.h>
25 #include <linux/sched/signal.h>
26 #include <linux/delay.h>
27 #include <linux/crash_dump.h>
28 #include <linux/prefetch.h>
29 #include <linux/blk-crypto.h>
30 
31 #include <trace/events/block.h>
32 
33 #include <linux/blk-mq.h>
34 #include <linux/t10-pi.h>
35 #include "blk.h"
36 #include "blk-mq.h"
37 #include "blk-mq-debugfs.h"
38 #include "blk-mq-tag.h"
39 #include "blk-pm.h"
40 #include "blk-stat.h"
41 #include "blk-mq-sched.h"
42 #include "blk-rq-qos.h"
43 
44 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
45 
46 static void blk_mq_poll_stats_start(struct request_queue *q);
47 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
48 
49 static int blk_mq_poll_stats_bkt(const struct request *rq)
50 {
51 	int ddir, sectors, bucket;
52 
53 	ddir = rq_data_dir(rq);
54 	sectors = blk_rq_stats_sectors(rq);
55 
56 	bucket = ddir + 2 * ilog2(sectors);
57 
58 	if (bucket < 0)
59 		return -1;
60 	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
61 		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
62 
63 	return bucket;
64 }
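
/*
 * Worked example (hypothetical values, not derived from this tree's
 * config): a 32-sector read has ddir == 0 and ilog2(32) == 5, giving
 * bucket 0 + 2 * 5 == 10, while the equivalent write lands in bucket 11.
 * Requests whose bucket would overflow BLK_MQ_POLL_STATS_BKTS are
 * clamped into the last read/write pair of buckets.
 */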
65 
66 /*
67  * Check if any of the ctx, dispatch list or elevator
68  * have pending work in this hardware queue.
69  */
70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
71 {
72 	return !list_empty_careful(&hctx->dispatch) ||
73 		sbitmap_any_bit_set(&hctx->ctx_map) ||
74 			blk_mq_sched_has_work(hctx);
75 }
76 
77 /*
78  * Mark this ctx as having pending work in this hardware queue
79  */
80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
81 				     struct blk_mq_ctx *ctx)
82 {
83 	const int bit = ctx->index_hw[hctx->type];
84 
85 	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
86 		sbitmap_set_bit(&hctx->ctx_map, bit);
87 }
88 
89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
90 				      struct blk_mq_ctx *ctx)
91 {
92 	const int bit = ctx->index_hw[hctx->type];
93 
94 	sbitmap_clear_bit(&hctx->ctx_map, bit);
95 }
96 
97 struct mq_inflight {
98 	struct block_device *part;
99 	unsigned int inflight[2];
100 };
101 
102 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
103 				  struct request *rq, void *priv,
104 				  bool reserved)
105 {
106 	struct mq_inflight *mi = priv;
107 
108 	if ((!mi->part->bd_partno || rq->part == mi->part) &&
109 	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
110 		mi->inflight[rq_data_dir(rq)]++;
111 
112 	return true;
113 }
114 
115 unsigned int blk_mq_in_flight(struct request_queue *q,
116 		struct block_device *part)
117 {
118 	struct mq_inflight mi = { .part = part };
119 
120 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
121 
122 	return mi.inflight[0] + mi.inflight[1];
123 }
124 
125 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
126 		unsigned int inflight[2])
127 {
128 	struct mq_inflight mi = { .part = part };
129 
130 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
131 	inflight[0] = mi.inflight[0];
132 	inflight[1] = mi.inflight[1];
133 }
134 
135 void blk_freeze_queue_start(struct request_queue *q)
136 {
137 	mutex_lock(&q->mq_freeze_lock);
138 	if (++q->mq_freeze_depth == 1) {
139 		percpu_ref_kill(&q->q_usage_counter);
140 		mutex_unlock(&q->mq_freeze_lock);
141 		if (queue_is_mq(q))
142 			blk_mq_run_hw_queues(q, false);
143 	} else {
144 		mutex_unlock(&q->mq_freeze_lock);
145 	}
146 }
147 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
148 
149 void blk_mq_freeze_queue_wait(struct request_queue *q)
150 {
151 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
152 }
153 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
154 
155 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
156 				     unsigned long timeout)
157 {
158 	return wait_event_timeout(q->mq_freeze_wq,
159 					percpu_ref_is_zero(&q->q_usage_counter),
160 					timeout);
161 }
162 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
163 
164 /*
165  * Guarantee no request is in use, so we can change any data structure of
166  * the queue afterward.
167  */
168 void blk_freeze_queue(struct request_queue *q)
169 {
170 	/*
171 	 * In the !blk_mq case we are only calling this to kill the
172 	 * q_usage_counter, otherwise this increases the freeze depth
173 	 * and waits for it to return to zero.  For this reason there is
174 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
175 	 * exported to drivers as the only user for unfreeze is blk_mq.
176 	 */
177 	blk_freeze_queue_start(q);
178 	blk_mq_freeze_queue_wait(q);
179 }
180 
181 void blk_mq_freeze_queue(struct request_queue *q)
182 {
183 	/*
184 	 * ...just an alias to keep freeze and unfreeze actions balanced
185 	 * in the blk_mq_* namespace
186 	 */
187 	blk_freeze_queue(q);
188 }
189 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
190 
191 void blk_mq_unfreeze_queue(struct request_queue *q)
192 {
193 	mutex_lock(&q->mq_freeze_lock);
194 	q->mq_freeze_depth--;
195 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
196 	if (!q->mq_freeze_depth) {
197 		percpu_ref_resurrect(&q->q_usage_counter);
198 		wake_up_all(&q->mq_freeze_wq);
199 	}
200 	mutex_unlock(&q->mq_freeze_lock);
201 }
202 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
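
/*
 * A minimal usage sketch (hypothetical caller code, not taken from a
 * specific driver): freezing waits for all outstanding requests to
 * drain, so it brackets changes that must not overlap with any request
 * processing, e.g.:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue limits or other state the requests depend on ...
 *	blk_mq_unfreeze_queue(q);
 */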
203 
204 /*
205  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
206  * mpt3sas driver such that this function can be removed.
207  */
208 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
209 {
210 	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
211 }
212 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
213 
214 /**
215  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
216  * @q: request queue.
217  *
218  * Note: this function does not prevent the struct request end_io()
219  * callback from being invoked. Once this function returns, no dispatch
220  * can happen until the queue is unquiesced via
221  * blk_mq_unquiesce_queue().
222  */
223 void blk_mq_quiesce_queue(struct request_queue *q)
224 {
225 	struct blk_mq_hw_ctx *hctx;
226 	unsigned int i;
227 	bool rcu = false;
228 
229 	blk_mq_quiesce_queue_nowait(q);
230 
231 	queue_for_each_hw_ctx(q, hctx, i) {
232 		if (hctx->flags & BLK_MQ_F_BLOCKING)
233 			synchronize_srcu(hctx->srcu);
234 		else
235 			rcu = true;
236 	}
237 	if (rcu)
238 		synchronize_rcu();
239 }
240 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
241 
242 /*
243  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
244  * @q: request queue.
245  *
246  * This function restores the queue to the state it was in before it
247  * was quiesced by blk_mq_quiesce_queue().
248  */
249 void blk_mq_unquiesce_queue(struct request_queue *q)
250 {
251 	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
252 
253 	/* dispatch requests which are inserted during quiescing */
254 	blk_mq_run_hw_queues(q, true);
255 }
256 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
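
/*
 * A minimal usage sketch (hypothetical driver code): unlike a freeze,
 * quiescing only stops new dispatches and does not wait for already
 * dispatched requests to complete, e.g.:
 *
 *	blk_mq_quiesce_queue(q);
 *	... update state that ->queue_rq() relies on ...
 *	blk_mq_unquiesce_queue(q);
 */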
257 
258 void blk_mq_wake_waiters(struct request_queue *q)
259 {
260 	struct blk_mq_hw_ctx *hctx;
261 	unsigned int i;
262 
263 	queue_for_each_hw_ctx(q, hctx, i)
264 		if (blk_mq_hw_queue_mapped(hctx))
265 			blk_mq_tag_wakeup_all(hctx->tags, true);
266 }
267 
268 /*
269  * Start/end time stamping is only needed if we have iostat or
270  * blk stats enabled, or are using an IO scheduler.
271  */
272 static inline bool blk_mq_need_time_stamp(struct request *rq)
273 {
274 	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
275 }
276 
277 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
278 		unsigned int tag, u64 alloc_time_ns)
279 {
280 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
281 	struct request *rq = tags->static_rqs[tag];
282 
283 	if (data->q->elevator) {
284 		rq->tag = BLK_MQ_NO_TAG;
285 		rq->internal_tag = tag;
286 	} else {
287 		rq->tag = tag;
288 		rq->internal_tag = BLK_MQ_NO_TAG;
289 	}
290 
291 	/* csd/requeue_work/fifo_time is initialized before use */
292 	rq->q = data->q;
293 	rq->mq_ctx = data->ctx;
294 	rq->mq_hctx = data->hctx;
295 	rq->rq_flags = 0;
296 	rq->cmd_flags = data->cmd_flags;
297 	if (data->flags & BLK_MQ_REQ_PM)
298 		rq->rq_flags |= RQF_PM;
299 	if (blk_queue_io_stat(data->q))
300 		rq->rq_flags |= RQF_IO_STAT;
301 	INIT_LIST_HEAD(&rq->queuelist);
302 	INIT_HLIST_NODE(&rq->hash);
303 	RB_CLEAR_NODE(&rq->rb_node);
304 	rq->rq_disk = NULL;
305 	rq->part = NULL;
306 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
307 	rq->alloc_time_ns = alloc_time_ns;
308 #endif
309 	if (blk_mq_need_time_stamp(rq))
310 		rq->start_time_ns = ktime_get_ns();
311 	else
312 		rq->start_time_ns = 0;
313 	rq->io_start_time_ns = 0;
314 	rq->stats_sectors = 0;
315 	rq->nr_phys_segments = 0;
316 #if defined(CONFIG_BLK_DEV_INTEGRITY)
317 	rq->nr_integrity_segments = 0;
318 #endif
319 	blk_crypto_rq_set_defaults(rq);
320 	/* tag was already set */
321 	WRITE_ONCE(rq->deadline, 0);
322 
323 	rq->timeout = 0;
324 
325 	rq->end_io = NULL;
326 	rq->end_io_data = NULL;
327 
328 	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
329 	refcount_set(&rq->ref, 1);
330 
331 	if (!op_is_flush(data->cmd_flags)) {
332 		struct elevator_queue *e = data->q->elevator;
333 
334 		rq->elv.icq = NULL;
335 		if (e && e->type->ops.prepare_request) {
336 			if (e->type->icq_cache)
337 				blk_mq_sched_assign_ioc(rq);
338 
339 			e->type->ops.prepare_request(rq);
340 			rq->rq_flags |= RQF_ELVPRIV;
341 		}
342 	}
343 
344 	data->hctx->queued++;
345 	return rq;
346 }
347 
348 static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
349 {
350 	struct request_queue *q = data->q;
351 	struct elevator_queue *e = q->elevator;
352 	u64 alloc_time_ns = 0;
353 	unsigned int tag;
354 
355 	/* alloc_time includes depth and tag waits */
356 	if (blk_queue_rq_alloc_time(q))
357 		alloc_time_ns = ktime_get_ns();
358 
359 	if (data->cmd_flags & REQ_NOWAIT)
360 		data->flags |= BLK_MQ_REQ_NOWAIT;
361 
362 	if (e) {
363 		/*
364 		 * Flush/passthrough requests are special and go directly to the
365 		 * dispatch list. Don't include reserved tags in the
366 		 * limiting, as it isn't useful.
367 		 */
368 		if (!op_is_flush(data->cmd_flags) &&
369 		    !blk_op_is_passthrough(data->cmd_flags) &&
370 		    e->type->ops.limit_depth &&
371 		    !(data->flags & BLK_MQ_REQ_RESERVED))
372 			e->type->ops.limit_depth(data->cmd_flags, data);
373 	}
374 
375 retry:
376 	data->ctx = blk_mq_get_ctx(q);
377 	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
378 	if (!e)
379 		blk_mq_tag_busy(data->hctx);
380 
381 	/*
382 	 * Waiting allocations only fail because of an inactive hctx.  In that
383 	 * case just retry the hctx assignment and tag allocation as CPU hotplug
384 	 * should have migrated us to an online CPU by now.
385 	 */
386 	tag = blk_mq_get_tag(data);
387 	if (tag == BLK_MQ_NO_TAG) {
388 		if (data->flags & BLK_MQ_REQ_NOWAIT)
389 			return NULL;
390 
391 		/*
392 		 * Give up the CPU and sleep for a random short time to ensure
393 		 * that threads using a realtime scheduling class are migrated
394 		 * off the CPU, and thus off the hctx that is going away.
395 		 */
396 		msleep(3);
397 		goto retry;
398 	}
399 	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
400 }
401 
402 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
403 		blk_mq_req_flags_t flags)
404 {
405 	struct blk_mq_alloc_data data = {
406 		.q		= q,
407 		.flags		= flags,
408 		.cmd_flags	= op,
409 	};
410 	struct request *rq;
411 	int ret;
412 
413 	ret = blk_queue_enter(q, flags);
414 	if (ret)
415 		return ERR_PTR(ret);
416 
417 	rq = __blk_mq_alloc_request(&data);
418 	if (!rq)
419 		goto out_queue_exit;
420 	rq->__data_len = 0;
421 	rq->__sector = (sector_t) -1;
422 	rq->bio = rq->biotail = NULL;
423 	return rq;
424 out_queue_exit:
425 	blk_queue_exit(q);
426 	return ERR_PTR(-EWOULDBLOCK);
427 }
428 EXPORT_SYMBOL(blk_mq_alloc_request);
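
/*
 * A minimal usage sketch (hypothetical caller code, error handling
 * elided): allocate a passthrough request outside of bio submission
 * and release it with blk_mq_free_request() when done, e.g.:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the command and execute it ...
 *	blk_mq_free_request(rq);
 */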
429 
430 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
431 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
432 {
433 	struct blk_mq_alloc_data data = {
434 		.q		= q,
435 		.flags		= flags,
436 		.cmd_flags	= op,
437 	};
438 	u64 alloc_time_ns = 0;
439 	unsigned int cpu;
440 	unsigned int tag;
441 	int ret;
442 
443 	/* alloc_time includes depth and tag waits */
444 	if (blk_queue_rq_alloc_time(q))
445 		alloc_time_ns = ktime_get_ns();
446 
447 	/*
448 	 * If the tag allocator sleeps we could get an allocation for a
449 	 * different hardware context.  No need to complicate the low level
450 	 * allocator for this for the rare use case of a command tied to
451 	 * a specific queue.
452 	 */
453 	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
454 		return ERR_PTR(-EINVAL);
455 
456 	if (hctx_idx >= q->nr_hw_queues)
457 		return ERR_PTR(-EIO);
458 
459 	ret = blk_queue_enter(q, flags);
460 	if (ret)
461 		return ERR_PTR(ret);
462 
463 	/*
464 	 * Check if the hardware context is actually mapped to anything.
465 	 * If not tell the caller that it should skip this queue.
466 	 */
467 	ret = -EXDEV;
468 	data.hctx = q->queue_hw_ctx[hctx_idx];
469 	if (!blk_mq_hw_queue_mapped(data.hctx))
470 		goto out_queue_exit;
471 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
472 	data.ctx = __blk_mq_get_ctx(q, cpu);
473 
474 	if (!q->elevator)
475 		blk_mq_tag_busy(data.hctx);
476 
477 	ret = -EWOULDBLOCK;
478 	tag = blk_mq_get_tag(&data);
479 	if (tag == BLK_MQ_NO_TAG)
480 		goto out_queue_exit;
481 	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
482 
483 out_queue_exit:
484 	blk_queue_exit(q);
485 	return ERR_PTR(ret);
486 }
487 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
488 
489 static void __blk_mq_free_request(struct request *rq)
490 {
491 	struct request_queue *q = rq->q;
492 	struct blk_mq_ctx *ctx = rq->mq_ctx;
493 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
494 	const int sched_tag = rq->internal_tag;
495 
496 	blk_crypto_free_request(rq);
497 	blk_pm_mark_last_busy(rq);
498 	rq->mq_hctx = NULL;
499 	if (rq->tag != BLK_MQ_NO_TAG)
500 		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
501 	if (sched_tag != BLK_MQ_NO_TAG)
502 		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
503 	blk_mq_sched_restart(hctx);
504 	blk_queue_exit(q);
505 }
506 
507 void blk_mq_free_request(struct request *rq)
508 {
509 	struct request_queue *q = rq->q;
510 	struct elevator_queue *e = q->elevator;
511 	struct blk_mq_ctx *ctx = rq->mq_ctx;
512 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
513 
514 	if (rq->rq_flags & RQF_ELVPRIV) {
515 		if (e && e->type->ops.finish_request)
516 			e->type->ops.finish_request(rq);
517 		if (rq->elv.icq) {
518 			put_io_context(rq->elv.icq->ioc);
519 			rq->elv.icq = NULL;
520 		}
521 	}
522 
523 	ctx->rq_completed[rq_is_sync(rq)]++;
524 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
525 		__blk_mq_dec_active_requests(hctx);
526 
527 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
528 		laptop_io_completion(q->backing_dev_info);
529 
530 	rq_qos_done(q, rq);
531 
532 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
533 	if (refcount_dec_and_test(&rq->ref))
534 		__blk_mq_free_request(rq);
535 }
536 EXPORT_SYMBOL_GPL(blk_mq_free_request);
537 
538 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
539 {
540 	u64 now = 0;
541 
542 	if (blk_mq_need_time_stamp(rq))
543 		now = ktime_get_ns();
544 
545 	if (rq->rq_flags & RQF_STATS) {
546 		blk_mq_poll_stats_start(rq->q);
547 		blk_stat_add(rq, now);
548 	}
549 
550 	blk_mq_sched_completed_request(rq, now);
551 
552 	blk_account_io_done(rq, now);
553 
554 	if (rq->end_io) {
555 		rq_qos_done(rq->q, rq);
556 		rq->end_io(rq, error);
557 	} else {
558 		blk_mq_free_request(rq);
559 	}
560 }
561 EXPORT_SYMBOL(__blk_mq_end_request);
562 
563 void blk_mq_end_request(struct request *rq, blk_status_t error)
564 {
565 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
566 		BUG();
567 	__blk_mq_end_request(rq, error);
568 }
569 EXPORT_SYMBOL(blk_mq_end_request);
570 
571 static void blk_complete_reqs(struct llist_head *list)
572 {
573 	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
574 	struct request *rq, *next;
575 
576 	llist_for_each_entry_safe(rq, next, entry, ipi_list)
577 		rq->q->mq_ops->complete(rq);
578 }
579 
580 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
581 {
582 	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
583 }
584 
585 static int blk_softirq_cpu_dead(unsigned int cpu)
586 {
587 	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
588 	return 0;
589 }
590 
591 static void __blk_mq_complete_request_remote(void *data)
592 {
593 	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
594 }
595 
596 static inline bool blk_mq_complete_need_ipi(struct request *rq)
597 {
598 	int cpu = raw_smp_processor_id();
599 
600 	if (!IS_ENABLED(CONFIG_SMP) ||
601 	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
602 		return false;
603 	/*
604 	 * With force threaded interrupts enabled, raising softirq from an SMP
605 	 * function call will always result in waking the ksoftirqd thread.
606 	 * This is probably worse than completing the request on a different
607 	 * cache domain.
608 	 */
609 	if (force_irqthreads)
610 		return false;
611 
612 	/* same CPU or cache domain?  Complete locally */
613 	if (cpu == rq->mq_ctx->cpu ||
614 	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
615 	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
616 		return false;
617 
618 	/* don't try to IPI to an offline CPU */
619 	return cpu_online(rq->mq_ctx->cpu);
620 }
621 
622 static void blk_mq_complete_send_ipi(struct request *rq)
623 {
624 	struct llist_head *list;
625 	unsigned int cpu;
626 
627 	cpu = rq->mq_ctx->cpu;
628 	list = &per_cpu(blk_cpu_done, cpu);
629 	if (llist_add(&rq->ipi_list, list)) {
630 		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
631 		smp_call_function_single_async(cpu, &rq->csd);
632 	}
633 }
634 
635 static void blk_mq_raise_softirq(struct request *rq)
636 {
637 	struct llist_head *list;
638 
639 	preempt_disable();
640 	list = this_cpu_ptr(&blk_cpu_done);
641 	if (llist_add(&rq->ipi_list, list))
642 		raise_softirq(BLOCK_SOFTIRQ);
643 	preempt_enable();
644 }
645 
646 bool blk_mq_complete_request_remote(struct request *rq)
647 {
648 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
649 
650 	/*
651 	 * For a polled request, always complete locally, it's pointless
652 	 * to redirect the completion.
653 	 */
654 	if (rq->cmd_flags & REQ_HIPRI)
655 		return false;
656 
657 	if (blk_mq_complete_need_ipi(rq)) {
658 		blk_mq_complete_send_ipi(rq);
659 		return true;
660 	}
661 
662 	if (rq->q->nr_hw_queues == 1) {
663 		blk_mq_raise_softirq(rq);
664 		return true;
665 	}
666 	return false;
667 }
668 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
669 
670 /**
671  * blk_mq_complete_request - end I/O on a request
672  * @rq:		the request being processed
673  *
674  * Description:
675  *	Complete a request by scheduling the ->complete() operation.
676  **/
677 void blk_mq_complete_request(struct request *rq)
678 {
679 	if (!blk_mq_complete_request_remote(rq))
680 		rq->q->mq_ops->complete(rq);
681 }
682 EXPORT_SYMBOL(blk_mq_complete_request);
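
/*
 * A minimal completion-path sketch (hypothetical driver code,
 * foo_complete_rq() is an assumed callback name): the interrupt handler
 * hands the request to the block layer, which invokes the driver's
 * ->complete() callback either locally or on the submitting CPU, and
 * that callback typically ends the request, e.g.:
 *
 *	(in the interrupt handler)
 *	blk_mq_complete_request(rq);
 *
 *	(the ->complete() callback)
 *	static void foo_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *	}
 *
 * with BLK_STS_OK standing in for the status reported by the hardware.
 */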
683 
684 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
685 	__releases(hctx->srcu)
686 {
687 	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
688 		rcu_read_unlock();
689 	else
690 		srcu_read_unlock(hctx->srcu, srcu_idx);
691 }
692 
693 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
694 	__acquires(hctx->srcu)
695 {
696 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
697 		/* shut up gcc false positive */
698 		*srcu_idx = 0;
699 		rcu_read_lock();
700 	} else
701 		*srcu_idx = srcu_read_lock(hctx->srcu);
702 }
703 
704 /**
705  * blk_mq_start_request - Start processing a request
706  * @rq: Pointer to request to be started
707  *
708  * Function used by device drivers to notify the block layer that a request
709  * is going to be processed now, so blk layer can do proper initializations
710  * such as starting the timeout timer.
711  */
712 void blk_mq_start_request(struct request *rq)
713 {
714 	struct request_queue *q = rq->q;
715 
716 	trace_block_rq_issue(rq);
717 
718 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
719 		rq->io_start_time_ns = ktime_get_ns();
720 		rq->stats_sectors = blk_rq_sectors(rq);
721 		rq->rq_flags |= RQF_STATS;
722 		rq_qos_issue(q, rq);
723 	}
724 
725 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
726 
727 	blk_add_timer(rq);
728 	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
729 
730 #ifdef CONFIG_BLK_DEV_INTEGRITY
731 	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
732 		q->integrity.profile->prepare_fn(rq);
733 #endif
734 }
735 EXPORT_SYMBOL(blk_mq_start_request);
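
/*
 * A minimal ->queue_rq() sketch (hypothetical driver code; foo_issue()
 * is an assumed helper and error handling is elided): the driver must
 * call blk_mq_start_request() before handing the command to hardware,
 * e.g.:
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (foo_issue(rq))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */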
736 
737 static void __blk_mq_requeue_request(struct request *rq)
738 {
739 	struct request_queue *q = rq->q;
740 
741 	blk_mq_put_driver_tag(rq);
742 
743 	trace_block_rq_requeue(rq);
744 	rq_qos_requeue(q, rq);
745 
746 	if (blk_mq_request_started(rq)) {
747 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
748 		rq->rq_flags &= ~RQF_TIMED_OUT;
749 	}
750 }
751 
752 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
753 {
754 	__blk_mq_requeue_request(rq);
755 
756 	/* this request will be re-inserted to io scheduler queue */
757 	blk_mq_sched_requeue_request(rq);
758 
759 	BUG_ON(!list_empty(&rq->queuelist));
760 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
761 }
762 EXPORT_SYMBOL(blk_mq_requeue_request);
763 
764 static void blk_mq_requeue_work(struct work_struct *work)
765 {
766 	struct request_queue *q =
767 		container_of(work, struct request_queue, requeue_work.work);
768 	LIST_HEAD(rq_list);
769 	struct request *rq, *next;
770 
771 	spin_lock_irq(&q->requeue_lock);
772 	list_splice_init(&q->requeue_list, &rq_list);
773 	spin_unlock_irq(&q->requeue_lock);
774 
775 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
776 		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
777 			continue;
778 
779 		rq->rq_flags &= ~RQF_SOFTBARRIER;
780 		list_del_init(&rq->queuelist);
781 		/*
782 		 * If RQF_DONTPREP is set, rq already contains driver-specific
783 		 * data, so insert it into the hctx dispatch list to avoid any
784 		 * merge.
785 		 */
786 		if (rq->rq_flags & RQF_DONTPREP)
787 			blk_mq_request_bypass_insert(rq, false, false);
788 		else
789 			blk_mq_sched_insert_request(rq, true, false, false);
790 	}
791 
792 	while (!list_empty(&rq_list)) {
793 		rq = list_entry(rq_list.next, struct request, queuelist);
794 		list_del_init(&rq->queuelist);
795 		blk_mq_sched_insert_request(rq, false, false, false);
796 	}
797 
798 	blk_mq_run_hw_queues(q, false);
799 }
800 
801 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
802 				bool kick_requeue_list)
803 {
804 	struct request_queue *q = rq->q;
805 	unsigned long flags;
806 
807 	/*
808 	 * We abuse this flag that is otherwise used by the I/O scheduler to
809 	 * request head insertion from the workqueue.
810 	 */
811 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
812 
813 	spin_lock_irqsave(&q->requeue_lock, flags);
814 	if (at_head) {
815 		rq->rq_flags |= RQF_SOFTBARRIER;
816 		list_add(&rq->queuelist, &q->requeue_list);
817 	} else {
818 		list_add_tail(&rq->queuelist, &q->requeue_list);
819 	}
820 	spin_unlock_irqrestore(&q->requeue_lock, flags);
821 
822 	if (kick_requeue_list)
823 		blk_mq_kick_requeue_list(q);
824 }
825 
826 void blk_mq_kick_requeue_list(struct request_queue *q)
827 {
828 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
829 }
830 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
831 
832 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
833 				    unsigned long msecs)
834 {
835 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
836 				    msecs_to_jiffies(msecs));
837 }
838 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
839 
840 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
841 {
842 	if (tag < tags->nr_tags) {
843 		prefetch(tags->rqs[tag]);
844 		return tags->rqs[tag];
845 	}
846 
847 	return NULL;
848 }
849 EXPORT_SYMBOL(blk_mq_tag_to_rq);
850 
851 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
852 			       void *priv, bool reserved)
853 {
854 	/*
855 	 * If we find a request that isn't idle and the queue matches,
856 	 * we know the queue is busy. Return false to stop the iteration.
857 	 */
858 	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
859 		bool *busy = priv;
860 
861 		*busy = true;
862 		return false;
863 	}
864 
865 	return true;
866 }
867 
868 bool blk_mq_queue_inflight(struct request_queue *q)
869 {
870 	bool busy = false;
871 
872 	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
873 	return busy;
874 }
875 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
876 
877 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
878 {
879 	req->rq_flags |= RQF_TIMED_OUT;
880 	if (req->q->mq_ops->timeout) {
881 		enum blk_eh_timer_return ret;
882 
883 		ret = req->q->mq_ops->timeout(req, reserved);
884 		if (ret == BLK_EH_DONE)
885 			return;
886 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
887 	}
888 
889 	blk_add_timer(req);
890 }
891 
892 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
893 {
894 	unsigned long deadline;
895 
896 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
897 		return false;
898 	if (rq->rq_flags & RQF_TIMED_OUT)
899 		return false;
900 
901 	deadline = READ_ONCE(rq->deadline);
902 	if (time_after_eq(jiffies, deadline))
903 		return true;
904 
905 	if (*next == 0)
906 		*next = deadline;
907 	else if (time_after(*next, deadline))
908 		*next = deadline;
909 	return false;
910 }
911 
912 void blk_mq_put_rq_ref(struct request *rq)
913 {
914 	if (is_flush_rq(rq, rq->mq_hctx))
915 		rq->end_io(rq, 0);
916 	else if (refcount_dec_and_test(&rq->ref))
917 		__blk_mq_free_request(rq);
918 }
919 
920 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
921 		struct request *rq, void *priv, bool reserved)
922 {
923 	unsigned long *next = priv;
924 
925 	/*
926 	 * Just do a quick check if it is expired before locking the request in
927 	 * so we're not unnecessarilly synchronizing across CPUs.
928 	 * so we're not unnecessarily synchronizing across CPUs.
929 	if (!blk_mq_req_expired(rq, next))
930 		return true;
931 
932 	/*
933 	 * We have reason to believe the request may be expired. Take a
934 	 * reference on the request to lock this request lifetime into its
935 	 * currently allocated context to prevent it from being reallocated in
936 	 * the event the completion by-passes this timeout handler.
937 	 *
938 	 * If the reference was already released, then the driver beat the
939 	 * timeout handler to posting a natural completion.
940 	 */
941 	if (!refcount_inc_not_zero(&rq->ref))
942 		return true;
943 
944 	/*
945 	 * The request is now locked and cannot be reallocated underneath the
946 	 * timeout handler's processing. Re-verify this exact request is truly
947 	 * expired; if it is not expired, then the request was completed and
948 	 * reallocated as a new request.
949 	 */
950 	if (blk_mq_req_expired(rq, next))
951 		blk_mq_rq_timed_out(rq, reserved);
952 
953 	blk_mq_put_rq_ref(rq);
954 	return true;
955 }
956 
957 static void blk_mq_timeout_work(struct work_struct *work)
958 {
959 	struct request_queue *q =
960 		container_of(work, struct request_queue, timeout_work);
961 	unsigned long next = 0;
962 	struct blk_mq_hw_ctx *hctx;
963 	int i;
964 
965 	/* A deadlock might occur if a request is stuck requiring a
966 	 * timeout at the same time a queue freeze is waiting for
967 	 * completion, since the timeout code would not be able to
968 	 * acquire the queue reference here.
969 	 *
970 	 * That's why we don't use blk_queue_enter here; instead, we use
971 	 * percpu_ref_tryget directly, because we need to be able to
972 	 * obtain a reference even in the short window between the queue
973 	 * starting to freeze, by dropping the first reference in
974 	 * blk_freeze_queue_start, and the moment the last request is
975 	 * consumed, marked by the instant q_usage_counter reaches
976 	 * zero.
977 	 */
978 	if (!percpu_ref_tryget(&q->q_usage_counter))
979 		return;
980 
981 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
982 
983 	if (next != 0) {
984 		mod_timer(&q->timeout, next);
985 	} else {
986 		/*
987 		 * Request timeouts are handled as a forward rolling timer. If
988 		 * we end up here it means that no requests are pending and
989 		 * also that no request has been pending for a while. Mark
990 		 * each hctx as idle.
991 		 */
992 		queue_for_each_hw_ctx(q, hctx, i) {
993 			/* the hctx may be unmapped, so check it here */
994 			if (blk_mq_hw_queue_mapped(hctx))
995 				blk_mq_tag_idle(hctx);
996 		}
997 	}
998 	blk_queue_exit(q);
999 }
1000 
1001 struct flush_busy_ctx_data {
1002 	struct blk_mq_hw_ctx *hctx;
1003 	struct list_head *list;
1004 };
1005 
1006 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1007 {
1008 	struct flush_busy_ctx_data *flush_data = data;
1009 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1010 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1011 	enum hctx_type type = hctx->type;
1012 
1013 	spin_lock(&ctx->lock);
1014 	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1015 	sbitmap_clear_bit(sb, bitnr);
1016 	spin_unlock(&ctx->lock);
1017 	return true;
1018 }
1019 
1020 /*
1021  * Process software queues that have been marked busy, splicing them
1022  * to the for-dispatch list.
1023  */
1024 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1025 {
1026 	struct flush_busy_ctx_data data = {
1027 		.hctx = hctx,
1028 		.list = list,
1029 	};
1030 
1031 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1032 }
1033 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1034 
1035 struct dispatch_rq_data {
1036 	struct blk_mq_hw_ctx *hctx;
1037 	struct request *rq;
1038 };
1039 
1040 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1041 		void *data)
1042 {
1043 	struct dispatch_rq_data *dispatch_data = data;
1044 	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1045 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1046 	enum hctx_type type = hctx->type;
1047 
1048 	spin_lock(&ctx->lock);
1049 	if (!list_empty(&ctx->rq_lists[type])) {
1050 		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1051 		list_del_init(&dispatch_data->rq->queuelist);
1052 		if (list_empty(&ctx->rq_lists[type]))
1053 			sbitmap_clear_bit(sb, bitnr);
1054 	}
1055 	spin_unlock(&ctx->lock);
1056 
1057 	return !dispatch_data->rq;
1058 }
1059 
1060 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1061 					struct blk_mq_ctx *start)
1062 {
1063 	unsigned off = start ? start->index_hw[hctx->type] : 0;
1064 	struct dispatch_rq_data data = {
1065 		.hctx = hctx,
1066 		.rq   = NULL,
1067 	};
1068 
1069 	__sbitmap_for_each_set(&hctx->ctx_map, off,
1070 			       dispatch_rq_from_ctx, &data);
1071 
1072 	return data.rq;
1073 }
1074 
1075 static inline unsigned int queued_to_index(unsigned int queued)
1076 {
1077 	if (!queued)
1078 		return 0;
1079 
1080 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1081 }
1082 
1083 static bool __blk_mq_get_driver_tag(struct request *rq)
1084 {
1085 	struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
1086 	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1087 	int tag;
1088 
1089 	blk_mq_tag_busy(rq->mq_hctx);
1090 
1091 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1092 		bt = rq->mq_hctx->tags->breserved_tags;
1093 		tag_offset = 0;
1094 	} else {
1095 		if (!hctx_may_queue(rq->mq_hctx, bt))
1096 			return false;
1097 	}
1098 
1099 	tag = __sbitmap_queue_get(bt);
1100 	if (tag == BLK_MQ_NO_TAG)
1101 		return false;
1102 
1103 	rq->tag = tag + tag_offset;
1104 	return true;
1105 }
1106 
1107 static bool blk_mq_get_driver_tag(struct request *rq)
1108 {
1109 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1110 
1111 	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
1112 		return false;
1113 
1114 	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1115 			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1116 		rq->rq_flags |= RQF_MQ_INFLIGHT;
1117 		__blk_mq_inc_active_requests(hctx);
1118 	}
1119 	hctx->tags->rqs[rq->tag] = rq;
1120 	return true;
1121 }
1122 
1123 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1124 				int flags, void *key)
1125 {
1126 	struct blk_mq_hw_ctx *hctx;
1127 
1128 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1129 
1130 	spin_lock(&hctx->dispatch_wait_lock);
1131 	if (!list_empty(&wait->entry)) {
1132 		struct sbitmap_queue *sbq;
1133 
1134 		list_del_init(&wait->entry);
1135 		sbq = hctx->tags->bitmap_tags;
1136 		atomic_dec(&sbq->ws_active);
1137 	}
1138 	spin_unlock(&hctx->dispatch_wait_lock);
1139 
1140 	blk_mq_run_hw_queue(hctx, true);
1141 	return 1;
1142 }
1143 
1144 /*
1145  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1146  * the tag wakeups. For non-shared tags, we can simply mark us needing a
1147  * restart. For both cases, take care to check the condition again after
1148  * marking us as waiting.
1149  */
1150 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1151 				 struct request *rq)
1152 {
1153 	struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
1154 	struct wait_queue_head *wq;
1155 	wait_queue_entry_t *wait;
1156 	bool ret;
1157 
1158 	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1159 		blk_mq_sched_mark_restart_hctx(hctx);
1160 
1161 		/*
1162 		 * It's possible that a tag was freed in the window between the
1163 		 * allocation failure and adding the hardware queue to the wait
1164 		 * queue.
1165 		 *
1166 		 * Don't clear RESTART here, someone else could have set it.
1167 		 * At most this will cost an extra queue run.
1168 		 */
1169 		return blk_mq_get_driver_tag(rq);
1170 	}
1171 
1172 	wait = &hctx->dispatch_wait;
1173 	if (!list_empty_careful(&wait->entry))
1174 		return false;
1175 
1176 	wq = &bt_wait_ptr(sbq, hctx)->wait;
1177 
1178 	spin_lock_irq(&wq->lock);
1179 	spin_lock(&hctx->dispatch_wait_lock);
1180 	if (!list_empty(&wait->entry)) {
1181 		spin_unlock(&hctx->dispatch_wait_lock);
1182 		spin_unlock_irq(&wq->lock);
1183 		return false;
1184 	}
1185 
1186 	atomic_inc(&sbq->ws_active);
1187 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1188 	__add_wait_queue(wq, wait);
1189 
1190 	/*
1191 	 * It's possible that a tag was freed in the window between the
1192 	 * allocation failure and adding the hardware queue to the wait
1193 	 * queue.
1194 	 */
1195 	ret = blk_mq_get_driver_tag(rq);
1196 	if (!ret) {
1197 		spin_unlock(&hctx->dispatch_wait_lock);
1198 		spin_unlock_irq(&wq->lock);
1199 		return false;
1200 	}
1201 
1202 	/*
1203 	 * We got a tag, remove ourselves from the wait queue to ensure
1204 	 * someone else gets the wakeup.
1205 	 */
1206 	list_del_init(&wait->entry);
1207 	atomic_dec(&sbq->ws_active);
1208 	spin_unlock(&hctx->dispatch_wait_lock);
1209 	spin_unlock_irq(&wq->lock);
1210 
1211 	return true;
1212 }
1213 
1214 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1215 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1216 /*
1217  * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
1218  * - EWMA is a simple way to compute a running average value
1219  * - weights of 7/8 and 1/8 are applied so that it decays exponentially
1220  * - a factor of 4 is used to avoid getting a result that is too small (0);
1221  *   the exact factor doesn't matter much because EWMA decays exponentially
1222  */
1223 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1224 {
1225 	unsigned int ewma;
1226 
1227 	if (hctx->queue->elevator)
1228 		return;
1229 
1230 	ewma = hctx->dispatch_busy;
1231 
1232 	if (!ewma && !busy)
1233 		return;
1234 
1235 	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1236 	if (busy)
1237 		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1238 	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1239 
1240 	hctx->dispatch_busy = ewma;
1241 }
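
/*
 * Worked example (hypothetical values): with ewma == 8 and busy == true
 * the update is (8 * 7 + 16) / 8 == 9, while with busy == false it is
 * (8 * 7) / 8 == 7, so repeated updates drift the value toward 16 or 0
 * respectively.
 */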
1242 
1243 #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
1244 
1245 static void blk_mq_handle_dev_resource(struct request *rq,
1246 				       struct list_head *list)
1247 {
1248 	struct request *next =
1249 		list_first_entry_or_null(list, struct request, queuelist);
1250 
1251 	/*
1252 	 * If an I/O scheduler has been configured and we got a driver tag for
1253 	 * the next request already, free it.
1254 	 */
1255 	if (next)
1256 		blk_mq_put_driver_tag(next);
1257 
1258 	list_add(&rq->queuelist, list);
1259 	__blk_mq_requeue_request(rq);
1260 }
1261 
1262 static void blk_mq_handle_zone_resource(struct request *rq,
1263 					struct list_head *zone_list)
1264 {
1265 	/*
1266 	 * If we end up here it is because we cannot dispatch a request to a
1267 	 * specific zone due to LLD level zone-write locking or other zone
1268 	 * related resource not being available. In this case, set the request
1269 	 * aside in zone_list for retrying it later.
1270 	 */
1271 	list_add(&rq->queuelist, zone_list);
1272 	__blk_mq_requeue_request(rq);
1273 }
1274 
1275 enum prep_dispatch {
1276 	PREP_DISPATCH_OK,
1277 	PREP_DISPATCH_NO_TAG,
1278 	PREP_DISPATCH_NO_BUDGET,
1279 };
1280 
1281 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1282 						  bool need_budget)
1283 {
1284 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1285 	int budget_token = -1;
1286 
1287 	if (need_budget) {
1288 		budget_token = blk_mq_get_dispatch_budget(rq->q);
1289 		if (budget_token < 0) {
1290 			blk_mq_put_driver_tag(rq);
1291 			return PREP_DISPATCH_NO_BUDGET;
1292 		}
1293 		blk_mq_set_rq_budget_token(rq, budget_token);
1294 	}
1295 
1296 	if (!blk_mq_get_driver_tag(rq)) {
1297 		/*
1298 		 * The initial allocation attempt failed, so we need to
1299 		 * rerun the hardware queue when a tag is freed. The
1300 		 * waitqueue takes care of that. If the queue is run
1301 		 * before we add this entry back on the dispatch list,
1302 		 * we'll re-run it below.
1303 		 */
1304 		if (!blk_mq_mark_tag_wait(hctx, rq)) {
1305 			/*
1306 			 * Budgets not obtained in this function are all released
1307 			 * together when handling the partial dispatch
1308 			 */
1309 			if (need_budget)
1310 				blk_mq_put_dispatch_budget(rq->q, budget_token);
1311 			return PREP_DISPATCH_NO_TAG;
1312 		}
1313 	}
1314 
1315 	return PREP_DISPATCH_OK;
1316 }
1317 
1318 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
1319 static void blk_mq_release_budgets(struct request_queue *q,
1320 		struct list_head *list)
1321 {
1322 	struct request *rq;
1323 
1324 	list_for_each_entry(rq, list, queuelist) {
1325 		int budget_token = blk_mq_get_rq_budget_token(rq);
1326 
1327 		if (budget_token >= 0)
1328 			blk_mq_put_dispatch_budget(q, budget_token);
1329 	}
1330 }
1331 
1332 /*
1333  * Returns true if we did some work AND can potentially do more.
1334  */
1335 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1336 			     unsigned int nr_budgets)
1337 {
1338 	enum prep_dispatch prep;
1339 	struct request_queue *q = hctx->queue;
1340 	struct request *rq, *nxt;
1341 	int errors, queued;
1342 	blk_status_t ret = BLK_STS_OK;
1343 	LIST_HEAD(zone_list);
1344 
1345 	if (list_empty(list))
1346 		return false;
1347 
1348 	/*
1349 	 * Now process all the entries, sending them to the driver.
1350 	 */
1351 	errors = queued = 0;
1352 	do {
1353 		struct blk_mq_queue_data bd;
1354 
1355 		rq = list_first_entry(list, struct request, queuelist);
1356 
1357 		WARN_ON_ONCE(hctx != rq->mq_hctx);
1358 		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
1359 		if (prep != PREP_DISPATCH_OK)
1360 			break;
1361 
1362 		list_del_init(&rq->queuelist);
1363 
1364 		bd.rq = rq;
1365 
1366 		/*
1367 		 * Flag last if we have no more requests, or if we have more
1368 		 * but can't assign a driver tag to it.
1369 		 */
1370 		if (list_empty(list))
1371 			bd.last = true;
1372 		else {
1373 			nxt = list_first_entry(list, struct request, queuelist);
1374 			bd.last = !blk_mq_get_driver_tag(nxt);
1375 		}
1376 
1377 		/*
1378 		 * Once the request is queued to the LLD, there is no need to
1379 		 * cover the budget any more.
1380 		 */
1381 		if (nr_budgets)
1382 			nr_budgets--;
1383 		ret = q->mq_ops->queue_rq(hctx, &bd);
1384 		switch (ret) {
1385 		case BLK_STS_OK:
1386 			queued++;
1387 			break;
1388 		case BLK_STS_RESOURCE:
1389 		case BLK_STS_DEV_RESOURCE:
1390 			blk_mq_handle_dev_resource(rq, list);
1391 			goto out;
1392 		case BLK_STS_ZONE_RESOURCE:
1393 			/*
1394 			 * Move the request to zone_list and keep going through
1395 			 * the dispatch list to find more requests the drive can
1396 			 * accept.
1397 			 */
1398 			blk_mq_handle_zone_resource(rq, &zone_list);
1399 			break;
1400 		default:
1401 			errors++;
1402 			blk_mq_end_request(rq, ret);
1403 		}
1404 	} while (!list_empty(list));
1405 out:
1406 	if (!list_empty(&zone_list))
1407 		list_splice_tail_init(&zone_list, list);
1408 
1409 	hctx->dispatched[queued_to_index(queued)]++;
1410 
1411 	/* If we didn't flush the entire list, we could have told the driver
1412 	 * there was more coming, but that turned out to be a lie.
1413 	 */
1414 	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
1415 		q->mq_ops->commit_rqs(hctx);
1416 	/*
1417 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1418 	 * that is where we will continue on next queue run.
1419 	 */
1420 	if (!list_empty(list)) {
1421 		bool needs_restart;
1422 		/* For non-shared tags, the RESTART check will suffice */
1423 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
1424 			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1425 		bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
1426 
1427 		if (nr_budgets)
1428 			blk_mq_release_budgets(q, list);
1429 
1430 		spin_lock(&hctx->lock);
1431 		list_splice_tail_init(list, &hctx->dispatch);
1432 		spin_unlock(&hctx->lock);
1433 
1434 		/*
1435 		 * Order adding requests to hctx->dispatch against checking the
1436 		 * SCHED_RESTART flag. This smp_mb() pairs with the one in
1437 		 * blk_mq_sched_restart(), so the restart code path cannot miss
1438 		 * the newly added requests on hctx->dispatch while SCHED_RESTART
1439 		 * is observed here.
1440 		 */
1441 		smp_mb();
1442 
1443 		/*
1444 		 * If SCHED_RESTART was set by the caller of this function and
1445 		 * it is no longer set that means that it was cleared by another
1446 		 * thread and hence that a queue rerun is needed.
1447 		 *
1448 		 * If 'no_tag' is set, that means that we failed getting
1449 		 * a driver tag with an I/O scheduler attached. If our dispatch
1450 		 * waitqueue is no longer active, ensure that we run the queue
1451 		 * AFTER adding our entries back to the list.
1452 		 *
1453 		 * If no I/O scheduler has been configured it is possible that
1454 		 * the hardware queue got stopped and restarted before requests
1455 		 * were pushed back onto the dispatch list. Rerun the queue to
1456 		 * avoid starvation. Notes:
1457 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
1458 		 *   been stopped before rerunning a queue.
1459 		 * - Some but not all block drivers stop a queue before
1460 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1461 		 *   and dm-rq.
1462 		 *
1463 		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1464 		 * bit is set, run queue after a delay to avoid IO stalls
1465 		 * that could otherwise occur if the queue is idle.  We'll do
1466 		 * similar if we couldn't get budget and SCHED_RESTART is set.
1467 		 */
1468 		needs_restart = blk_mq_sched_needs_restart(hctx);
1469 		if (!needs_restart ||
1470 		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1471 			blk_mq_run_hw_queue(hctx, true);
1472 		else if (needs_restart && (ret == BLK_STS_RESOURCE ||
1473 					   no_budget_avail))
1474 			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1475 
1476 		blk_mq_update_dispatch_busy(hctx, true);
1477 		return false;
1478 	} else
1479 		blk_mq_update_dispatch_busy(hctx, false);
1480 
1481 	return (queued + errors) != 0;
1482 }
1483 
1484 /**
1485  * __blk_mq_run_hw_queue - Run a hardware queue.
1486  * @hctx: Pointer to the hardware queue to run.
1487  *
1488  * Send pending requests to the hardware.
1489  */
1490 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1491 {
1492 	int srcu_idx;
1493 
1494 	/*
1495 	 * We can't run the queue inline with ints disabled. Ensure that
1496 	 * we catch bad users of this early.
1497 	 */
1498 	WARN_ON_ONCE(in_interrupt());
1499 
1500 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1501 
1502 	hctx_lock(hctx, &srcu_idx);
1503 	blk_mq_sched_dispatch_requests(hctx);
1504 	hctx_unlock(hctx, srcu_idx);
1505 }
1506 
1507 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1508 {
1509 	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1510 
1511 	if (cpu >= nr_cpu_ids)
1512 		cpu = cpumask_first(hctx->cpumask);
1513 	return cpu;
1514 }
1515 
1516 /*
1517  * It'd be great if the workqueue API had a way to pass
1518  * in a mask and had some smarts for more clever placement.
1519  * For now we just round-robin here, switching for every
1520  * BLK_MQ_CPU_WORK_BATCH queued items.
1521  */
1522 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1523 {
1524 	bool tried = false;
1525 	int next_cpu = hctx->next_cpu;
1526 
1527 	if (hctx->queue->nr_hw_queues == 1)
1528 		return WORK_CPU_UNBOUND;
1529 
1530 	if (--hctx->next_cpu_batch <= 0) {
1531 select_cpu:
1532 		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1533 				cpu_online_mask);
1534 		if (next_cpu >= nr_cpu_ids)
1535 			next_cpu = blk_mq_first_mapped_cpu(hctx);
1536 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1537 	}
1538 
1539 	/*
1540 	 * Do unbound scheduling if we can't find an online CPU for this hctx;
1541 	 * this should only happen on the CPU DEAD handling path.
1542 	 */
1543 	if (!cpu_online(next_cpu)) {
1544 		if (!tried) {
1545 			tried = true;
1546 			goto select_cpu;
1547 		}
1548 
1549 		/*
1550 		 * Make sure to re-select the CPU next time once CPUs in
1551 		 * hctx->cpumask become online again.
1552 		 */
1553 		hctx->next_cpu = next_cpu;
1554 		hctx->next_cpu_batch = 1;
1555 		return WORK_CPU_UNBOUND;
1556 	}
1557 
1558 	hctx->next_cpu = next_cpu;
1559 	return next_cpu;
1560 }
1561 
1562 /**
1563  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
1564  * @hctx: Pointer to the hardware queue to run.
1565  * @async: If we want to run the queue asynchronously.
1566  * @msecs: Milliseconds of delay to wait before running the queue.
1567  *
1568  * If !@async, try to run the queue now. Else, run the queue asynchronously and
1569  * with a delay of @msecs.
1570  */
1571 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1572 					unsigned long msecs)
1573 {
1574 	if (unlikely(blk_mq_hctx_stopped(hctx)))
1575 		return;
1576 
1577 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1578 		int cpu = get_cpu();
1579 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1580 			__blk_mq_run_hw_queue(hctx);
1581 			put_cpu();
1582 			return;
1583 		}
1584 
1585 		put_cpu();
1586 	}
1587 
1588 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1589 				    msecs_to_jiffies(msecs));
1590 }
1591 
1592 /**
1593  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
1594  * @hctx: Pointer to the hardware queue to run.
1595  * @msecs: Milliseconds of delay to wait before running the queue.
1596  *
1597  * Run a hardware queue asynchronously with a delay of @msecs.
1598  */
1599 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1600 {
1601 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
1602 }
1603 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1604 
1605 /**
1606  * blk_mq_run_hw_queue - Start to run a hardware queue.
1607  * @hctx: Pointer to the hardware queue to run.
1608  * @async: If we want to run the queue asynchronously.
1609  *
1610  * Check if the request queue is not in a quiesced state and if there are
1611  * pending requests to be sent. If this is true, run the queue to send requests
1612  * to hardware.
1613  */
1614 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1615 {
1616 	int srcu_idx;
1617 	bool need_run;
1618 
1619 	/*
1620 	 * When the queue is quiesced, we may be switching the io scheduler,
1621 	 * updating nr_hw_queues, or doing other things, and we can't run the
1622 	 * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1623 	 *
1624 	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
1625 	 * quiesced.
1626 	 */
1627 	hctx_lock(hctx, &srcu_idx);
1628 	need_run = !blk_queue_quiesced(hctx->queue) &&
1629 		blk_mq_hctx_has_pending(hctx);
1630 	hctx_unlock(hctx, srcu_idx);
1631 
1632 	if (need_run)
1633 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
1634 }
1635 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1636 
1637 /*
1638  * Is the request queue handled by an IO scheduler that does not respect
1639  * hardware queues when dispatching?
1640  */
1641 static bool blk_mq_has_sqsched(struct request_queue *q)
1642 {
1643 	struct elevator_queue *e = q->elevator;
1644 
1645 	if (e && e->type->ops.dispatch_request &&
1646 	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
1647 		return true;
1648 	return false;
1649 }
1650 
1651 /*
1652  * Return the preferred queue to dispatch from (if any) for a non-mq-aware
1653  * IO scheduler.
1654  */
1655 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
1656 {
1657 	struct blk_mq_hw_ctx *hctx;
1658 
1659 	/*
1660 	 * If the IO scheduler does not respect hardware queues when
1661 	 * dispatching, we just don't bother with multiple HW queues and
1662 	 * dispatch from hctx for the current CPU since running multiple queues
1663 	 * just causes lock contention inside the scheduler and pointless cache
1664 	 * bouncing.
1665 	 */
1666 	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
1667 				     raw_smp_processor_id());
1668 	if (!blk_mq_hctx_stopped(hctx))
1669 		return hctx;
1670 	return NULL;
1671 }
1672 
1673 /**
1674  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
1675  * @q: Pointer to the request queue to run.
1676  * @async: If we want to run the queue asynchronously.
1677  */
1678 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1679 {
1680 	struct blk_mq_hw_ctx *hctx, *sq_hctx;
1681 	int i;
1682 
1683 	sq_hctx = NULL;
1684 	if (blk_mq_has_sqsched(q))
1685 		sq_hctx = blk_mq_get_sq_hctx(q);
1686 	queue_for_each_hw_ctx(q, hctx, i) {
1687 		if (blk_mq_hctx_stopped(hctx))
1688 			continue;
1689 		/*
1690 		 * Dispatch from this hctx either if there's no hctx preferred
1691 		 * by IO scheduler or if it has requests that bypass the
1692 		 * scheduler.
1693 		 */
1694 		if (!sq_hctx || sq_hctx == hctx ||
1695 		    !list_empty_careful(&hctx->dispatch))
1696 			blk_mq_run_hw_queue(hctx, async);
1697 	}
1698 }
1699 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1700 
1701 /**
1702  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
1703  * @q: Pointer to the request queue to run.
1704  * @msecs: Milliseconds of delay to wait before running the queues.
1705  */
1706 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
1707 {
1708 	struct blk_mq_hw_ctx *hctx, *sq_hctx;
1709 	int i;
1710 
1711 	sq_hctx = NULL;
1712 	if (blk_mq_has_sqsched(q))
1713 		sq_hctx = blk_mq_get_sq_hctx(q);
1714 	queue_for_each_hw_ctx(q, hctx, i) {
1715 		if (blk_mq_hctx_stopped(hctx))
1716 			continue;
1717 		/*
1718 		 * Dispatch from this hctx either if there's no hctx preferred
1719 		 * by IO scheduler or if it has requests that bypass the
1720 		 * scheduler.
1721 		 */
1722 		if (!sq_hctx || sq_hctx == hctx ||
1723 		    !list_empty_careful(&hctx->dispatch))
1724 			blk_mq_delay_run_hw_queue(hctx, msecs);
1725 	}
1726 }
1727 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
1728 
1729 /**
1730  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1731  * @q: request queue.
1732  *
1733  * The caller is responsible for serializing this function against
1734  * blk_mq_{start,stop}_hw_queue().
1735  */
1736 bool blk_mq_queue_stopped(struct request_queue *q)
1737 {
1738 	struct blk_mq_hw_ctx *hctx;
1739 	int i;
1740 
1741 	queue_for_each_hw_ctx(q, hctx, i)
1742 		if (blk_mq_hctx_stopped(hctx))
1743 			return true;
1744 
1745 	return false;
1746 }
1747 EXPORT_SYMBOL(blk_mq_queue_stopped);
1748 
1749 /*
1750  * This function is often used by a driver to pause .queue_rq() when
1751  * there aren't enough resources or some condition isn't satisfied, and
1752  * BLK_STS_RESOURCE is usually returned.
1753  *
1754  * We do not guarantee that dispatch can be drained or blocked
1755  * after blk_mq_stop_hw_queue() returns. Please use
1756  * blk_mq_quiesce_queue() for that requirement.
1757  */
1758 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1759 {
1760 	cancel_delayed_work(&hctx->run_work);
1761 
1762 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1763 }
1764 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1765 
1766 /*
1767  * This function is often used by a driver to pause .queue_rq() when
1768  * there aren't enough resources or some condition isn't satisfied, and
1769  * BLK_STS_RESOURCE is usually returned.
1770  *
1771  * We do not guarantee that dispatch can be drained or blocked
1772  * after blk_mq_stop_hw_queues() returns. Please use
1773  * blk_mq_quiesce_queue() for that requirement.
1774  */
1775 void blk_mq_stop_hw_queues(struct request_queue *q)
1776 {
1777 	struct blk_mq_hw_ctx *hctx;
1778 	int i;
1779 
1780 	queue_for_each_hw_ctx(q, hctx, i)
1781 		blk_mq_stop_hw_queue(hctx);
1782 }
1783 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1784 
1785 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1786 {
1787 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1788 
1789 	blk_mq_run_hw_queue(hctx, false);
1790 }
1791 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1792 
1793 void blk_mq_start_hw_queues(struct request_queue *q)
1794 {
1795 	struct blk_mq_hw_ctx *hctx;
1796 	int i;
1797 
1798 	queue_for_each_hw_ctx(q, hctx, i)
1799 		blk_mq_start_hw_queue(hctx);
1800 }
1801 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1802 
1803 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1804 {
1805 	if (!blk_mq_hctx_stopped(hctx))
1806 		return;
1807 
1808 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1809 	blk_mq_run_hw_queue(hctx, async);
1810 }
1811 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1812 
1813 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1814 {
1815 	struct blk_mq_hw_ctx *hctx;
1816 	int i;
1817 
1818 	queue_for_each_hw_ctx(q, hctx, i)
1819 		blk_mq_start_stopped_hw_queue(hctx, async);
1820 }
1821 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
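
/*
 * A minimal pattern sketch (hypothetical driver code; foo_ring_full()
 * is an assumed helper): ->queue_rq() stops the hardware queue when the
 * device ring is full and the completion path restarts it once entries
 * are reclaimed, e.g.:
 *
 *	(in ->queue_rq())
 *	if (foo_ring_full(hctx)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_DEV_RESOURCE;
 *	}
 *
 *	(in the completion path)
 *	blk_mq_start_stopped_hw_queues(q, true);
 */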
1822 
1823 static void blk_mq_run_work_fn(struct work_struct *work)
1824 {
1825 	struct blk_mq_hw_ctx *hctx;
1826 
1827 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1828 
1829 	/*
1830 	 * If we are stopped, don't run the queue.
1831 	 */
1832 	if (blk_mq_hctx_stopped(hctx))
1833 		return;
1834 
1835 	__blk_mq_run_hw_queue(hctx);
1836 }
1837 
1838 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1839 					    struct request *rq,
1840 					    bool at_head)
1841 {
1842 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1843 	enum hctx_type type = hctx->type;
1844 
1845 	lockdep_assert_held(&ctx->lock);
1846 
1847 	trace_block_rq_insert(rq);
1848 
1849 	if (at_head)
1850 		list_add(&rq->queuelist, &ctx->rq_lists[type]);
1851 	else
1852 		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1853 }
1854 
1855 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1856 			     bool at_head)
1857 {
1858 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1859 
1860 	lockdep_assert_held(&ctx->lock);
1861 
1862 	__blk_mq_insert_req_list(hctx, rq, at_head);
1863 	blk_mq_hctx_mark_pending(hctx, ctx);
1864 }
1865 
1866 /**
1867  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
1868  * @rq: Pointer to request to be inserted.
1869  * @at_head: true if the request should be inserted at the head of the list.
1870  * @run_queue: If we should run the hardware queue after inserting the request.
1871  *
1872  * Should only be used carefully, when the caller knows we want to
1873  * bypass a potential IO scheduler on the target device.
1874  */
1875 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
1876 				  bool run_queue)
1877 {
1878 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1879 
1880 	spin_lock(&hctx->lock);
1881 	if (at_head)
1882 		list_add(&rq->queuelist, &hctx->dispatch);
1883 	else
1884 		list_add_tail(&rq->queuelist, &hctx->dispatch);
1885 	spin_unlock(&hctx->lock);
1886 
1887 	if (run_queue)
1888 		blk_mq_run_hw_queue(hctx, false);
1889 }
1890 
1891 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1892 			    struct list_head *list)
1893 
1894 {
1895 	struct request *rq;
1896 	enum hctx_type type = hctx->type;
1897 
1898 	/*
1899 	 * Preemption doesn't flush the plug list, so it's possible that
1900 	 * ctx->cpu is offline now.
1901 	 */
1902 	list_for_each_entry(rq, list, queuelist) {
1903 		BUG_ON(rq->mq_ctx != ctx);
1904 		trace_block_rq_insert(rq);
1905 	}
1906 
1907 	spin_lock(&ctx->lock);
1908 	list_splice_tail_init(list, &ctx->rq_lists[type]);
1909 	blk_mq_hctx_mark_pending(hctx, ctx);
1910 	spin_unlock(&ctx->lock);
1911 }
1912 
1913 static int plug_rq_cmp(void *priv, const struct list_head *a,
1914 		       const struct list_head *b)
1915 {
1916 	struct request *rqa = container_of(a, struct request, queuelist);
1917 	struct request *rqb = container_of(b, struct request, queuelist);
1918 
1919 	if (rqa->mq_ctx != rqb->mq_ctx)
1920 		return rqa->mq_ctx > rqb->mq_ctx;
1921 	if (rqa->mq_hctx != rqb->mq_hctx)
1922 		return rqa->mq_hctx > rqb->mq_hctx;
1923 
1924 	return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1925 }
1926 
1927 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1928 {
1929 	LIST_HEAD(list);
1930 
1931 	if (list_empty(&plug->mq_list))
1932 		return;
1933 	list_splice_init(&plug->mq_list, &list);
1934 
1935 	if (plug->rq_count > 2 && plug->multiple_queues)
1936 		list_sort(NULL, &list, plug_rq_cmp);
1937 
1938 	plug->rq_count = 0;
1939 
1940 	do {
1941 		struct list_head rq_list;
1942 		struct request *rq, *head_rq = list_entry_rq(list.next);
1943 		struct list_head *pos = &head_rq->queuelist; /* skip first */
1944 		struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
1945 		struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
1946 		unsigned int depth = 1;
1947 
1948 		list_for_each_continue(pos, &list) {
1949 			rq = list_entry_rq(pos);
1950 			BUG_ON(!rq->q);
1951 			if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
1952 				break;
1953 			depth++;
1954 		}
1955 
1956 		list_cut_before(&rq_list, &list, pos);
1957 		trace_block_unplug(head_rq->q, depth, !from_schedule);
1958 		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1959 						from_schedule);
1960 	} while(!list_empty(&list));
1961 }
1962 
1963 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
1964 		unsigned int nr_segs)
1965 {
1966 	int err;
1967 
1968 	if (bio->bi_opf & REQ_RAHEAD)
1969 		rq->cmd_flags |= REQ_FAILFAST_MASK;
1970 
1971 	rq->__sector = bio->bi_iter.bi_sector;
1972 	rq->write_hint = bio->bi_write_hint;
1973 	blk_rq_bio_prep(rq, bio, nr_segs);
1974 
1975 	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
1976 	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
1977 	WARN_ON_ONCE(err);
1978 
1979 	blk_account_io_start(rq);
1980 }
1981 
1982 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1983 					    struct request *rq,
1984 					    blk_qc_t *cookie, bool last)
1985 {
1986 	struct request_queue *q = rq->q;
1987 	struct blk_mq_queue_data bd = {
1988 		.rq = rq,
1989 		.last = last,
1990 	};
1991 	blk_qc_t new_cookie;
1992 	blk_status_t ret;
1993 
1994 	new_cookie = request_to_qc_t(hctx, rq);
1995 
1996 	/*
1997 	 * For an OK return, we are done. On error, the caller may kill it.
1998 	 * For any other status (busy), just add it to our list as we
1999 	 * previously would have done.
2000 	 */
2001 	ret = q->mq_ops->queue_rq(hctx, &bd);
2002 	switch (ret) {
2003 	case BLK_STS_OK:
2004 		blk_mq_update_dispatch_busy(hctx, false);
2005 		*cookie = new_cookie;
2006 		break;
2007 	case BLK_STS_RESOURCE:
2008 	case BLK_STS_DEV_RESOURCE:
2009 		blk_mq_update_dispatch_busy(hctx, true);
2010 		__blk_mq_requeue_request(rq);
2011 		break;
2012 	default:
2013 		blk_mq_update_dispatch_busy(hctx, false);
2014 		*cookie = BLK_QC_T_NONE;
2015 		break;
2016 	}
2017 
2018 	return ret;
2019 }
2020 
2021 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2022 						struct request *rq,
2023 						blk_qc_t *cookie,
2024 						bool bypass_insert, bool last)
2025 {
2026 	struct request_queue *q = rq->q;
2027 	bool run_queue = true;
2028 	int budget_token;
2029 
2030 	/*
2031 	 * RCU or SRCU read lock is needed before checking quiesced flag.
2032 	 *
2033 	 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2034 	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to the caller,
2035 	 * so the caller does not try to dispatch it again.
2036 	 */
2037 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2038 		run_queue = false;
2039 		bypass_insert = false;
2040 		goto insert;
2041 	}
2042 
2043 	if (q->elevator && !bypass_insert)
2044 		goto insert;
2045 
2046 	budget_token = blk_mq_get_dispatch_budget(q);
2047 	if (budget_token < 0)
2048 		goto insert;
2049 
2050 	blk_mq_set_rq_budget_token(rq, budget_token);
2051 
2052 	if (!blk_mq_get_driver_tag(rq)) {
2053 		blk_mq_put_dispatch_budget(q, budget_token);
2054 		goto insert;
2055 	}
2056 
2057 	return __blk_mq_issue_directly(hctx, rq, cookie, last);
2058 insert:
2059 	if (bypass_insert)
2060 		return BLK_STS_RESOURCE;
2061 
2062 	blk_mq_sched_insert_request(rq, false, run_queue, false);
2063 
2064 	return BLK_STS_OK;
2065 }
2066 
2067 /**
2068  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2069  * @hctx: Pointer of the associated hardware queue.
2070  * @rq: Pointer to request to be sent.
2071  * @cookie: Request queue cookie.
2072  *
2073  * If the device has enough resources to accept a new request now, send the
2074  * request directly to the device driver. Otherwise, insert it into the
2075  * hctx->dispatch list, so it can be retried in the future. Requests
2076  * inserted into this list have higher priority.
2077  */
2078 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2079 		struct request *rq, blk_qc_t *cookie)
2080 {
2081 	blk_status_t ret;
2082 	int srcu_idx;
2083 
2084 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
2085 
2086 	hctx_lock(hctx, &srcu_idx);
2087 
2088 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
2089 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2090 		blk_mq_request_bypass_insert(rq, false, true);
2091 	else if (ret != BLK_STS_OK)
2092 		blk_mq_end_request(rq, ret);
2093 
2094 	hctx_unlock(hctx, srcu_idx);
2095 }
2096 
2097 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2098 {
2099 	blk_status_t ret;
2100 	int srcu_idx;
2101 	blk_qc_t unused_cookie;
2102 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2103 
2104 	hctx_lock(hctx, &srcu_idx);
2105 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
2106 	hctx_unlock(hctx, srcu_idx);
2107 
2108 	return ret;
2109 }
2110 
2111 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2112 		struct list_head *list)
2113 {
2114 	int queued = 0;
2115 	int errors = 0;
2116 
2117 	while (!list_empty(list)) {
2118 		blk_status_t ret;
2119 		struct request *rq = list_first_entry(list, struct request,
2120 				queuelist);
2121 
2122 		list_del_init(&rq->queuelist);
2123 		ret = blk_mq_request_issue_directly(rq, list_empty(list));
2124 		if (ret != BLK_STS_OK) {
2125 			if (ret == BLK_STS_RESOURCE ||
2126 					ret == BLK_STS_DEV_RESOURCE) {
2127 				blk_mq_request_bypass_insert(rq, false,
2128 							list_empty(list));
2129 				break;
2130 			}
2131 			blk_mq_end_request(rq, ret);
2132 			errors++;
2133 		} else
2134 			queued++;
2135 	}
2136 
2137 	/*
2138 	 * If we didn't flush the entire list, we could have told
2139 	 * the driver there was more coming, but that turned out to
2140 	 * be a lie.
2141 	 */
2142 	if ((!list_empty(list) || errors) &&
2143 	     hctx->queue->mq_ops->commit_rqs && queued)
2144 		hctx->queue->mq_ops->commit_rqs(hctx);
2145 }
2146 
2147 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
2148 {
2149 	list_add_tail(&rq->queuelist, &plug->mq_list);
2150 	plug->rq_count++;
2151 	if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
2152 		struct request *tmp;
2153 
2154 		tmp = list_first_entry(&plug->mq_list, struct request,
2155 						queuelist);
2156 		if (tmp->q != rq->q)
2157 			plug->multiple_queues = true;
2158 	}
2159 }
2160 
2161 /**
2162  * blk_mq_submit_bio - Create and send a request to block device.
2163  * @bio: Bio pointer.
2164  *
2165  * Builds up a request structure from @bio and sends it to the device. The
2166  * request may not be queued directly to hardware if:
2167  * * This request can be merged with another one
2168  * * We want to place the request on the plug list for possible future merging
2169  * * There is an IO scheduler attached to this queue
2170  *
2171  * It will not queue the request if there is an error with the bio or at
2172  * request creation time.
2173  *
2174  * Returns: Request queue cookie.
2175  */
2176 blk_qc_t blk_mq_submit_bio(struct bio *bio)
2177 {
2178 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
2179 	const int is_sync = op_is_sync(bio->bi_opf);
2180 	const int is_flush_fua = op_is_flush(bio->bi_opf);
2181 	struct blk_mq_alloc_data data = {
2182 		.q		= q,
2183 	};
2184 	struct request *rq;
2185 	struct blk_plug *plug;
2186 	struct request *same_queue_rq = NULL;
2187 	unsigned int nr_segs;
2188 	blk_qc_t cookie;
2189 	blk_status_t ret;
2190 	bool hipri;
2191 
2192 	blk_queue_bounce(q, &bio);
2193 	__blk_queue_split(&bio, &nr_segs);
2194 
2195 	if (!bio_integrity_prep(bio))
2196 		goto queue_exit;
2197 
2198 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
2199 	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
2200 		goto queue_exit;
2201 
2202 	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2203 		goto queue_exit;
2204 
2205 	rq_qos_throttle(q, bio);
2206 
2207 	hipri = bio->bi_opf & REQ_HIPRI;
2208 
2209 	data.cmd_flags = bio->bi_opf;
2210 	rq = __blk_mq_alloc_request(&data);
2211 	if (unlikely(!rq)) {
2212 		rq_qos_cleanup(q, bio);
2213 		if (bio->bi_opf & REQ_NOWAIT)
2214 			bio_wouldblock_error(bio);
2215 		goto queue_exit;
2216 	}
2217 
2218 	trace_block_getrq(bio);
2219 
2220 	rq_qos_track(q, rq, bio);
2221 
2222 	cookie = request_to_qc_t(data.hctx, rq);
2223 
2224 	blk_mq_bio_to_request(rq, bio, nr_segs);
2225 
2226 	ret = blk_crypto_init_request(rq);
2227 	if (ret != BLK_STS_OK) {
2228 		bio->bi_status = ret;
2229 		bio_endio(bio);
2230 		blk_mq_free_request(rq);
2231 		return BLK_QC_T_NONE;
2232 	}
2233 
2234 	plug = blk_mq_plug(q, bio);
2235 	if (unlikely(is_flush_fua)) {
2236 		/* Bypass scheduler for flush requests */
2237 		blk_insert_flush(rq);
2238 		blk_mq_run_hw_queue(data.hctx, true);
2239 	} else if (plug && (q->nr_hw_queues == 1 ||
2240 		   blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
2241 		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
2242 		/*
2243 		 * Use plugging if we have a ->commit_rqs() hook as well, as
2244 		 * we know the driver uses bd->last in a smart fashion.
2245 		 *
2246 		 * Use normal plugging if this disk is a slow HDD, as sequential
2247 		 * IO may benefit a lot from plug merging.
2248 		 */
2249 		unsigned int request_count = plug->rq_count;
2250 		struct request *last = NULL;
2251 
2252 		if (!request_count)
2253 			trace_block_plug(q);
2254 		else
2255 			last = list_entry_rq(plug->mq_list.prev);
2256 
2257 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
2258 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
2259 			blk_flush_plug_list(plug, false);
2260 			trace_block_plug(q);
2261 		}
2262 
2263 		blk_add_rq_to_plug(plug, rq);
2264 	} else if (q->elevator) {
2265 		/* Insert the request at the IO scheduler queue */
2266 		blk_mq_sched_insert_request(rq, false, true, true);
2267 	} else if (plug && !blk_queue_nomerges(q)) {
2268 		/*
2269 		 * We do limited plugging. If the bio can be merged, do that.
2270 		 * Otherwise the existing request in the plug list will be
2271 		 * issued. So the plug list will have one request at most.
2272 		 * The plug list might get flushed before this. If that happens,
2273 		 * the plug list is empty, and same_queue_rq is invalid.
2274 		 */
2275 		if (list_empty(&plug->mq_list))
2276 			same_queue_rq = NULL;
2277 		if (same_queue_rq) {
2278 			list_del_init(&same_queue_rq->queuelist);
2279 			plug->rq_count--;
2280 		}
2281 		blk_add_rq_to_plug(plug, rq);
2282 		trace_block_plug(q);
2283 
2284 		if (same_queue_rq) {
2285 			data.hctx = same_queue_rq->mq_hctx;
2286 			trace_block_unplug(q, 1, true);
2287 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2288 					&cookie);
2289 		}
2290 	} else if ((q->nr_hw_queues > 1 && is_sync) ||
2291 			!data.hctx->dispatch_busy) {
2292 		/*
2293 		 * There is no scheduler and we can try to send directly
2294 		 * to the hardware.
2295 		 */
2296 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2297 	} else {
2298 		/* Default case. */
2299 		blk_mq_sched_insert_request(rq, false, true, true);
2300 	}
2301 
2302 	if (!hipri)
2303 		return BLK_QC_T_NONE;
2304 	return cookie;
2305 queue_exit:
2306 	blk_queue_exit(q);
2307 	return BLK_QC_T_NONE;
2308 }
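
/*
 * For reference, a bio normally reaches blk_mq_submit_bio() via submit_bio()
 * on a bio built by a filesystem or another in-kernel submitter. A minimal
 * sketch of such a submitter (my_read_page() and my_end_io() are
 * hypothetical):
 *
 *	static void my_read_page(struct block_device *bdev, sector_t sector,
 *				 struct page *page)
 *	{
 *		struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *		bio_set_dev(bio, bdev);
 *		bio->bi_opf = REQ_OP_READ;
 *		bio->bi_iter.bi_sector = sector;
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *		bio->bi_end_io = my_end_io;
 *		submit_bio(bio);	// ends up here, in blk_mq_submit_bio()
 *	}
 */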
2309 
2310 static size_t order_to_size(unsigned int order)
2311 {
2312 	return (size_t)PAGE_SIZE << order;
2313 }
2314 
2315 /* called before freeing request pool in @tags */
2316 static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
2317 		struct blk_mq_tags *tags, unsigned int hctx_idx)
2318 {
2319 	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
2320 	struct page *page;
2321 	unsigned long flags;
2322 
2323 	list_for_each_entry(page, &tags->page_list, lru) {
2324 		unsigned long start = (unsigned long)page_address(page);
2325 		unsigned long end = start + order_to_size(page->private);
2326 		int i;
2327 
2328 		for (i = 0; i < set->queue_depth; i++) {
2329 			struct request *rq = drv_tags->rqs[i];
2330 			unsigned long rq_addr = (unsigned long)rq;
2331 
2332 			if (rq_addr >= start && rq_addr < end) {
2333 				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
2334 				cmpxchg(&drv_tags->rqs[i], rq, NULL);
2335 			}
2336 		}
2337 	}
2338 
2339 	/*
2340 	 * Wait until all pending iterations are done.
2341 	 *
2342 	 * The request references have been cleared, and this is guaranteed to
2343 	 * be observed after the ->lock is released.
2344 	 */
2345 	spin_lock_irqsave(&drv_tags->lock, flags);
2346 	spin_unlock_irqrestore(&drv_tags->lock, flags);
2347 }
2348 
2349 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2350 		     unsigned int hctx_idx)
2351 {
2352 	struct page *page;
2353 
2354 	if (tags->rqs && set->ops->exit_request) {
2355 		int i;
2356 
2357 		for (i = 0; i < tags->nr_tags; i++) {
2358 			struct request *rq = tags->static_rqs[i];
2359 
2360 			if (!rq)
2361 				continue;
2362 			set->ops->exit_request(set, rq, hctx_idx);
2363 			tags->static_rqs[i] = NULL;
2364 		}
2365 	}
2366 
2367 	blk_mq_clear_rq_mapping(set, tags, hctx_idx);
2368 
2369 	while (!list_empty(&tags->page_list)) {
2370 		page = list_first_entry(&tags->page_list, struct page, lru);
2371 		list_del_init(&page->lru);
2372 		/*
2373 		 * Remove kmemleak object previously allocated in
2374 		 * blk_mq_alloc_rqs().
2375 		 */
2376 		kmemleak_free(page_address(page));
2377 		__free_pages(page, page->private);
2378 	}
2379 }
2380 
2381 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
2382 {
2383 	kfree(tags->rqs);
2384 	tags->rqs = NULL;
2385 	kfree(tags->static_rqs);
2386 	tags->static_rqs = NULL;
2387 
2388 	blk_mq_free_tags(tags, flags);
2389 }
2390 
2391 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2392 					unsigned int hctx_idx,
2393 					unsigned int nr_tags,
2394 					unsigned int reserved_tags,
2395 					unsigned int flags)
2396 {
2397 	struct blk_mq_tags *tags;
2398 	int node;
2399 
2400 	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2401 	if (node == NUMA_NO_NODE)
2402 		node = set->numa_node;
2403 
2404 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
2405 	if (!tags)
2406 		return NULL;
2407 
2408 	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2409 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2410 				 node);
2411 	if (!tags->rqs) {
2412 		blk_mq_free_tags(tags, flags);
2413 		return NULL;
2414 	}
2415 
2416 	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2417 					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2418 					node);
2419 	if (!tags->static_rqs) {
2420 		kfree(tags->rqs);
2421 		blk_mq_free_tags(tags, flags);
2422 		return NULL;
2423 	}
2424 
2425 	return tags;
2426 }
2427 
2428 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2429 			       unsigned int hctx_idx, int node)
2430 {
2431 	int ret;
2432 
2433 	if (set->ops->init_request) {
2434 		ret = set->ops->init_request(set, rq, hctx_idx, node);
2435 		if (ret)
2436 			return ret;
2437 	}
2438 
2439 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2440 	return 0;
2441 }
2442 
2443 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2444 		     unsigned int hctx_idx, unsigned int depth)
2445 {
2446 	unsigned int i, j, entries_per_page, max_order = 4;
2447 	size_t rq_size, left;
2448 	int node;
2449 
2450 	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2451 	if (node == NUMA_NO_NODE)
2452 		node = set->numa_node;
2453 
2454 	INIT_LIST_HEAD(&tags->page_list);
2455 
2456 	/*
2457 	 * rq_size is the size of the request plus driver payload, rounded
2458 	 * to the cacheline size
2459 	 */
2460 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
2461 				cache_line_size());
2462 	left = rq_size * depth;
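	/*
	 * For example, with a (hypothetical) 384-byte struct request, a
	 * 192-byte cmd_size and 64-byte cache lines, rq_size rounds up to
	 * 576 bytes and a depth of 256 needs about 144KB, carved out of
	 * the pages allocated below.
	 */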
2463 
2464 	for (i = 0; i < depth; ) {
2465 		int this_order = max_order;
2466 		struct page *page;
2467 		int to_do;
2468 		void *p;
2469 
2470 		while (this_order && left < order_to_size(this_order - 1))
2471 			this_order--;
2472 
2473 		do {
2474 			page = alloc_pages_node(node,
2475 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2476 				this_order);
2477 			if (page)
2478 				break;
2479 			if (!this_order--)
2480 				break;
2481 			if (order_to_size(this_order) < rq_size)
2482 				break;
2483 		} while (1);
2484 
2485 		if (!page)
2486 			goto fail;
2487 
2488 		page->private = this_order;
2489 		list_add_tail(&page->lru, &tags->page_list);
2490 
2491 		p = page_address(page);
2492 		/*
2493 		 * Allow kmemleak to scan these pages as they contain pointers
2494 		 * to additional allocations made via ops->init_request().
2495 		 */
2496 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2497 		entries_per_page = order_to_size(this_order) / rq_size;
2498 		to_do = min(entries_per_page, depth - i);
2499 		left -= to_do * rq_size;
2500 		for (j = 0; j < to_do; j++) {
2501 			struct request *rq = p;
2502 
2503 			tags->static_rqs[i] = rq;
2504 			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2505 				tags->static_rqs[i] = NULL;
2506 				goto fail;
2507 			}
2508 
2509 			p += rq_size;
2510 			i++;
2511 		}
2512 	}
2513 	return 0;
2514 
2515 fail:
2516 	blk_mq_free_rqs(set, tags, hctx_idx);
2517 	return -ENOMEM;
2518 }
2519 
2520 struct rq_iter_data {
2521 	struct blk_mq_hw_ctx *hctx;
2522 	bool has_rq;
2523 };
2524 
2525 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
2526 {
2527 	struct rq_iter_data *iter_data = data;
2528 
2529 	if (rq->mq_hctx != iter_data->hctx)
2530 		return true;
2531 	iter_data->has_rq = true;
2532 	return false;
2533 }
2534 
2535 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
2536 {
2537 	struct blk_mq_tags *tags = hctx->sched_tags ?
2538 			hctx->sched_tags : hctx->tags;
2539 	struct rq_iter_data data = {
2540 		.hctx	= hctx,
2541 	};
2542 
2543 	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
2544 	return data.has_rq;
2545 }
2546 
2547 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
2548 		struct blk_mq_hw_ctx *hctx)
2549 {
2550 	if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
2551 		return false;
2552 	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
2553 		return false;
2554 	return true;
2555 }
2556 
2557 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
2558 {
2559 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2560 			struct blk_mq_hw_ctx, cpuhp_online);
2561 
2562 	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
2563 	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
2564 		return 0;
2565 
2566 	/*
2567 	 * Prevent new requests from being allocated on the current hctx.
2568 	 *
2569 	 * The smp_mb__after_atomic() pairs with the implied barrier in
2570 	 * test_and_set_bit_lock() in sbitmap_get().  It ensures the inactive
2571 	 * flag is seen once we return from the tag allocator.
2572 	 */
2573 	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2574 	smp_mb__after_atomic();
2575 
2576 	/*
2577 	 * Try to grab a reference to the queue and wait for any outstanding
2578 	 * requests.  If we could not grab a reference the queue has been
2579 	 * frozen and there are no requests.
2580 	 */
2581 	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
2582 		while (blk_mq_hctx_has_requests(hctx))
2583 			msleep(5);
2584 		percpu_ref_put(&hctx->queue->q_usage_counter);
2585 	}
2586 
2587 	return 0;
2588 }
2589 
2590 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
2591 {
2592 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2593 			struct blk_mq_hw_ctx, cpuhp_online);
2594 
2595 	if (cpumask_test_cpu(cpu, hctx->cpumask))
2596 		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2597 	return 0;
2598 }
2599 
2600 /*
2601  * 'cpu' is going away. Splice any existing rq_list entries from this
2602  * software queue to the hw queue dispatch list, and ensure that it
2603  * gets run.
2604  */
2605 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2606 {
2607 	struct blk_mq_hw_ctx *hctx;
2608 	struct blk_mq_ctx *ctx;
2609 	LIST_HEAD(tmp);
2610 	enum hctx_type type;
2611 
2612 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2613 	if (!cpumask_test_cpu(cpu, hctx->cpumask))
2614 		return 0;
2615 
2616 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2617 	type = hctx->type;
2618 
2619 	spin_lock(&ctx->lock);
2620 	if (!list_empty(&ctx->rq_lists[type])) {
2621 		list_splice_init(&ctx->rq_lists[type], &tmp);
2622 		blk_mq_hctx_clear_pending(hctx, ctx);
2623 	}
2624 	spin_unlock(&ctx->lock);
2625 
2626 	if (list_empty(&tmp))
2627 		return 0;
2628 
2629 	spin_lock(&hctx->lock);
2630 	list_splice_tail_init(&tmp, &hctx->dispatch);
2631 	spin_unlock(&hctx->lock);
2632 
2633 	blk_mq_run_hw_queue(hctx, true);
2634 	return 0;
2635 }
2636 
2637 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2638 {
2639 	if (!(hctx->flags & BLK_MQ_F_STACKING))
2640 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2641 						    &hctx->cpuhp_online);
2642 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2643 					    &hctx->cpuhp_dead);
2644 }
2645 
2646 /*
2647  * Before freeing the hw queue, clear the flush request reference in
2648  * tags->rqs[] to avoid a potential use-after-free.
2649  */
2650 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
2651 		unsigned int queue_depth, struct request *flush_rq)
2652 {
2653 	int i;
2654 	unsigned long flags;
2655 
2656 	/* The hw queue may not be mapped yet */
2657 	if (!tags)
2658 		return;
2659 
2660 	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
2661 
2662 	for (i = 0; i < queue_depth; i++)
2663 		cmpxchg(&tags->rqs[i], flush_rq, NULL);
2664 
2665 	/*
2666 	 * Wait until all pending iterations are done.
2667 	 *
2668 	 * The request references have been cleared, and this is guaranteed to
2669 	 * be observed after the ->lock is released.
2670 	 */
2671 	spin_lock_irqsave(&tags->lock, flags);
2672 	spin_unlock_irqrestore(&tags->lock, flags);
2673 }
2674 
2675 /* hctx->ctxs will be freed in queue's release handler */
2676 static void blk_mq_exit_hctx(struct request_queue *q,
2677 		struct blk_mq_tag_set *set,
2678 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2679 {
2680 	struct request *flush_rq = hctx->fq->flush_rq;
2681 
2682 	if (blk_mq_hw_queue_mapped(hctx))
2683 		blk_mq_tag_idle(hctx);
2684 
2685 	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
2686 			set->queue_depth, flush_rq);
2687 	if (set->ops->exit_request)
2688 		set->ops->exit_request(set, flush_rq, hctx_idx);
2689 
2690 	if (set->ops->exit_hctx)
2691 		set->ops->exit_hctx(hctx, hctx_idx);
2692 
2693 	blk_mq_remove_cpuhp(hctx);
2694 
2695 	spin_lock(&q->unused_hctx_lock);
2696 	list_add(&hctx->hctx_list, &q->unused_hctx_list);
2697 	spin_unlock(&q->unused_hctx_lock);
2698 }
2699 
2700 static void blk_mq_exit_hw_queues(struct request_queue *q,
2701 		struct blk_mq_tag_set *set, int nr_queue)
2702 {
2703 	struct blk_mq_hw_ctx *hctx;
2704 	unsigned int i;
2705 
2706 	queue_for_each_hw_ctx(q, hctx, i) {
2707 		if (i == nr_queue)
2708 			break;
2709 		blk_mq_debugfs_unregister_hctx(hctx);
2710 		blk_mq_exit_hctx(q, set, hctx, i);
2711 	}
2712 }
2713 
2714 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2715 {
2716 	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2717 
2718 	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2719 			   __alignof__(struct blk_mq_hw_ctx)) !=
2720 		     sizeof(struct blk_mq_hw_ctx));
2721 
2722 	if (tag_set->flags & BLK_MQ_F_BLOCKING)
2723 		hw_ctx_size += sizeof(struct srcu_struct);
2724 
2725 	return hw_ctx_size;
2726 }
2727 
2728 static int blk_mq_init_hctx(struct request_queue *q,
2729 		struct blk_mq_tag_set *set,
2730 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2731 {
2732 	hctx->queue_num = hctx_idx;
2733 
2734 	if (!(hctx->flags & BLK_MQ_F_STACKING))
2735 		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2736 				&hctx->cpuhp_online);
2737 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2738 
2739 	hctx->tags = set->tags[hctx_idx];
2740 
2741 	if (set->ops->init_hctx &&
2742 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2743 		goto unregister_cpu_notifier;
2744 
2745 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2746 				hctx->numa_node))
2747 		goto exit_hctx;
2748 	return 0;
2749 
2750  exit_hctx:
2751 	if (set->ops->exit_hctx)
2752 		set->ops->exit_hctx(hctx, hctx_idx);
2753  unregister_cpu_notifier:
2754 	blk_mq_remove_cpuhp(hctx);
2755 	return -1;
2756 }
2757 
2758 static struct blk_mq_hw_ctx *
2759 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2760 		int node)
2761 {
2762 	struct blk_mq_hw_ctx *hctx;
2763 	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2764 
2765 	hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2766 	if (!hctx)
2767 		goto fail_alloc_hctx;
2768 
2769 	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2770 		goto free_hctx;
2771 
2772 	atomic_set(&hctx->nr_active, 0);
2773 	if (node == NUMA_NO_NODE)
2774 		node = set->numa_node;
2775 	hctx->numa_node = node;
2776 
2777 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2778 	spin_lock_init(&hctx->lock);
2779 	INIT_LIST_HEAD(&hctx->dispatch);
2780 	hctx->queue = q;
2781 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
2782 
2783 	INIT_LIST_HEAD(&hctx->hctx_list);
2784 
2785 	/*
2786 	 * Allocate space for all possible cpus to avoid allocation at
2787 	 * runtime
2788 	 */
2789 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2790 			gfp, node);
2791 	if (!hctx->ctxs)
2792 		goto free_cpumask;
2793 
2794 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2795 				gfp, node, false, false))
2796 		goto free_ctxs;
2797 	hctx->nr_ctx = 0;
2798 
2799 	spin_lock_init(&hctx->dispatch_wait_lock);
2800 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2801 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2802 
2803 	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
2804 	if (!hctx->fq)
2805 		goto free_bitmap;
2806 
2807 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2808 		init_srcu_struct(hctx->srcu);
2809 	blk_mq_hctx_kobj_init(hctx);
2810 
2811 	return hctx;
2812 
2813  free_bitmap:
2814 	sbitmap_free(&hctx->ctx_map);
2815  free_ctxs:
2816 	kfree(hctx->ctxs);
2817  free_cpumask:
2818 	free_cpumask_var(hctx->cpumask);
2819  free_hctx:
2820 	kfree(hctx);
2821  fail_alloc_hctx:
2822 	return NULL;
2823 }
2824 
2825 static void blk_mq_init_cpu_queues(struct request_queue *q,
2826 				   unsigned int nr_hw_queues)
2827 {
2828 	struct blk_mq_tag_set *set = q->tag_set;
2829 	unsigned int i, j;
2830 
2831 	for_each_possible_cpu(i) {
2832 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2833 		struct blk_mq_hw_ctx *hctx;
2834 		int k;
2835 
2836 		__ctx->cpu = i;
2837 		spin_lock_init(&__ctx->lock);
2838 		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2839 			INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2840 
2841 		__ctx->queue = q;
2842 
2843 		/*
2844 		 * Set local node, IFF we have more than one hw queue. If
2845 		 * not, we remain on the home node of the device.
2846 		 */
2847 		for (j = 0; j < set->nr_maps; j++) {
2848 			hctx = blk_mq_map_queue_type(q, j, i);
2849 			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2850 				hctx->numa_node = cpu_to_node(i);
2851 		}
2852 	}
2853 }
2854 
2855 static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
2856 					int hctx_idx)
2857 {
2858 	unsigned int flags = set->flags;
2859 	int ret = 0;
2860 
2861 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2862 					set->queue_depth, set->reserved_tags, flags);
2863 	if (!set->tags[hctx_idx])
2864 		return false;
2865 
2866 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2867 				set->queue_depth);
2868 	if (!ret)
2869 		return true;
2870 
2871 	blk_mq_free_rq_map(set->tags[hctx_idx], flags);
2872 	set->tags[hctx_idx] = NULL;
2873 	return false;
2874 }
2875 
2876 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2877 					 unsigned int hctx_idx)
2878 {
2879 	unsigned int flags = set->flags;
2880 
2881 	if (set->tags && set->tags[hctx_idx]) {
2882 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2883 		blk_mq_free_rq_map(set->tags[hctx_idx], flags);
2884 		set->tags[hctx_idx] = NULL;
2885 	}
2886 }
2887 
2888 static void blk_mq_map_swqueue(struct request_queue *q)
2889 {
2890 	unsigned int i, j, hctx_idx;
2891 	struct blk_mq_hw_ctx *hctx;
2892 	struct blk_mq_ctx *ctx;
2893 	struct blk_mq_tag_set *set = q->tag_set;
2894 
2895 	queue_for_each_hw_ctx(q, hctx, i) {
2896 		cpumask_clear(hctx->cpumask);
2897 		hctx->nr_ctx = 0;
2898 		hctx->dispatch_from = NULL;
2899 	}
2900 
2901 	/*
2902 	 * Map software to hardware queues.
2903 	 *
2904 	 * If the cpu isn't present, the cpu is mapped to first hctx.
2905 	 */
2906 	for_each_possible_cpu(i) {
2907 
2908 		ctx = per_cpu_ptr(q->queue_ctx, i);
2909 		for (j = 0; j < set->nr_maps; j++) {
2910 			if (!set->map[j].nr_queues) {
2911 				ctx->hctxs[j] = blk_mq_map_queue_type(q,
2912 						HCTX_TYPE_DEFAULT, i);
2913 				continue;
2914 			}
2915 			hctx_idx = set->map[j].mq_map[i];
2916 			/* unmapped hw queue can be remapped after CPU topo changed */
2917 			if (!set->tags[hctx_idx] &&
2918 			    !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
2919 				/*
2920 				 * If tags initialization fails for some hctx,
2921 				 * that hctx won't be brought online.  In this
2922 				 * case, remap the current ctx to hctx[0], which
2923 				 * is guaranteed to always have tags allocated.
2924 				 */
2925 				set->map[j].mq_map[i] = 0;
2926 			}
2927 
2928 			hctx = blk_mq_map_queue_type(q, j, i);
2929 			ctx->hctxs[j] = hctx;
2930 			/*
2931 			 * If the CPU is already set in the mask, then we've
2932 			 * mapped this one already. This can happen if
2933 			 * devices share queues across queue maps.
2934 			 */
2935 			if (cpumask_test_cpu(i, hctx->cpumask))
2936 				continue;
2937 
2938 			cpumask_set_cpu(i, hctx->cpumask);
2939 			hctx->type = j;
2940 			ctx->index_hw[hctx->type] = hctx->nr_ctx;
2941 			hctx->ctxs[hctx->nr_ctx++] = ctx;
2942 
2943 			/*
2944 			 * If the nr_ctx type overflows, we have exceeded the
2945 			 * number of sw queues we can support.
2946 			 */
2947 			BUG_ON(!hctx->nr_ctx);
2948 		}
2949 
2950 		for (; j < HCTX_MAX_TYPES; j++)
2951 			ctx->hctxs[j] = blk_mq_map_queue_type(q,
2952 					HCTX_TYPE_DEFAULT, i);
2953 	}
2954 
2955 	queue_for_each_hw_ctx(q, hctx, i) {
2956 		/*
2957 		 * If no software queues are mapped to this hardware queue,
2958 		 * disable it and free the request entries.
2959 		 */
2960 		if (!hctx->nr_ctx) {
2961 			/* Never unmap queue 0.  We need it as a
2962 			 * fallback in case allocation fails during
2963 			 * a new remap.
2964 			 */
2965 			if (i && set->tags[i])
2966 				blk_mq_free_map_and_requests(set, i);
2967 
2968 			hctx->tags = NULL;
2969 			continue;
2970 		}
2971 
2972 		hctx->tags = set->tags[i];
2973 		WARN_ON(!hctx->tags);
2974 
2975 		/*
2976 		 * Set the map size to the number of mapped software queues.
2977 		 * This is more accurate and more efficient than looping
2978 		 * over all possibly mapped software queues.
2979 		 */
2980 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2981 
2982 		/*
2983 		 * Initialize batch roundrobin counts
2984 		 */
2985 		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2986 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2987 	}
2988 }
2989 
2990 /*
2991  * Caller needs to ensure that we're either frozen/quiesced, or that
2992  * the queue isn't live yet.
2993  */
2994 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2995 {
2996 	struct blk_mq_hw_ctx *hctx;
2997 	int i;
2998 
2999 	queue_for_each_hw_ctx(q, hctx, i) {
3000 		if (shared)
3001 			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3002 		else
3003 			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3004 	}
3005 }
3006 
3007 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3008 					 bool shared)
3009 {
3010 	struct request_queue *q;
3011 
3012 	lockdep_assert_held(&set->tag_list_lock);
3013 
3014 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3015 		blk_mq_freeze_queue(q);
3016 		queue_set_hctx_shared(q, shared);
3017 		blk_mq_unfreeze_queue(q);
3018 	}
3019 }
3020 
3021 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3022 {
3023 	struct blk_mq_tag_set *set = q->tag_set;
3024 
3025 	mutex_lock(&set->tag_list_lock);
3026 	list_del(&q->tag_set_list);
3027 	if (list_is_singular(&set->tag_list)) {
3028 		/* just transitioned to unshared */
3029 		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3030 		/* update existing queue */
3031 		blk_mq_update_tag_set_shared(set, false);
3032 	}
3033 	mutex_unlock(&set->tag_list_lock);
3034 	INIT_LIST_HEAD(&q->tag_set_list);
3035 }
3036 
3037 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3038 				     struct request_queue *q)
3039 {
3040 	mutex_lock(&set->tag_list_lock);
3041 
3042 	/*
3043 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
3044 	 */
3045 	if (!list_empty(&set->tag_list) &&
3046 	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3047 		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3048 		/* update existing queue */
3049 		blk_mq_update_tag_set_shared(set, true);
3050 	}
3051 	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
3052 		queue_set_hctx_shared(q, true);
3053 	list_add_tail(&q->tag_set_list, &set->tag_list);
3054 
3055 	mutex_unlock(&set->tag_list_lock);
3056 }
3057 
3058 /* All allocations will be freed in release handler of q->mq_kobj */
3059 static int blk_mq_alloc_ctxs(struct request_queue *q)
3060 {
3061 	struct blk_mq_ctxs *ctxs;
3062 	int cpu;
3063 
3064 	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3065 	if (!ctxs)
3066 		return -ENOMEM;
3067 
3068 	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3069 	if (!ctxs->queue_ctx)
3070 		goto fail;
3071 
3072 	for_each_possible_cpu(cpu) {
3073 		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3074 		ctx->ctxs = ctxs;
3075 	}
3076 
3077 	q->mq_kobj = &ctxs->kobj;
3078 	q->queue_ctx = ctxs->queue_ctx;
3079 
3080 	return 0;
3081  fail:
3082 	kfree(ctxs);
3083 	return -ENOMEM;
3084 }
3085 
3086 /*
3087  * This is the actual release handler for mq, but we do it from the
3088  * request queue's release handler to avoid use-after-free and other
3089  * headaches: q->mq_kobj shouldn't have been introduced, but we can't
3090  * group the ctx/kctx kobjects without it.
3091  */
3092 void blk_mq_release(struct request_queue *q)
3093 {
3094 	struct blk_mq_hw_ctx *hctx, *next;
3095 	int i;
3096 
3097 	queue_for_each_hw_ctx(q, hctx, i)
3098 		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3099 
3100 	/* all hctx are in .unused_hctx_list now */
3101 	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3102 		list_del_init(&hctx->hctx_list);
3103 		kobject_put(&hctx->kobj);
3104 	}
3105 
3106 	kfree(q->queue_hw_ctx);
3107 
3108 	/*
3109 	 * Release .mq_kobj and the sw queues' kobjects now because
3110 	 * both share their lifetime with the request queue.
3111 	 */
3112 	blk_mq_sysfs_deinit(q);
3113 }
3114 
3115 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
3116 		void *queuedata)
3117 {
3118 	struct request_queue *uninit_q, *q;
3119 
3120 	uninit_q = blk_alloc_queue(set->numa_node);
3121 	if (!uninit_q)
3122 		return ERR_PTR(-ENOMEM);
3123 	uninit_q->queuedata = queuedata;
3124 
3125 	/*
3126 	 * Initialize the queue without an elevator. device_add_disk() will do
3127 	 * the initialization.
3128 	 */
3129 	q = blk_mq_init_allocated_queue(set, uninit_q, false);
3130 	if (IS_ERR(q))
3131 		blk_cleanup_queue(uninit_q);
3132 
3133 	return q;
3134 }
3135 EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
3136 
3137 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3138 {
3139 	return blk_mq_init_queue_data(set, NULL);
3140 }
3141 EXPORT_SYMBOL(blk_mq_init_queue);
3142 
3143 /*
3144  * Helper for setting up a queue with mq ops, a given queue depth, and
3145  * the passed-in mq ops flags.
3146  */
3147 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
3148 					   const struct blk_mq_ops *ops,
3149 					   unsigned int queue_depth,
3150 					   unsigned int set_flags)
3151 {
3152 	struct request_queue *q;
3153 	int ret;
3154 
3155 	memset(set, 0, sizeof(*set));
3156 	set->ops = ops;
3157 	set->nr_hw_queues = 1;
3158 	set->nr_maps = 1;
3159 	set->queue_depth = queue_depth;
3160 	set->numa_node = NUMA_NO_NODE;
3161 	set->flags = set_flags;
3162 
3163 	ret = blk_mq_alloc_tag_set(set);
3164 	if (ret)
3165 		return ERR_PTR(ret);
3166 
3167 	q = blk_mq_init_queue(set);
3168 	if (IS_ERR(q)) {
3169 		blk_mq_free_tag_set(set);
3170 		return q;
3171 	}
3172 
3173 	return q;
3174 }
3175 EXPORT_SYMBOL(blk_mq_init_sq_queue);
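
/*
 * A minimal sketch of how a simple single-queue driver might use the helper
 * above (my_mq_ops, my_queue_rq() and struct my_dev are hypothetical):
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 *
 *	static int my_dev_init_queue(struct my_dev *dev)
 *	{
 *		dev->queue = blk_mq_init_sq_queue(&dev->tag_set, &my_mq_ops,
 *						  128, BLK_MQ_F_SHOULD_MERGE);
 *		if (IS_ERR(dev->queue))
 *			return PTR_ERR(dev->queue);
 *		dev->queue->queuedata = dev;
 *		return 0;
 *	}
 */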
3176 
3177 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
3178 		struct blk_mq_tag_set *set, struct request_queue *q,
3179 		int hctx_idx, int node)
3180 {
3181 	struct blk_mq_hw_ctx *hctx = NULL, *tmp;
3182 
3183 	/* reuse dead hctx first */
3184 	spin_lock(&q->unused_hctx_lock);
3185 	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
3186 		if (tmp->numa_node == node) {
3187 			hctx = tmp;
3188 			break;
3189 		}
3190 	}
3191 	if (hctx)
3192 		list_del_init(&hctx->hctx_list);
3193 	spin_unlock(&q->unused_hctx_lock);
3194 
3195 	if (!hctx)
3196 		hctx = blk_mq_alloc_hctx(q, set, node);
3197 	if (!hctx)
3198 		goto fail;
3199 
3200 	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
3201 		goto free_hctx;
3202 
3203 	return hctx;
3204 
3205  free_hctx:
3206 	kobject_put(&hctx->kobj);
3207  fail:
3208 	return NULL;
3209 }
3210 
3211 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
3212 						struct request_queue *q)
3213 {
3214 	int i, j, end;
3215 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
3216 
3217 	if (q->nr_hw_queues < set->nr_hw_queues) {
3218 		struct blk_mq_hw_ctx **new_hctxs;
3219 
3220 		new_hctxs = kcalloc_node(set->nr_hw_queues,
3221 				       sizeof(*new_hctxs), GFP_KERNEL,
3222 				       set->numa_node);
3223 		if (!new_hctxs)
3224 			return;
3225 		if (hctxs)
3226 			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
3227 			       sizeof(*hctxs));
3228 		q->queue_hw_ctx = new_hctxs;
3229 		kfree(hctxs);
3230 		hctxs = new_hctxs;
3231 	}
3232 
3233 	/* protect against switching io scheduler  */
3234 	mutex_lock(&q->sysfs_lock);
3235 	for (i = 0; i < set->nr_hw_queues; i++) {
3236 		int node;
3237 		struct blk_mq_hw_ctx *hctx;
3238 
3239 		node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
3240 		/*
3241 		 * If the hw queue has been mapped to another numa node,
3242 		 * we need to realloc the hctx. If allocation fails, fall back
3243 		 * to the previous one.
3244 		 */
3245 		if (hctxs[i] && (hctxs[i]->numa_node == node))
3246 			continue;
3247 
3248 		hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
3249 		if (hctx) {
3250 			if (hctxs[i])
3251 				blk_mq_exit_hctx(q, set, hctxs[i], i);
3252 			hctxs[i] = hctx;
3253 		} else {
3254 			if (hctxs[i])
3255 				pr_warn("Allocate new hctx on node %d fails, "
3256 					"fallback to previous one on node %d\n",
3257 					node, hctxs[i]->numa_node);
3258 			else
3259 				break;
3260 		}
3261 	}
3262 	/*
3263 	 * Increasing nr_hw_queues failed. Free the newly allocated
3264 	 * hctxs and keep the previous q->nr_hw_queues.
3265 	 */
3266 	if (i != set->nr_hw_queues) {
3267 		j = q->nr_hw_queues;
3268 		end = i;
3269 	} else {
3270 		j = i;
3271 		end = q->nr_hw_queues;
3272 		q->nr_hw_queues = set->nr_hw_queues;
3273 	}
3274 
3275 	for (; j < end; j++) {
3276 		struct blk_mq_hw_ctx *hctx = hctxs[j];
3277 
3278 		if (hctx) {
3279 			if (hctx->tags)
3280 				blk_mq_free_map_and_requests(set, j);
3281 			blk_mq_exit_hctx(q, set, hctx, j);
3282 			hctxs[j] = NULL;
3283 		}
3284 	}
3285 	mutex_unlock(&q->sysfs_lock);
3286 }
3287 
3288 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
3289 						  struct request_queue *q,
3290 						  bool elevator_init)
3291 {
3292 	/* mark the queue as mq asap */
3293 	q->mq_ops = set->ops;
3294 
3295 	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
3296 					     blk_mq_poll_stats_bkt,
3297 					     BLK_MQ_POLL_STATS_BKTS, q);
3298 	if (!q->poll_cb)
3299 		goto err_exit;
3300 
3301 	if (blk_mq_alloc_ctxs(q))
3302 		goto err_poll;
3303 
3304 	/* init q->mq_kobj and sw queues' kobjects */
3305 	blk_mq_sysfs_init(q);
3306 
3307 	INIT_LIST_HEAD(&q->unused_hctx_list);
3308 	spin_lock_init(&q->unused_hctx_lock);
3309 
3310 	blk_mq_realloc_hw_ctxs(set, q);
3311 	if (!q->nr_hw_queues)
3312 		goto err_hctxs;
3313 
3314 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
3315 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
3316 
3317 	q->tag_set = set;
3318 
3319 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
3320 	if (set->nr_maps > HCTX_TYPE_POLL &&
3321 	    set->map[HCTX_TYPE_POLL].nr_queues)
3322 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
3323 
3324 	q->sg_reserved_size = INT_MAX;
3325 
3326 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
3327 	INIT_LIST_HEAD(&q->requeue_list);
3328 	spin_lock_init(&q->requeue_lock);
3329 
3330 	q->nr_requests = set->queue_depth;
3331 
3332 	/*
3333 	 * Default to classic polling
3334 	 */
3335 	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
3336 
3337 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
3338 	blk_mq_add_queue_tag_set(set, q);
3339 	blk_mq_map_swqueue(q);
3340 
3341 	if (elevator_init)
3342 		elevator_init_mq(q);
3343 
3344 	return q;
3345 
3346 err_hctxs:
3347 	kfree(q->queue_hw_ctx);
3348 	q->nr_hw_queues = 0;
3349 	blk_mq_sysfs_deinit(q);
3350 err_poll:
3351 	blk_stat_free_callback(q->poll_cb);
3352 	q->poll_cb = NULL;
3353 err_exit:
3354 	q->mq_ops = NULL;
3355 	return ERR_PTR(-ENOMEM);
3356 }
3357 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
3358 
3359 /* tags can _not_ be used after returning from blk_mq_exit_queue */
3360 void blk_mq_exit_queue(struct request_queue *q)
3361 {
3362 	struct blk_mq_tag_set *set = q->tag_set;
3363 
3364 	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
3365 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
3366 	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
3367 	blk_mq_del_queue_tag_set(q);
3368 }
3369 
3370 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
3371 {
3372 	int i;
3373 
3374 	for (i = 0; i < set->nr_hw_queues; i++) {
3375 		if (!__blk_mq_alloc_map_and_request(set, i))
3376 			goto out_unwind;
3377 		cond_resched();
3378 	}
3379 
3380 	return 0;
3381 
3382 out_unwind:
3383 	while (--i >= 0)
3384 		blk_mq_free_map_and_requests(set, i);
3385 
3386 	return -ENOMEM;
3387 }
3388 
3389 /*
3390  * Allocate the request maps associated with this tag_set. Note that this
3391  * may reduce the depth asked for, if memory is tight. set->queue_depth
3392  * will be updated to reflect the allocated depth.
3393  */
3394 static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
3395 {
3396 	unsigned int depth;
3397 	int err;
3398 
3399 	depth = set->queue_depth;
3400 	do {
3401 		err = __blk_mq_alloc_rq_maps(set);
3402 		if (!err)
3403 			break;
3404 
3405 		set->queue_depth >>= 1;
3406 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
3407 			err = -ENOMEM;
3408 			break;
3409 		}
3410 	} while (set->queue_depth);
3411 
3412 	if (!set->queue_depth || err) {
3413 		pr_err("blk-mq: failed to allocate request map\n");
3414 		return -ENOMEM;
3415 	}
3416 
3417 	if (depth != set->queue_depth)
3418 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
3419 						depth, set->queue_depth);
3420 
3421 	return 0;
3422 }
3423 
3424 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
3425 {
3426 	/*
3427 	 * blk_mq_map_queues() and multiple .map_queues() implementations
3428 	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
3429 	 * number of hardware queues.
3430 	 */
3431 	if (set->nr_maps == 1)
3432 		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
3433 
3434 	if (set->ops->map_queues && !is_kdump_kernel()) {
3435 		int i;
3436 
3437 		/*
3438 		 * transport .map_queues is usually done in the following
3439 		 * way:
3440 		 *
3441 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
3442 		 * 	mask = get_cpu_mask(queue)
3443 		 * 	for_each_cpu(cpu, mask)
3444 		 * 		set->map[x].mq_map[cpu] = queue;
3445 		 * }
3446 		 *
3447 		 * When we need to remap, the table has to be cleared to
3448 		 * kill stale mappings, since one CPU may not be mapped
3449 		 * to any hw queue.
3450 		 */
3451 		for (i = 0; i < set->nr_maps; i++)
3452 			blk_mq_clear_mq_map(&set->map[i]);
3453 
3454 		return set->ops->map_queues(set);
3455 	} else {
3456 		BUG_ON(set->nr_maps > 1);
3457 		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3458 	}
3459 }
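
/*
 * A driver's .map_queues callback usually follows the shape sketched in the
 * comment above. A minimal sketch for a device with a single map and no
 * special topology requirements (my_map_queues() is hypothetical):
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 *
 * Such a driver can equally leave .map_queues unset and rely on the fallback
 * above; PCI drivers often use blk_mq_pci_map_queues() instead so that hw
 * queues follow the device's IRQ affinity.
 */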
3460 
3461 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
3462 				  int cur_nr_hw_queues, int new_nr_hw_queues)
3463 {
3464 	struct blk_mq_tags **new_tags;
3465 
3466 	if (cur_nr_hw_queues >= new_nr_hw_queues)
3467 		return 0;
3468 
3469 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
3470 				GFP_KERNEL, set->numa_node);
3471 	if (!new_tags)
3472 		return -ENOMEM;
3473 
3474 	if (set->tags)
3475 		memcpy(new_tags, set->tags, cur_nr_hw_queues *
3476 		       sizeof(*set->tags));
3477 	kfree(set->tags);
3478 	set->tags = new_tags;
3479 	set->nr_hw_queues = new_nr_hw_queues;
3480 
3481 	return 0;
3482 }
3483 
3484 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
3485 				int new_nr_hw_queues)
3486 {
3487 	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
3488 }
3489 
3490 /*
3491  * Alloc a tag set to be associated with one or more request queues.
3492  * May fail with EINVAL for various error conditions. May adjust the
3493  * requested depth down, if it's too large. In that case, the set
3494  * value will be stored in set->queue_depth.
3495  */
3496 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3497 {
3498 	int i, ret;
3499 
3500 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3501 
3502 	if (!set->nr_hw_queues)
3503 		return -EINVAL;
3504 	if (!set->queue_depth)
3505 		return -EINVAL;
3506 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3507 		return -EINVAL;
3508 
3509 	if (!set->ops->queue_rq)
3510 		return -EINVAL;
3511 
3512 	if (!set->ops->get_budget ^ !set->ops->put_budget)
3513 		return -EINVAL;
3514 
3515 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3516 		pr_info("blk-mq: reduced tag depth to %u\n",
3517 			BLK_MQ_MAX_DEPTH);
3518 		set->queue_depth = BLK_MQ_MAX_DEPTH;
3519 	}
3520 
3521 	if (!set->nr_maps)
3522 		set->nr_maps = 1;
3523 	else if (set->nr_maps > HCTX_MAX_TYPES)
3524 		return -EINVAL;
3525 
3526 	/*
3527 	 * If a crashdump is active, then we are potentially in a very
3528 	 * memory constrained environment. Limit us to 1 queue and
3529 	 * 64 tags to prevent using too much memory.
3530 	 */
3531 	if (is_kdump_kernel()) {
3532 		set->nr_hw_queues = 1;
3533 		set->nr_maps = 1;
3534 		set->queue_depth = min(64U, set->queue_depth);
3535 	}
3536 	/*
3537 	 * There is no use for more h/w queues than cpus if we just have
3538 	 * a single map.
3539 	 */
3540 	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3541 		set->nr_hw_queues = nr_cpu_ids;
3542 
3543 	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
3544 		return -ENOMEM;
3545 
3546 	ret = -ENOMEM;
3547 	for (i = 0; i < set->nr_maps; i++) {
3548 		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3549 						  sizeof(set->map[i].mq_map[0]),
3550 						  GFP_KERNEL, set->numa_node);
3551 		if (!set->map[i].mq_map)
3552 			goto out_free_mq_map;
3553 		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3554 	}
3555 
3556 	ret = blk_mq_update_queue_map(set);
3557 	if (ret)
3558 		goto out_free_mq_map;
3559 
3560 	ret = blk_mq_alloc_map_and_requests(set);
3561 	if (ret)
3562 		goto out_free_mq_map;
3563 
3564 	if (blk_mq_is_sbitmap_shared(set->flags)) {
3565 		atomic_set(&set->active_queues_shared_sbitmap, 0);
3566 
3567 		if (blk_mq_init_shared_sbitmap(set)) {
3568 			ret = -ENOMEM;
3569 			goto out_free_mq_rq_maps;
3570 		}
3571 	}
3572 
3573 	mutex_init(&set->tag_list_lock);
3574 	INIT_LIST_HEAD(&set->tag_list);
3575 
3576 	return 0;
3577 
3578 out_free_mq_rq_maps:
3579 	for (i = 0; i < set->nr_hw_queues; i++)
3580 		blk_mq_free_map_and_requests(set, i);
3581 out_free_mq_map:
3582 	for (i = 0; i < set->nr_maps; i++) {
3583 		kfree(set->map[i].mq_map);
3584 		set->map[i].mq_map = NULL;
3585 	}
3586 	kfree(set->tags);
3587 	set->tags = NULL;
3588 	return ret;
3589 }
3590 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
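
/*
 * A minimal sketch of the usual driver-side setup around blk_mq_alloc_tag_set()
 * (struct my_dev, struct my_cmd and my_mq_ops are hypothetical):
 *
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = num_online_cpus();
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	err = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (err)
 *		return err;
 *
 *	dev->queue = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(dev->queue)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		return PTR_ERR(dev->queue);
 *	}
 */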
3591 
3592 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3593 {
3594 	int i, j;
3595 
3596 	for (i = 0; i < set->nr_hw_queues; i++)
3597 		blk_mq_free_map_and_requests(set, i);
3598 
3599 	if (blk_mq_is_sbitmap_shared(set->flags))
3600 		blk_mq_exit_shared_sbitmap(set);
3601 
3602 	for (j = 0; j < set->nr_maps; j++) {
3603 		kfree(set->map[j].mq_map);
3604 		set->map[j].mq_map = NULL;
3605 	}
3606 
3607 	kfree(set->tags);
3608 	set->tags = NULL;
3609 }
3610 EXPORT_SYMBOL(blk_mq_free_tag_set);
3611 
3612 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3613 {
3614 	struct blk_mq_tag_set *set = q->tag_set;
3615 	struct blk_mq_hw_ctx *hctx;
3616 	int i, ret;
3617 
3618 	if (!set)
3619 		return -EINVAL;
3620 
3621 	if (q->nr_requests == nr)
3622 		return 0;
3623 
3624 	blk_mq_freeze_queue(q);
3625 	blk_mq_quiesce_queue(q);
3626 
3627 	ret = 0;
3628 	queue_for_each_hw_ctx(q, hctx, i) {
3629 		if (!hctx->tags)
3630 			continue;
3631 		/*
3632 		 * If we're using an MQ scheduler, just update the scheduler
3633 		 * queue depth. This is similar to what the old code would do.
3634 		 */
3635 		if (!hctx->sched_tags) {
3636 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3637 							false);
3638 			if (!ret && blk_mq_is_sbitmap_shared(set->flags))
3639 				blk_mq_tag_resize_shared_sbitmap(set, nr);
3640 		} else {
3641 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3642 							nr, true);
3643 			if (blk_mq_is_sbitmap_shared(set->flags)) {
3644 				hctx->sched_tags->bitmap_tags =
3645 					&q->sched_bitmap_tags;
3646 				hctx->sched_tags->breserved_tags =
3647 					&q->sched_breserved_tags;
3648 			}
3649 		}
3650 		if (ret)
3651 			break;
3652 		if (q->elevator && q->elevator->type->ops.depth_updated)
3653 			q->elevator->type->ops.depth_updated(hctx);
3654 	}
3655 	if (!ret) {
3656 		q->nr_requests = nr;
3657 		if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
3658 			sbitmap_queue_resize(&q->sched_bitmap_tags,
3659 					     nr - set->reserved_tags);
3660 	}
3661 
3662 	blk_mq_unquiesce_queue(q);
3663 	blk_mq_unfreeze_queue(q);
3664 
3665 	return ret;
3666 }
3667 
3668 /*
3669  * request_queue and elevator_type pair.
3670  * It is just used by __blk_mq_update_nr_hw_queues to cache
3671  * the elevator_type associated with a request_queue.
3672  */
3673 struct blk_mq_qe_pair {
3674 	struct list_head node;
3675 	struct request_queue *q;
3676 	struct elevator_type *type;
3677 };
3678 
3679 /*
3680  * Cache the elevator_type in the qe pair list and switch the
3681  * io scheduler to 'none'.
3682  */
3683 static bool blk_mq_elv_switch_none(struct list_head *head,
3684 		struct request_queue *q)
3685 {
3686 	struct blk_mq_qe_pair *qe;
3687 
3688 	if (!q->elevator)
3689 		return true;
3690 
3691 	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3692 	if (!qe)
3693 		return false;
3694 
3695 	INIT_LIST_HEAD(&qe->node);
3696 	qe->q = q;
3697 	qe->type = q->elevator->type;
3698 	list_add(&qe->node, head);
3699 
3700 	mutex_lock(&q->sysfs_lock);
3701 	/*
3702 	 * After elevator_switch_mq, the previous elevator_queue will be
3703 	 * released by elevator_release. The reference to the io scheduler
3704 	 * module obtained by elevator_get will also be put. So we need to
3705 	 * take a reference to the io scheduler module here to prevent it
3706 	 * from being removed.
3707 	 */
3708 	__module_get(qe->type->elevator_owner);
3709 	elevator_switch_mq(q, NULL);
3710 	mutex_unlock(&q->sysfs_lock);
3711 
3712 	return true;
3713 }
3714 
3715 static void blk_mq_elv_switch_back(struct list_head *head,
3716 		struct request_queue *q)
3717 {
3718 	struct blk_mq_qe_pair *qe;
3719 	struct elevator_type *t = NULL;
3720 
3721 	list_for_each_entry(qe, head, node)
3722 		if (qe->q == q) {
3723 			t = qe->type;
3724 			break;
3725 		}
3726 
3727 	if (!t)
3728 		return;
3729 
3730 	list_del(&qe->node);
3731 	kfree(qe);
3732 
3733 	mutex_lock(&q->sysfs_lock);
3734 	elevator_switch_mq(q, t);
3735 	mutex_unlock(&q->sysfs_lock);
3736 }
3737 
3738 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3739 							int nr_hw_queues)
3740 {
3741 	struct request_queue *q;
3742 	LIST_HEAD(head);
3743 	int prev_nr_hw_queues;
3744 
3745 	lockdep_assert_held(&set->tag_list_lock);
3746 
3747 	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3748 		nr_hw_queues = nr_cpu_ids;
3749 	if (nr_hw_queues < 1)
3750 		return;
3751 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
3752 		return;
3753 
3754 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3755 		blk_mq_freeze_queue(q);
3756 	/*
3757 	 * Switch IO scheduler to 'none', cleaning up the data associated
3758 	 * with the previous scheduler. We will switch back once we are done
3759 	 * updating the new sw to hw queue mappings.
3760 	 */
3761 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3762 		if (!blk_mq_elv_switch_none(&head, q))
3763 			goto switch_back;
3764 
3765 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3766 		blk_mq_debugfs_unregister_hctxs(q);
3767 		blk_mq_sysfs_unregister(q);
3768 	}
3769 
3770 	prev_nr_hw_queues = set->nr_hw_queues;
3771 	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
3772 	    0)
3773 		goto reregister;
3774 
3775 	set->nr_hw_queues = nr_hw_queues;
3776 fallback:
3777 	blk_mq_update_queue_map(set);
3778 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3779 		blk_mq_realloc_hw_ctxs(set, q);
3780 		if (q->nr_hw_queues != set->nr_hw_queues) {
3781 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
3782 					nr_hw_queues, prev_nr_hw_queues);
3783 			set->nr_hw_queues = prev_nr_hw_queues;
3784 			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3785 			goto fallback;
3786 		}
3787 		blk_mq_map_swqueue(q);
3788 	}
3789 
3790 reregister:
3791 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3792 		blk_mq_sysfs_register(q);
3793 		blk_mq_debugfs_register_hctxs(q);
3794 	}
3795 
3796 switch_back:
3797 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3798 		blk_mq_elv_switch_back(&head, q);
3799 
3800 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3801 		blk_mq_unfreeze_queue(q);
3802 }
3803 
3804 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3805 {
3806 	mutex_lock(&set->tag_list_lock);
3807 	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3808 	mutex_unlock(&set->tag_list_lock);
3809 }
3810 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
3811 
3812 /* Enable polling stats and return whether they were already enabled. */
3813 static bool blk_poll_stats_enable(struct request_queue *q)
3814 {
3815 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3816 	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3817 		return true;
3818 	blk_stat_add_callback(q, q->poll_cb);
3819 	return false;
3820 }
3821 
3822 static void blk_mq_poll_stats_start(struct request_queue *q)
3823 {
3824 	/*
3825 	 * We don't arm the callback if polling stats are not enabled or the
3826 	 * callback is already active.
3827 	 */
3828 	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3829 	    blk_stat_is_active(q->poll_cb))
3830 		return;
3831 
3832 	blk_stat_activate_msecs(q->poll_cb, 100);
3833 }
3834 
3835 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3836 {
3837 	struct request_queue *q = cb->data;
3838 	int bucket;
3839 
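	/*
	 * Snapshot the buckets that gathered samples in this window so
	 * blk_mq_poll_nsecs() can consult them later; buckets without
	 * samples keep their previous contents.
	 */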
3840 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3841 		if (cb->stat[bucket].nr_samples)
3842 			q->poll_stat[bucket] = cb->stat[bucket];
3843 	}
3844 }
3845 
3846 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3847 				       struct request *rq)
3848 {
3849 	unsigned long ret = 0;
3850 	int bucket;
3851 
3852 	/*
3853 	 * If stats collection isn't on, don't sleep; just enable it for
3854 	 * future users.
3855 	 */
3856 	if (!blk_poll_stats_enable(q))
3857 		return 0;
3858 
3859 	/*
3860 	 * As an optimistic guess, use half of the mean service time
3861 	 * for this type of request. We can (and should) make this smarter.
3862 	 * For instance, if the completion latencies are tight, we can
3863 	 * get closer than just half the mean. This is especially
3864 	 * important on devices where the completion latencies are longer
3865 	 * than ~10 usec. We do use the stats for the relevant IO size,
3866 	 * if available, which leads to better estimates.
3867 	 */
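	/*
	 * For example, with a (hypothetical) mean completion time of 8 usec
	 * in this bucket, the computation below yields (8000 + 1) / 2, i.e.
	 * a pre-poll sleep target of roughly 4000 ns.
	 */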
3868 	bucket = blk_mq_poll_stats_bkt(rq);
3869 	if (bucket < 0)
3870 		return ret;
3871 
3872 	if (q->poll_stat[bucket].nr_samples)
3873 		ret = (q->poll_stat[bucket].mean + 1) / 2;
3874 
3875 	return ret;
3876 }
3877 
3878 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3879 				     struct request *rq)
3880 {
3881 	struct hrtimer_sleeper hs;
3882 	enum hrtimer_mode mode;
3883 	unsigned int nsecs;
3884 	ktime_t kt;
3885 
3886 	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3887 		return false;
3888 
3889 	/*
3890 	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
3891 	 *
3892 	 *  0:	use half of prev avg
3893 	 * >0:	use this specific value
3894 	 */
3895 	if (q->poll_nsec > 0)
3896 		nsecs = q->poll_nsec;
3897 	else
3898 		nsecs = blk_mq_poll_nsecs(q, rq);
3899 
3900 	if (!nsecs)
3901 		return false;
3902 
3903 	rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3904 
3905 	/*
3906 	 * This will be replaced with the stats tracking code, using
3907 	 * 'avg_completion_time / 2' as the pre-sleep target.
3908 	 */
3909 	kt = nsecs;
3910 
3911 	mode = HRTIMER_MODE_REL;
3912 	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
3913 	hrtimer_set_expires(&hs.timer, kt);
3914 
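	/*
	 * Sleep until either the estimated completion time has passed or
	 * the request completes. After the first pass the expiry is treated
	 * as absolute, so restarting the timer does not extend the total
	 * sleep time.
	 */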
3915 	do {
3916 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3917 			break;
3918 		set_current_state(TASK_UNINTERRUPTIBLE);
3919 		hrtimer_sleeper_start_expires(&hs, mode);
3920 		if (hs.task)
3921 			io_schedule();
3922 		hrtimer_cancel(&hs.timer);
3923 		mode = HRTIMER_MODE_ABS;
3924 	} while (hs.task && !signal_pending(current));
3925 
3926 	__set_current_state(TASK_RUNNING);
3927 	destroy_hrtimer_on_stack(&hs.timer);
3928 	return true;
3929 }
3930 
3931 static bool blk_mq_poll_hybrid(struct request_queue *q,
3932 			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3933 {
3934 	struct request *rq;
3935 
3936 	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3937 		return false;
3938 
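	/*
	 * The cookie identifies both the hardware queue and the tag of the
	 * request being polled for; an "internal" cookie refers to a
	 * scheduler tag rather than a driver tag.
	 */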
3939 	if (!blk_qc_t_is_internal(cookie))
3940 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3941 	else {
3942 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3943 		/*
3944 		 * With scheduling, if the request has completed, we'll
3945 		 * get a NULL return here, as we clear the sched tag when
3946 		 * that happens. The request itself remains valid, as always,
3947 		 * so the NULL check alone is sufficient.
3948 		 */
3949 		if (!rq)
3950 			return false;
3951 	}
3952 
3953 	return blk_mq_poll_hybrid_sleep(q, rq);
3954 }
3955 
3956 /**
3957  * blk_poll - poll for IO completions
3958  * @q:  the queue
3959  * @cookie: cookie passed back at IO submission time
3960  * @spin: whether to spin for completions
3961  *
3962  * Description:
3963  *    Poll for completions on the passed in queue. Returns number of
3964  *    completed entries found. If @spin is true, then blk_poll will continue
3965  *    looping until at least one completion is found, unless the task is
3966  *    otherwise marked running (or we need to reschedule).
3967  */
3968 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3969 {
3970 	struct blk_mq_hw_ctx *hctx;
3971 	long state;
3972 
3973 	if (!blk_qc_t_valid(cookie) ||
3974 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3975 		return 0;
3976 
3977 	if (current->plug)
3978 		blk_flush_plug_list(current->plug, false);
3979 
3980 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3981 
3982 	/*
3983 	 * If we sleep, have the caller restart the poll loop to reset
3984 	 * the state. Like for the other success return cases, the
3985 	 * caller is responsible for checking if the IO completed. If
3986 	 * the IO isn't complete, we'll get called again and will go
3987 	 * straight to the busy poll loop. If the caller asked us not to
3988 	 * spin, we should not sleep either.
3989 	 */
3990 	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
3991 		return 1;
3992 
3993 	hctx->poll_considered++;
3994 
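	/*
	 * Busy-poll the hardware queue until the driver reports at least
	 * one completion, the task has been marked runnable (typically
	 * because the request we are waiting on completed), a signal is
	 * pending, we need to reschedule, or a single pass is enough
	 * because @spin is false.
	 */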
3995 	state = current->state;
3996 	do {
3997 		int ret;
3998 
3999 		hctx->poll_invoked++;
4000 
4001 		ret = q->mq_ops->poll(hctx);
4002 		if (ret > 0) {
4003 			hctx->poll_success++;
4004 			__set_current_state(TASK_RUNNING);
4005 			return ret;
4006 		}
4007 
4008 		if (signal_pending_state(state, current))
4009 			__set_current_state(TASK_RUNNING);
4010 
4011 		if (current->state == TASK_RUNNING)
4012 			return 1;
4013 		if (ret < 0 || !spin)
4014 			break;
4015 		cpu_relax();
4016 	} while (!need_resched());
4017 
4018 	__set_current_state(TASK_RUNNING);
4019 	return 0;
4020 }
4021 EXPORT_SYMBOL_GPL(blk_poll);
4022 
4023 unsigned int blk_mq_rq_cpu(struct request *rq)
4024 {
4025 	return rq->mq_ctx->cpu;
4026 }
4027 EXPORT_SYMBOL(blk_mq_rq_cpu);
4028 
4029 static int __init blk_mq_init(void)
4030 {
4031 	int i;
4032 
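	/*
	 * Set up the per-CPU lists and the softirq used to complete
	 * requests remotely, plus the CPU hotplug callbacks that hand off
	 * pending completions and requests when a CPU goes away.
	 */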
4033 	for_each_possible_cpu(i)
4034 		init_llist_head(&per_cpu(blk_cpu_done, i));
4035 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4036 
4037 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4038 				  "block/softirq:dead", NULL,
4039 				  blk_softirq_cpu_dead);
4040 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4041 				blk_mq_hctx_notify_dead);
4042 	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4043 				blk_mq_hctx_notify_online,
4044 				blk_mq_hctx_notify_offline);
4045 	return 0;
4046 }
4047 subsys_initcall(blk_mq_init);
4048