xref: /openbmc/linux/block/blk-mq.c (revision e657c18a)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28 
29 #include <trace/events/block.h>
30 
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-pm.h"
37 #include "blk-stat.h"
38 #include "blk-mq-sched.h"
39 #include "blk-rq-qos.h"
40 
41 static void blk_mq_poll_stats_start(struct request_queue *q);
42 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
43 
44 static int blk_mq_poll_stats_bkt(const struct request *rq)
45 {
46 	int ddir, bytes, bucket;
47 
48 	ddir = rq_data_dir(rq);
49 	bytes = blk_rq_bytes(rq);
50 
51 	bucket = ddir + 2*(ilog2(bytes) - 9);
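	/*
	 * Illustrative mapping (not in the original source): a 512-byte
	 * request lands in bucket 0 (read) or 1 (write), since ilog2(512) == 9;
	 * a 4096-byte request lands in bucket 6 (read) or 7 (write), since
	 * ddir + 2 * (ilog2(4096) - 9) == ddir + 6.
	 */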
52 
53 	if (bucket < 0)
54 		return -1;
55 	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
56 		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
57 
58 	return bucket;
59 }
60 
61 /*
62  * Check if any of the ctx, dispatch list or elevator
63  * have pending work in this hardware queue.
64  */
65 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
66 {
67 	return !list_empty_careful(&hctx->dispatch) ||
68 		sbitmap_any_bit_set(&hctx->ctx_map) ||
69 			blk_mq_sched_has_work(hctx);
70 }
71 
72 /*
73  * Mark this ctx as having pending work in this hardware queue
74  */
75 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
76 				     struct blk_mq_ctx *ctx)
77 {
78 	const int bit = ctx->index_hw[hctx->type];
79 
80 	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
81 		sbitmap_set_bit(&hctx->ctx_map, bit);
82 }
83 
84 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
85 				      struct blk_mq_ctx *ctx)
86 {
87 	const int bit = ctx->index_hw[hctx->type];
88 
89 	sbitmap_clear_bit(&hctx->ctx_map, bit);
90 }
91 
92 struct mq_inflight {
93 	struct hd_struct *part;
94 	unsigned int *inflight;
95 };
96 
97 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
98 				  struct request *rq, void *priv,
99 				  bool reserved)
100 {
101 	struct mq_inflight *mi = priv;
102 
103 	/*
104 	 * index[0] counts the specific partition that was asked for.
105 	 */
106 	if (rq->part == mi->part)
107 		mi->inflight[0]++;
108 
109 	return true;
110 }
111 
112 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
113 {
114 	unsigned inflight[2];
115 	struct mq_inflight mi = { .part = part, .inflight = inflight, };
116 
117 	inflight[0] = inflight[1] = 0;
118 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119 
120 	return inflight[0];
121 }
122 
123 static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
124 				     struct request *rq, void *priv,
125 				     bool reserved)
126 {
127 	struct mq_inflight *mi = priv;
128 
129 	if (rq->part == mi->part)
130 		mi->inflight[rq_data_dir(rq)]++;
131 
132 	return true;
133 }
134 
135 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
136 			 unsigned int inflight[2])
137 {
138 	struct mq_inflight mi = { .part = part, .inflight = inflight, };
139 
140 	inflight[0] = inflight[1] = 0;
141 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
142 }
143 
144 void blk_freeze_queue_start(struct request_queue *q)
145 {
146 	int freeze_depth;
147 
148 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
149 	if (freeze_depth == 1) {
150 		percpu_ref_kill(&q->q_usage_counter);
151 		if (queue_is_mq(q))
152 			blk_mq_run_hw_queues(q, false);
153 	}
154 }
155 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
156 
157 void blk_mq_freeze_queue_wait(struct request_queue *q)
158 {
159 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
160 }
161 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
162 
163 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
164 				     unsigned long timeout)
165 {
166 	return wait_event_timeout(q->mq_freeze_wq,
167 					percpu_ref_is_zero(&q->q_usage_counter),
168 					timeout);
169 }
170 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
171 
172 /*
173  * Guarantee no request is in use, so we can change any data structure of
174  * the queue afterward.
175  */
176 void blk_freeze_queue(struct request_queue *q)
177 {
178 	/*
179 	 * In the !blk_mq case we are only calling this to kill the
180 	 * q_usage_counter, otherwise this increases the freeze depth
181 	 * and waits for it to return to zero.  For this reason there is
182 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
183 	 * exported to drivers, as the only user of unfreeze is blk_mq.
184 	 */
185 	blk_freeze_queue_start(q);
186 	blk_mq_freeze_queue_wait(q);
187 }
188 
189 void blk_mq_freeze_queue(struct request_queue *q)
190 {
191 	/*
192 	 * ...just an alias to keep freeze and unfreeze actions balanced
193 	 * in the blk_mq_* namespace
194 	 */
195 	blk_freeze_queue(q);
196 }
197 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
198 
199 void blk_mq_unfreeze_queue(struct request_queue *q)
200 {
201 	int freeze_depth;
202 
203 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
204 	WARN_ON_ONCE(freeze_depth < 0);
205 	if (!freeze_depth) {
206 		percpu_ref_resurrect(&q->q_usage_counter);
207 		wake_up_all(&q->mq_freeze_wq);
208 	}
209 }
210 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
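
/*
 * Illustrative usage (a sketch, not part of the original source): callers
 * typically bracket queue-wide updates with a freeze/unfreeze pair:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue/hctx data that must not race with in-flight I/O ...
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_freeze_queue() only returns once q_usage_counter has dropped to
 * zero, i.e. once no requests are in flight against the queue.
 */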
211 
212 /*
213  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
214  * mpt3sas driver such that this function can be removed.
215  */
216 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
217 {
218 	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
219 }
220 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
221 
222 /**
223  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
224  * @q: request queue.
225  *
226  * Note: this function does not prevent the struct request end_io()
227  * callback from being invoked. Once this function returns, it is
228  * guaranteed that no dispatch can happen until the queue is unquiesced
229  * via blk_mq_unquiesce_queue().
230  */
231 void blk_mq_quiesce_queue(struct request_queue *q)
232 {
233 	struct blk_mq_hw_ctx *hctx;
234 	unsigned int i;
235 	bool rcu = false;
236 
237 	blk_mq_quiesce_queue_nowait(q);
238 
239 	queue_for_each_hw_ctx(q, hctx, i) {
240 		if (hctx->flags & BLK_MQ_F_BLOCKING)
241 			synchronize_srcu(hctx->srcu);
242 		else
243 			rcu = true;
244 	}
245 	if (rcu)
246 		synchronize_rcu();
247 }
248 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
249 
250 /*
251  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
252  * @q: request queue.
253  *
254  * This function restores the queue to the state it was in before
255  * blk_mq_quiesce_queue() was called.
256  */
257 void blk_mq_unquiesce_queue(struct request_queue *q)
258 {
259 	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
260 
261 	/* dispatch requests which are inserted during quiescing */
262 	blk_mq_run_hw_queues(q, true);
263 }
264 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
265 
266 void blk_mq_wake_waiters(struct request_queue *q)
267 {
268 	struct blk_mq_hw_ctx *hctx;
269 	unsigned int i;
270 
271 	queue_for_each_hw_ctx(q, hctx, i)
272 		if (blk_mq_hw_queue_mapped(hctx))
273 			blk_mq_tag_wakeup_all(hctx->tags, true);
274 }
275 
276 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
277 {
278 	return blk_mq_has_free_tags(hctx->tags);
279 }
280 EXPORT_SYMBOL(blk_mq_can_queue);
281 
282 /*
283  * Only need start/end time stamping if we have stats enabled, or using
284  * an IO scheduler.
285  */
286 static inline bool blk_mq_need_time_stamp(struct request *rq)
287 {
288 	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
289 }
290 
291 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
292 		unsigned int tag, unsigned int op)
293 {
294 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
295 	struct request *rq = tags->static_rqs[tag];
296 	req_flags_t rq_flags = 0;
297 
298 	if (data->flags & BLK_MQ_REQ_INTERNAL) {
299 		rq->tag = -1;
300 		rq->internal_tag = tag;
301 	} else {
302 		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
303 			rq_flags = RQF_MQ_INFLIGHT;
304 			atomic_inc(&data->hctx->nr_active);
305 		}
306 		rq->tag = tag;
307 		rq->internal_tag = -1;
308 		data->hctx->tags->rqs[rq->tag] = rq;
309 	}
310 
311 	/* csd/requeue_work/fifo_time is initialized before use */
312 	rq->q = data->q;
313 	rq->mq_ctx = data->ctx;
314 	rq->mq_hctx = data->hctx;
315 	rq->rq_flags = rq_flags;
316 	rq->cmd_flags = op;
317 	if (data->flags & BLK_MQ_REQ_PREEMPT)
318 		rq->rq_flags |= RQF_PREEMPT;
319 	if (blk_queue_io_stat(data->q))
320 		rq->rq_flags |= RQF_IO_STAT;
321 	INIT_LIST_HEAD(&rq->queuelist);
322 	INIT_HLIST_NODE(&rq->hash);
323 	RB_CLEAR_NODE(&rq->rb_node);
324 	rq->rq_disk = NULL;
325 	rq->part = NULL;
326 	if (blk_mq_need_time_stamp(rq))
327 		rq->start_time_ns = ktime_get_ns();
328 	else
329 		rq->start_time_ns = 0;
330 	rq->io_start_time_ns = 0;
331 	rq->nr_phys_segments = 0;
332 #if defined(CONFIG_BLK_DEV_INTEGRITY)
333 	rq->nr_integrity_segments = 0;
334 #endif
335 	/* tag was already set */
336 	rq->extra_len = 0;
337 	WRITE_ONCE(rq->deadline, 0);
338 
339 	rq->timeout = 0;
340 
341 	rq->end_io = NULL;
342 	rq->end_io_data = NULL;
343 
344 	data->ctx->rq_dispatched[op_is_sync(op)]++;
345 	refcount_set(&rq->ref, 1);
346 	return rq;
347 }
348 
349 static struct request *blk_mq_get_request(struct request_queue *q,
350 					  struct bio *bio,
351 					  struct blk_mq_alloc_data *data)
352 {
353 	struct elevator_queue *e = q->elevator;
354 	struct request *rq;
355 	unsigned int tag;
356 	bool put_ctx_on_error = false;
357 
358 	blk_queue_enter_live(q);
359 	data->q = q;
360 	if (likely(!data->ctx)) {
361 		data->ctx = blk_mq_get_ctx(q);
362 		put_ctx_on_error = true;
363 	}
364 	if (likely(!data->hctx))
365 		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
366 						data->ctx);
367 	if (data->cmd_flags & REQ_NOWAIT)
368 		data->flags |= BLK_MQ_REQ_NOWAIT;
369 
370 	if (e) {
371 		data->flags |= BLK_MQ_REQ_INTERNAL;
372 
373 		/*
374 		 * Flush requests are special and go directly to the
375 		 * dispatch list. Don't include reserved tags in the
376 		 * limiting, as it isn't useful.
377 		 */
378 		if (!op_is_flush(data->cmd_flags) &&
379 		    e->type->ops.limit_depth &&
380 		    !(data->flags & BLK_MQ_REQ_RESERVED))
381 			e->type->ops.limit_depth(data->cmd_flags, data);
382 	} else {
383 		blk_mq_tag_busy(data->hctx);
384 	}
385 
386 	tag = blk_mq_get_tag(data);
387 	if (tag == BLK_MQ_TAG_FAIL) {
388 		if (put_ctx_on_error) {
389 			blk_mq_put_ctx(data->ctx);
390 			data->ctx = NULL;
391 		}
392 		blk_queue_exit(q);
393 		return NULL;
394 	}
395 
396 	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
397 	if (!op_is_flush(data->cmd_flags)) {
398 		rq->elv.icq = NULL;
399 		if (e && e->type->ops.prepare_request) {
400 			if (e->type->icq_cache)
401 				blk_mq_sched_assign_ioc(rq);
402 
403 			e->type->ops.prepare_request(rq, bio);
404 			rq->rq_flags |= RQF_ELVPRIV;
405 		}
406 	}
407 	data->hctx->queued++;
408 	return rq;
409 }
410 
411 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
412 		blk_mq_req_flags_t flags)
413 {
414 	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
415 	struct request *rq;
416 	int ret;
417 
418 	ret = blk_queue_enter(q, flags);
419 	if (ret)
420 		return ERR_PTR(ret);
421 
422 	rq = blk_mq_get_request(q, NULL, &alloc_data);
423 	blk_queue_exit(q);
424 
425 	if (!rq)
426 		return ERR_PTR(-EWOULDBLOCK);
427 
428 	blk_mq_put_ctx(alloc_data.ctx);
429 
430 	rq->__data_len = 0;
431 	rq->__sector = (sector_t) -1;
432 	rq->bio = rq->biotail = NULL;
433 	return rq;
434 }
435 EXPORT_SYMBOL(blk_mq_alloc_request);
436 
437 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
438 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
439 {
440 	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
441 	struct request *rq;
442 	unsigned int cpu;
443 	int ret;
444 
445 	/*
446 	 * If the tag allocator sleeps we could get an allocation for a
447 	 * different hardware context.  No need to complicate the low level
448 	 * allocator for the rare use case of a command tied to
449 	 * a specific queue.
450 	 */
451 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
452 		return ERR_PTR(-EINVAL);
453 
454 	if (hctx_idx >= q->nr_hw_queues)
455 		return ERR_PTR(-EIO);
456 
457 	ret = blk_queue_enter(q, flags);
458 	if (ret)
459 		return ERR_PTR(ret);
460 
461 	/*
462 	 * Check if the hardware context is actually mapped to anything.
463 	 * If not tell the caller that it should skip this queue.
464 	 */
465 	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
466 	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
467 		blk_queue_exit(q);
468 		return ERR_PTR(-EXDEV);
469 	}
470 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
471 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
472 
473 	rq = blk_mq_get_request(q, NULL, &alloc_data);
474 	blk_queue_exit(q);
475 
476 	if (!rq)
477 		return ERR_PTR(-EWOULDBLOCK);
478 
479 	return rq;
480 }
481 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
482 
483 static void __blk_mq_free_request(struct request *rq)
484 {
485 	struct request_queue *q = rq->q;
486 	struct blk_mq_ctx *ctx = rq->mq_ctx;
487 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
488 	const int sched_tag = rq->internal_tag;
489 
490 	blk_pm_mark_last_busy(rq);
491 	rq->mq_hctx = NULL;
492 	if (rq->tag != -1)
493 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
494 	if (sched_tag != -1)
495 		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
496 	blk_mq_sched_restart(hctx);
497 	blk_queue_exit(q);
498 }
499 
500 void blk_mq_free_request(struct request *rq)
501 {
502 	struct request_queue *q = rq->q;
503 	struct elevator_queue *e = q->elevator;
504 	struct blk_mq_ctx *ctx = rq->mq_ctx;
505 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
506 
507 	if (rq->rq_flags & RQF_ELVPRIV) {
508 		if (e && e->type->ops.finish_request)
509 			e->type->ops.finish_request(rq);
510 		if (rq->elv.icq) {
511 			put_io_context(rq->elv.icq->ioc);
512 			rq->elv.icq = NULL;
513 		}
514 	}
515 
516 	ctx->rq_completed[rq_is_sync(rq)]++;
517 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
518 		atomic_dec(&hctx->nr_active);
519 
520 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
521 		laptop_io_completion(q->backing_dev_info);
522 
523 	rq_qos_done(q, rq);
524 
525 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
526 	if (refcount_dec_and_test(&rq->ref))
527 		__blk_mq_free_request(rq);
528 }
529 EXPORT_SYMBOL_GPL(blk_mq_free_request);
530 
531 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
532 {
533 	u64 now = 0;
534 
535 	if (blk_mq_need_time_stamp(rq))
536 		now = ktime_get_ns();
537 
538 	if (rq->rq_flags & RQF_STATS) {
539 		blk_mq_poll_stats_start(rq->q);
540 		blk_stat_add(rq, now);
541 	}
542 
543 	if (rq->internal_tag != -1)
544 		blk_mq_sched_completed_request(rq, now);
545 
546 	blk_account_io_done(rq, now);
547 
548 	if (rq->end_io) {
549 		rq_qos_done(rq->q, rq);
550 		rq->end_io(rq, error);
551 	} else {
552 		blk_mq_free_request(rq);
553 	}
554 }
555 EXPORT_SYMBOL(__blk_mq_end_request);
556 
557 void blk_mq_end_request(struct request *rq, blk_status_t error)
558 {
559 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
560 		BUG();
561 	__blk_mq_end_request(rq, error);
562 }
563 EXPORT_SYMBOL(blk_mq_end_request);
564 
565 static void __blk_mq_complete_request_remote(void *data)
566 {
567 	struct request *rq = data;
568 	struct request_queue *q = rq->q;
569 
570 	q->mq_ops->complete(rq);
571 }
572 
573 static void __blk_mq_complete_request(struct request *rq)
574 {
575 	struct blk_mq_ctx *ctx = rq->mq_ctx;
576 	struct request_queue *q = rq->q;
577 	bool shared = false;
578 	int cpu;
579 
580 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
581 	/*
582 	 * For most single-queue controllers there is only one irq vector
583 	 * for handling IO completion, and that irq's affinity is set to
584 	 * all possible CPUs. On most architectures, this affinity means the
585 	 * irq is handled on one specific CPU.
586 	 *
587 	 * So complete the IO request in softirq context in the single-queue
588 	 * case, to avoid degrading IO performance through irqs-off latency.
589 	 */
590 	if (q->nr_hw_queues == 1) {
591 		__blk_complete_request(rq);
592 		return;
593 	}
594 
595 	/*
596 	 * For a polled request, always complete locally; it's pointless
597 	 * to redirect the completion.
598 	 */
599 	if ((rq->cmd_flags & REQ_HIPRI) ||
600 	    !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
601 		q->mq_ops->complete(rq);
602 		return;
603 	}
604 
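	/*
	 * Otherwise, prefer completing on the CPU that submitted the request
	 * (ctx->cpu). For example, if the request was submitted on CPU 2 but
	 * the completion interrupt lands on CPU 5 and the two do not share a
	 * cache, an IPI redirects the completion back to CPU 2.
	 */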
605 	cpu = get_cpu();
606 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
607 		shared = cpus_share_cache(cpu, ctx->cpu);
608 
609 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
610 		rq->csd.func = __blk_mq_complete_request_remote;
611 		rq->csd.info = rq;
612 		rq->csd.flags = 0;
613 		smp_call_function_single_async(ctx->cpu, &rq->csd);
614 	} else {
615 		q->mq_ops->complete(rq);
616 	}
617 	put_cpu();
618 }
619 
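/*
 * hctx_lock()/hctx_unlock() provide the (S)RCU protection that
 * blk_mq_quiesce_queue() synchronizes against: hctxs flagged
 * BLK_MQ_F_BLOCKING use a per-hctx SRCU domain, all others use plain RCU.
 */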
620 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
621 	__releases(hctx->srcu)
622 {
623 	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
624 		rcu_read_unlock();
625 	else
626 		srcu_read_unlock(hctx->srcu, srcu_idx);
627 }
628 
629 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
630 	__acquires(hctx->srcu)
631 {
632 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
633 		/* shut up gcc false positive */
634 		*srcu_idx = 0;
635 		rcu_read_lock();
636 	} else
637 		*srcu_idx = srcu_read_lock(hctx->srcu);
638 }
639 
640 /**
641  * blk_mq_complete_request - end I/O on a request
642  * @rq:		the request being processed
643  *
644  * Description:
645  *	Ends all I/O on a request. It does not handle partial completions.
646  *	The actual completion happens out-of-order, through an IPI handler.
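 *
 *	Return: true if the request was completed, false if completion was
 *	skipped because the queue is set up to fake timeouts (see
 *	blk_should_fake_timeout()).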
647  **/
648 bool blk_mq_complete_request(struct request *rq)
649 {
650 	if (unlikely(blk_should_fake_timeout(rq->q)))
651 		return false;
652 	__blk_mq_complete_request(rq);
653 	return true;
654 }
655 EXPORT_SYMBOL(blk_mq_complete_request);
656 
657 int blk_mq_request_started(struct request *rq)
658 {
659 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
660 }
661 EXPORT_SYMBOL_GPL(blk_mq_request_started);
662 
663 void blk_mq_start_request(struct request *rq)
664 {
665 	struct request_queue *q = rq->q;
666 
667 	blk_mq_sched_started_request(rq);
668 
669 	trace_block_rq_issue(q, rq);
670 
671 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
672 		rq->io_start_time_ns = ktime_get_ns();
673 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
674 		rq->throtl_size = blk_rq_sectors(rq);
675 #endif
676 		rq->rq_flags |= RQF_STATS;
677 		rq_qos_issue(q, rq);
678 	}
679 
680 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
681 
682 	blk_add_timer(rq);
683 	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
684 
685 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
686 		/*
687 		 * Make sure space for the drain appears.  We know we can do
688 		 * this because max_hw_segments has been adjusted to be one
689 		 * fewer than the device can handle.
690 		 */
691 		rq->nr_phys_segments++;
692 	}
693 }
694 EXPORT_SYMBOL(blk_mq_start_request);
695 
696 static void __blk_mq_requeue_request(struct request *rq)
697 {
698 	struct request_queue *q = rq->q;
699 
700 	blk_mq_put_driver_tag(rq);
701 
702 	trace_block_rq_requeue(q, rq);
703 	rq_qos_requeue(q, rq);
704 
705 	if (blk_mq_request_started(rq)) {
706 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
707 		rq->rq_flags &= ~RQF_TIMED_OUT;
708 		if (q->dma_drain_size && blk_rq_bytes(rq))
709 			rq->nr_phys_segments--;
710 	}
711 }
712 
713 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
714 {
715 	__blk_mq_requeue_request(rq);
716 
717 	/* this request will be re-inserted into the io scheduler queue */
718 	blk_mq_sched_requeue_request(rq);
719 
720 	BUG_ON(!list_empty(&rq->queuelist));
721 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
722 }
723 EXPORT_SYMBOL(blk_mq_requeue_request);
724 
725 static void blk_mq_requeue_work(struct work_struct *work)
726 {
727 	struct request_queue *q =
728 		container_of(work, struct request_queue, requeue_work.work);
729 	LIST_HEAD(rq_list);
730 	struct request *rq, *next;
731 
732 	spin_lock_irq(&q->requeue_lock);
733 	list_splice_init(&q->requeue_list, &rq_list);
734 	spin_unlock_irq(&q->requeue_lock);
735 
736 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
737 		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
738 			continue;
739 
740 		rq->rq_flags &= ~RQF_SOFTBARRIER;
741 		list_del_init(&rq->queuelist);
742 		/*
743 		 * If RQF_DONTPREP is set, rq already contains driver-specific
744 		 * data, so insert it into the hctx dispatch list to avoid any
745 		 * merge.
746 		 */
747 		if (rq->rq_flags & RQF_DONTPREP)
748 			blk_mq_request_bypass_insert(rq, false);
749 		else
750 			blk_mq_sched_insert_request(rq, true, false, false);
751 	}
752 
753 	while (!list_empty(&rq_list)) {
754 		rq = list_entry(rq_list.next, struct request, queuelist);
755 		list_del_init(&rq->queuelist);
756 		blk_mq_sched_insert_request(rq, false, false, false);
757 	}
758 
759 	blk_mq_run_hw_queues(q, false);
760 }
761 
762 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
763 				bool kick_requeue_list)
764 {
765 	struct request_queue *q = rq->q;
766 	unsigned long flags;
767 
768 	/*
769 	 * We abuse this flag that is otherwise used by the I/O scheduler to
770 	 * request head insertion from the workqueue.
771 	 */
772 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
773 
774 	spin_lock_irqsave(&q->requeue_lock, flags);
775 	if (at_head) {
776 		rq->rq_flags |= RQF_SOFTBARRIER;
777 		list_add(&rq->queuelist, &q->requeue_list);
778 	} else {
779 		list_add_tail(&rq->queuelist, &q->requeue_list);
780 	}
781 	spin_unlock_irqrestore(&q->requeue_lock, flags);
782 
783 	if (kick_requeue_list)
784 		blk_mq_kick_requeue_list(q);
785 }
786 
787 void blk_mq_kick_requeue_list(struct request_queue *q)
788 {
789 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
790 }
791 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
792 
793 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
794 				    unsigned long msecs)
795 {
796 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
797 				    msecs_to_jiffies(msecs));
798 }
799 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
800 
801 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
802 {
803 	if (tag < tags->nr_tags) {
804 		prefetch(tags->rqs[tag]);
805 		return tags->rqs[tag];
806 	}
807 
808 	return NULL;
809 }
810 EXPORT_SYMBOL(blk_mq_tag_to_rq);
811 
812 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
813 			       void *priv, bool reserved)
814 {
815 	/*
816 	 * If we find a request that is inflight and the queue matches,
817 	 * we know the queue is busy. Return false to stop the iteration.
818 	 */
819 	if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
820 		bool *busy = priv;
821 
822 		*busy = true;
823 		return false;
824 	}
825 
826 	return true;
827 }
828 
829 bool blk_mq_queue_inflight(struct request_queue *q)
830 {
831 	bool busy = false;
832 
833 	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
834 	return busy;
835 }
836 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
837 
838 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
839 {
840 	req->rq_flags |= RQF_TIMED_OUT;
841 	if (req->q->mq_ops->timeout) {
842 		enum blk_eh_timer_return ret;
843 
844 		ret = req->q->mq_ops->timeout(req, reserved);
845 		if (ret == BLK_EH_DONE)
846 			return;
847 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
848 	}
849 
850 	blk_add_timer(req);
851 }
852 
853 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
854 {
855 	unsigned long deadline;
856 
857 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
858 		return false;
859 	if (rq->rq_flags & RQF_TIMED_OUT)
860 		return false;
861 
862 	deadline = READ_ONCE(rq->deadline);
863 	if (time_after_eq(jiffies, deadline))
864 		return true;
865 
866 	if (*next == 0)
867 		*next = deadline;
868 	else if (time_after(*next, deadline))
869 		*next = deadline;
870 	return false;
871 }
872 
873 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
874 		struct request *rq, void *priv, bool reserved)
875 {
876 	unsigned long *next = priv;
877 
878 	/*
879 	 * Just do a quick check if it is expired before locking the request in
880 	 * so we're not unnecessarily synchronizing across CPUs.
881 	 */
882 	if (!blk_mq_req_expired(rq, next))
883 		return true;
884 
885 	/*
886 	 * We have reason to believe the request may be expired. Take a
887 	 * reference on the request to lock this request's lifetime into its
888 	 * currently allocated context to prevent it from being reallocated in
889 	 * the event the completion by-passes this timeout handler.
890 	 *
891 	 * If the reference was already released, then the driver beat the
892 	 * timeout handler to posting a natural completion.
893 	 */
894 	if (!refcount_inc_not_zero(&rq->ref))
895 		return true;
896 
897 	/*
898 	 * The request is now locked and cannot be reallocated underneath the
899 	 * timeout handler's processing. Re-verify this exact request is truly
900 	 * expired; if it is not expired, then the request was completed and
901 	 * reallocated as a new request.
902 	 */
903 	if (blk_mq_req_expired(rq, next))
904 		blk_mq_rq_timed_out(rq, reserved);
905 	if (refcount_dec_and_test(&rq->ref))
906 		__blk_mq_free_request(rq);
907 
908 	return true;
909 }
910 
911 static void blk_mq_timeout_work(struct work_struct *work)
912 {
913 	struct request_queue *q =
914 		container_of(work, struct request_queue, timeout_work);
915 	unsigned long next = 0;
916 	struct blk_mq_hw_ctx *hctx;
917 	int i;
918 
919 	/* A deadlock might occur if a request is stuck requiring a
920 	 * timeout at the same time a queue freeze is waiting
921 	 * completion, since the timeout code would not be able to
922 	 * acquire the queue reference here.
923 	 *
924 	 * That's why we don't use blk_queue_enter here; instead, we use
925 	 * percpu_ref_tryget directly, because we need to be able to
926 	 * obtain a reference even in the short window between the queue
927 	 * starting to freeze, by dropping the first reference in
928 	 * blk_freeze_queue_start, and the moment the last request is
929 	 * consumed, marked by the instant q_usage_counter reaches
930 	 * zero.
931 	 */
932 	if (!percpu_ref_tryget(&q->q_usage_counter))
933 		return;
934 
935 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
936 
937 	if (next != 0) {
938 		mod_timer(&q->timeout, next);
939 	} else {
940 		/*
941 		 * Request timeouts are handled as a forward rolling timer. If
942 		 * we end up here it means that no requests are pending and
943 		 * also that no request has been pending for a while. Mark
944 		 * each hctx as idle.
945 		 */
946 		queue_for_each_hw_ctx(q, hctx, i) {
947 			/* the hctx may be unmapped, so check it here */
948 			if (blk_mq_hw_queue_mapped(hctx))
949 				blk_mq_tag_idle(hctx);
950 		}
951 	}
952 	blk_queue_exit(q);
953 }
954 
955 struct flush_busy_ctx_data {
956 	struct blk_mq_hw_ctx *hctx;
957 	struct list_head *list;
958 };
959 
960 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
961 {
962 	struct flush_busy_ctx_data *flush_data = data;
963 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
964 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
965 	enum hctx_type type = hctx->type;
966 
967 	spin_lock(&ctx->lock);
968 	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
969 	sbitmap_clear_bit(sb, bitnr);
970 	spin_unlock(&ctx->lock);
971 	return true;
972 }
973 
974 /*
975  * Process software queues that have been marked busy, splicing them
976  * to the for-dispatch list.
977  */
978 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
979 {
980 	struct flush_busy_ctx_data data = {
981 		.hctx = hctx,
982 		.list = list,
983 	};
984 
985 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
986 }
987 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
988 
989 struct dispatch_rq_data {
990 	struct blk_mq_hw_ctx *hctx;
991 	struct request *rq;
992 };
993 
994 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
995 		void *data)
996 {
997 	struct dispatch_rq_data *dispatch_data = data;
998 	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
999 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1000 	enum hctx_type type = hctx->type;
1001 
1002 	spin_lock(&ctx->lock);
1003 	if (!list_empty(&ctx->rq_lists[type])) {
1004 		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1005 		list_del_init(&dispatch_data->rq->queuelist);
1006 		if (list_empty(&ctx->rq_lists[type]))
1007 			sbitmap_clear_bit(sb, bitnr);
1008 	}
1009 	spin_unlock(&ctx->lock);
1010 
1011 	return !dispatch_data->rq;
1012 }
1013 
1014 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1015 					struct blk_mq_ctx *start)
1016 {
1017 	unsigned off = start ? start->index_hw[hctx->type] : 0;
1018 	struct dispatch_rq_data data = {
1019 		.hctx = hctx,
1020 		.rq   = NULL,
1021 	};
1022 
1023 	__sbitmap_for_each_set(&hctx->ctx_map, off,
1024 			       dispatch_rq_from_ctx, &data);
1025 
1026 	return data.rq;
1027 }
1028 
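/*
 * Map a per-dispatch request count to a slot in hctx->dispatched[]: zero
 * stays in slot 0, otherwise the slot is ilog2(queued) + 1 capped at
 * BLK_MQ_MAX_DISPATCH_ORDER - 1 (e.g. queued == 1 -> slot 1,
 * queued == 8 -> slot 4).
 */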
1029 static inline unsigned int queued_to_index(unsigned int queued)
1030 {
1031 	if (!queued)
1032 		return 0;
1033 
1034 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1035 }
1036 
1037 bool blk_mq_get_driver_tag(struct request *rq)
1038 {
1039 	struct blk_mq_alloc_data data = {
1040 		.q = rq->q,
1041 		.hctx = rq->mq_hctx,
1042 		.flags = BLK_MQ_REQ_NOWAIT,
1043 		.cmd_flags = rq->cmd_flags,
1044 	};
1045 	bool shared;
1046 
1047 	if (rq->tag != -1)
1048 		goto done;
1049 
1050 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
1051 		data.flags |= BLK_MQ_REQ_RESERVED;
1052 
1053 	shared = blk_mq_tag_busy(data.hctx);
1054 	rq->tag = blk_mq_get_tag(&data);
1055 	if (rq->tag >= 0) {
1056 		if (shared) {
1057 			rq->rq_flags |= RQF_MQ_INFLIGHT;
1058 			atomic_inc(&data.hctx->nr_active);
1059 		}
1060 		data.hctx->tags->rqs[rq->tag] = rq;
1061 	}
1062 
1063 done:
1064 	return rq->tag != -1;
1065 }
1066 
1067 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1068 				int flags, void *key)
1069 {
1070 	struct blk_mq_hw_ctx *hctx;
1071 
1072 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1073 
1074 	spin_lock(&hctx->dispatch_wait_lock);
1075 	if (!list_empty(&wait->entry)) {
1076 		struct sbitmap_queue *sbq;
1077 
1078 		list_del_init(&wait->entry);
1079 		sbq = &hctx->tags->bitmap_tags;
1080 		atomic_dec(&sbq->ws_active);
1081 	}
1082 	spin_unlock(&hctx->dispatch_wait_lock);
1083 
1084 	blk_mq_run_hw_queue(hctx, true);
1085 	return 1;
1086 }
1087 
1088 /*
1089  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1090  * the tag wakeups. For non-shared tags, we can simply mark us needing a
1091  * restart. For both cases, take care to check the condition again after
1092  * marking us as waiting.
1093  */
1094 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1095 				 struct request *rq)
1096 {
1097 	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1098 	struct wait_queue_head *wq;
1099 	wait_queue_entry_t *wait;
1100 	bool ret;
1101 
1102 	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
1103 		blk_mq_sched_mark_restart_hctx(hctx);
1104 
1105 		/*
1106 		 * It's possible that a tag was freed in the window between the
1107 		 * allocation failure and adding the hardware queue to the wait
1108 		 * queue.
1109 		 *
1110 		 * Don't clear RESTART here, someone else could have set it.
1111 		 * At most this will cost an extra queue run.
1112 		 */
1113 		return blk_mq_get_driver_tag(rq);
1114 	}
1115 
1116 	wait = &hctx->dispatch_wait;
1117 	if (!list_empty_careful(&wait->entry))
1118 		return false;
1119 
1120 	wq = &bt_wait_ptr(sbq, hctx)->wait;
1121 
1122 	spin_lock_irq(&wq->lock);
1123 	spin_lock(&hctx->dispatch_wait_lock);
1124 	if (!list_empty(&wait->entry)) {
1125 		spin_unlock(&hctx->dispatch_wait_lock);
1126 		spin_unlock_irq(&wq->lock);
1127 		return false;
1128 	}
1129 
1130 	atomic_inc(&sbq->ws_active);
1131 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1132 	__add_wait_queue(wq, wait);
1133 
1134 	/*
1135 	 * It's possible that a tag was freed in the window between the
1136 	 * allocation failure and adding the hardware queue to the wait
1137 	 * queue.
1138 	 */
1139 	ret = blk_mq_get_driver_tag(rq);
1140 	if (!ret) {
1141 		spin_unlock(&hctx->dispatch_wait_lock);
1142 		spin_unlock_irq(&wq->lock);
1143 		return false;
1144 	}
1145 
1146 	/*
1147 	 * We got a tag, remove ourselves from the wait queue to ensure
1148 	 * someone else gets the wakeup.
1149 	 */
1150 	list_del_init(&wait->entry);
1151 	atomic_dec(&sbq->ws_active);
1152 	spin_unlock(&hctx->dispatch_wait_lock);
1153 	spin_unlock_irq(&wq->lock);
1154 
1155 	return true;
1156 }
1157 
1158 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1159 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1160 /*
1161  * Update dispatch busy with an Exponential Weighted Moving Average (EWMA):
1162  * - EWMA is a simple way to compute a running average value
1163  * - weights of 7/8 and 1/8 are applied so that the value decays exponentially
1164  * - a factor of 4 is used to avoid results that are too small (i.e. 0); the
1165  *   exact factor doesn't matter much because EWMA decays exponentially anyway
1166  */
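/*
 * For example (illustrative numbers only): with dispatch_busy == 8, a busy
 * dispatch yields (8 * 7 + 16) / 8 = 9 while an idle one yields
 * (8 * 7) / 8 = 7, so the average moves by roughly 1/8 of its value per
 * sample.
 */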
1167 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1168 {
1169 	unsigned int ewma;
1170 
1171 	if (hctx->queue->elevator)
1172 		return;
1173 
1174 	ewma = hctx->dispatch_busy;
1175 
1176 	if (!ewma && !busy)
1177 		return;
1178 
1179 	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1180 	if (busy)
1181 		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1182 	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1183 
1184 	hctx->dispatch_busy = ewma;
1185 }
1186 
1187 #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
1188 
1189 /*
1190  * Returns true if we did some work AND can potentially do more.
1191  */
1192 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1193 			     bool got_budget)
1194 {
1195 	struct blk_mq_hw_ctx *hctx;
1196 	struct request *rq, *nxt;
1197 	bool no_tag = false;
1198 	int errors, queued;
1199 	blk_status_t ret = BLK_STS_OK;
1200 
1201 	if (list_empty(list))
1202 		return false;
1203 
1204 	WARN_ON(!list_is_singular(list) && got_budget);
1205 
1206 	/*
1207 	 * Now process all the entries, sending them to the driver.
1208 	 */
1209 	errors = queued = 0;
1210 	do {
1211 		struct blk_mq_queue_data bd;
1212 
1213 		rq = list_first_entry(list, struct request, queuelist);
1214 
1215 		hctx = rq->mq_hctx;
1216 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
1217 			break;
1218 
1219 		if (!blk_mq_get_driver_tag(rq)) {
1220 			/*
1221 			 * The initial allocation attempt failed, so we need to
1222 			 * rerun the hardware queue when a tag is freed. The
1223 			 * waitqueue takes care of that. If the queue is run
1224 			 * before we add this entry back on the dispatch list,
1225 			 * we'll re-run it below.
1226 			 */
1227 			if (!blk_mq_mark_tag_wait(hctx, rq)) {
1228 				blk_mq_put_dispatch_budget(hctx);
1229 				/*
1230 				 * For non-shared tags, the RESTART check
1231 				 * will suffice.
1232 				 */
1233 				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
1234 					no_tag = true;
1235 				break;
1236 			}
1237 		}
1238 
1239 		list_del_init(&rq->queuelist);
1240 
1241 		bd.rq = rq;
1242 
1243 		/*
1244 		 * Flag last if we have no more requests, or if we have more
1245 		 * but can't assign a driver tag to it.
1246 		 */
1247 		if (list_empty(list))
1248 			bd.last = true;
1249 		else {
1250 			nxt = list_first_entry(list, struct request, queuelist);
1251 			bd.last = !blk_mq_get_driver_tag(nxt);
1252 		}
1253 
1254 		ret = q->mq_ops->queue_rq(hctx, &bd);
1255 		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
1256 			/*
1257 			 * If an I/O scheduler has been configured and we got a
1258 			 * driver tag for the next request already, free it
1259 			 * again.
1260 			 */
1261 			if (!list_empty(list)) {
1262 				nxt = list_first_entry(list, struct request, queuelist);
1263 				blk_mq_put_driver_tag(nxt);
1264 			}
1265 			list_add(&rq->queuelist, list);
1266 			__blk_mq_requeue_request(rq);
1267 			break;
1268 		}
1269 
1270 		if (unlikely(ret != BLK_STS_OK)) {
1271 			errors++;
1272 			blk_mq_end_request(rq, BLK_STS_IOERR);
1273 			continue;
1274 		}
1275 
1276 		queued++;
1277 	} while (!list_empty(list));
1278 
1279 	hctx->dispatched[queued_to_index(queued)]++;
1280 
1281 	/*
1282 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1283 	 * that is where we will continue on next queue run.
1284 	 */
1285 	if (!list_empty(list)) {
1286 		bool needs_restart;
1287 
1288 		/*
1289 		 * If we didn't flush the entire list, we could have told
1290 		 * the driver there was more coming, but that turned out to
1291 		 * be a lie.
1292 		 */
1293 		if (q->mq_ops->commit_rqs)
1294 			q->mq_ops->commit_rqs(hctx);
1295 
1296 		spin_lock(&hctx->lock);
1297 		list_splice_init(list, &hctx->dispatch);
1298 		spin_unlock(&hctx->lock);
1299 
1300 		/*
1301 		 * If SCHED_RESTART was set by the caller of this function and
1302 		 * it is no longer set that means that it was cleared by another
1303 		 * thread and hence that a queue rerun is needed.
1304 		 *
1305 		 * If 'no_tag' is set, that means that we failed getting
1306 		 * a driver tag with an I/O scheduler attached. If our dispatch
1307 		 * waitqueue is no longer active, ensure that we run the queue
1308 		 * AFTER adding our entries back to the list.
1309 		 *
1310 		 * If no I/O scheduler has been configured it is possible that
1311 		 * the hardware queue got stopped and restarted before requests
1312 		 * were pushed back onto the dispatch list. Rerun the queue to
1313 		 * avoid starvation. Notes:
1314 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
1315 		 *   been stopped before rerunning a queue.
1316 		 * - Some but not all block drivers stop a queue before
1317 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1318 		 *   and dm-rq.
1319 		 *
1320 		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1321 		 * bit is set, run queue after a delay to avoid IO stalls
1322 		 * that could otherwise occur if the queue is idle.
1323 		 */
1324 		needs_restart = blk_mq_sched_needs_restart(hctx);
1325 		if (!needs_restart ||
1326 		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1327 			blk_mq_run_hw_queue(hctx, true);
1328 		else if (needs_restart && (ret == BLK_STS_RESOURCE))
1329 			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1330 
1331 		blk_mq_update_dispatch_busy(hctx, true);
1332 		return false;
1333 	} else
1334 		blk_mq_update_dispatch_busy(hctx, false);
1335 
1336 	/*
1337 	 * If the host/device is unable to accept more work, inform the
1338 	 * caller of that.
1339 	 */
1340 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1341 		return false;
1342 
1343 	return (queued + errors) != 0;
1344 }
1345 
1346 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1347 {
1348 	int srcu_idx;
1349 
1350 	/*
1351 	 * We should be running this queue from one of the CPUs that
1352 	 * are mapped to it.
1353 	 *
1354 	 * There are at least two related races now between setting
1355 	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1356 	 * __blk_mq_run_hw_queue():
1357 	 *
1358 	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1359 	 *   but later it becomes online, then this warning is completely
1360 	 *   harmless
1361 	 *
1362 	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1363 	 *   but later it becomes offline, then the warning can't be
1364 	 *   triggered, and we depend on blk-mq timeout handler to
1365 	 *   handle dispatched requests to this hctx
1366 	 */
1367 	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1368 		cpu_online(hctx->next_cpu)) {
1369 		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1370 			raw_smp_processor_id(),
1371 			cpumask_empty(hctx->cpumask) ? "inactive": "active");
1372 		dump_stack();
1373 	}
1374 
1375 	/*
1376 	 * We can't run the queue inline with ints disabled. Ensure that
1377 	 * we catch bad users of this early.
1378 	 */
1379 	WARN_ON_ONCE(in_interrupt());
1380 
1381 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1382 
1383 	hctx_lock(hctx, &srcu_idx);
1384 	blk_mq_sched_dispatch_requests(hctx);
1385 	hctx_unlock(hctx, srcu_idx);
1386 }
1387 
1388 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1389 {
1390 	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1391 
1392 	if (cpu >= nr_cpu_ids)
1393 		cpu = cpumask_first(hctx->cpumask);
1394 	return cpu;
1395 }
1396 
1397 /*
1398  * It'd be great if the workqueue API had a way to pass
1399  * in a mask and had some smarts for more clever placement.
1400  * For now we just round-robin here, switching for every
1401  * BLK_MQ_CPU_WORK_BATCH queued items.
1402  */
1403 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1404 {
1405 	bool tried = false;
1406 	int next_cpu = hctx->next_cpu;
1407 
1408 	if (hctx->queue->nr_hw_queues == 1)
1409 		return WORK_CPU_UNBOUND;
1410 
1411 	if (--hctx->next_cpu_batch <= 0) {
1412 select_cpu:
1413 		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1414 				cpu_online_mask);
1415 		if (next_cpu >= nr_cpu_ids)
1416 			next_cpu = blk_mq_first_mapped_cpu(hctx);
1417 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1418 	}
1419 
1420 	/*
1421 	 * Do an unbound schedule if we can't find an online CPU for this hctx;
1422 	 * this should only happen while handling CPU DEAD.
1423 	 */
1424 	if (!cpu_online(next_cpu)) {
1425 		if (!tried) {
1426 			tried = true;
1427 			goto select_cpu;
1428 		}
1429 
1430 		/*
1431 		 * Make sure to re-select a CPU next time, once CPUs in
1432 		 * hctx->cpumask come online again.
1433 		 */
1434 		hctx->next_cpu = next_cpu;
1435 		hctx->next_cpu_batch = 1;
1436 		return WORK_CPU_UNBOUND;
1437 	}
1438 
1439 	hctx->next_cpu = next_cpu;
1440 	return next_cpu;
1441 }
1442 
1443 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1444 					unsigned long msecs)
1445 {
1446 	if (unlikely(blk_mq_hctx_stopped(hctx)))
1447 		return;
1448 
1449 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1450 		int cpu = get_cpu();
1451 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1452 			__blk_mq_run_hw_queue(hctx);
1453 			put_cpu();
1454 			return;
1455 		}
1456 
1457 		put_cpu();
1458 	}
1459 
1460 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1461 				    msecs_to_jiffies(msecs));
1462 }
1463 
1464 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1465 {
1466 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
1467 }
1468 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1469 
1470 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1471 {
1472 	int srcu_idx;
1473 	bool need_run;
1474 
1475 	/*
1476 	 * When the queue is quiesced, we may be switching the io scheduler,
1477 	 * updating nr_hw_queues, or doing other things, and we can't run the
1478 	 * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1479 	 *
1480 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
1481 	 * quiesced.
1482 	 */
1483 	hctx_lock(hctx, &srcu_idx);
1484 	need_run = !blk_queue_quiesced(hctx->queue) &&
1485 		blk_mq_hctx_has_pending(hctx);
1486 	hctx_unlock(hctx, srcu_idx);
1487 
1488 	if (need_run) {
1489 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
1490 		return true;
1491 	}
1492 
1493 	return false;
1494 }
1495 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1496 
1497 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1498 {
1499 	struct blk_mq_hw_ctx *hctx;
1500 	int i;
1501 
1502 	queue_for_each_hw_ctx(q, hctx, i) {
1503 		if (blk_mq_hctx_stopped(hctx))
1504 			continue;
1505 
1506 		blk_mq_run_hw_queue(hctx, async);
1507 	}
1508 }
1509 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1510 
1511 /**
1512  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1513  * @q: request queue.
1514  *
1515  * The caller is responsible for serializing this function against
1516  * blk_mq_{start,stop}_hw_queue().
1517  */
1518 bool blk_mq_queue_stopped(struct request_queue *q)
1519 {
1520 	struct blk_mq_hw_ctx *hctx;
1521 	int i;
1522 
1523 	queue_for_each_hw_ctx(q, hctx, i)
1524 		if (blk_mq_hctx_stopped(hctx))
1525 			return true;
1526 
1527 	return false;
1528 }
1529 EXPORT_SYMBOL(blk_mq_queue_stopped);
1530 
1531 /*
1532  * This function is often used by a driver to pause .queue_rq() when
1533  * there aren't enough resources or some conditions aren't satisfied, and
1534  * BLK_STS_RESOURCE is usually returned.
1535  *
1536  * We do not guarantee that dispatch can be drained or blocked
1537  * after blk_mq_stop_hw_queue() returns. Please use
1538  * blk_mq_quiesce_queue() for that requirement.
1539  */
1540 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1541 {
1542 	cancel_delayed_work(&hctx->run_work);
1543 
1544 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1545 }
1546 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1547 
1548 /*
1549  * This function is often used by a driver to pause .queue_rq() when
1550  * there aren't enough resources or some conditions aren't satisfied, and
1551  * BLK_STS_RESOURCE is usually returned.
1552  *
1553  * We do not guarantee that dispatch can be drained or blocked
1554  * after blk_mq_stop_hw_queues() returns. Please use
1555  * blk_mq_quiesce_queue() for that requirement.
1556  */
1557 void blk_mq_stop_hw_queues(struct request_queue *q)
1558 {
1559 	struct blk_mq_hw_ctx *hctx;
1560 	int i;
1561 
1562 	queue_for_each_hw_ctx(q, hctx, i)
1563 		blk_mq_stop_hw_queue(hctx);
1564 }
1565 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1566 
1567 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1568 {
1569 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1570 
1571 	blk_mq_run_hw_queue(hctx, false);
1572 }
1573 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1574 
1575 void blk_mq_start_hw_queues(struct request_queue *q)
1576 {
1577 	struct blk_mq_hw_ctx *hctx;
1578 	int i;
1579 
1580 	queue_for_each_hw_ctx(q, hctx, i)
1581 		blk_mq_start_hw_queue(hctx);
1582 }
1583 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1584 
1585 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1586 {
1587 	if (!blk_mq_hctx_stopped(hctx))
1588 		return;
1589 
1590 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1591 	blk_mq_run_hw_queue(hctx, async);
1592 }
1593 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1594 
1595 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1596 {
1597 	struct blk_mq_hw_ctx *hctx;
1598 	int i;
1599 
1600 	queue_for_each_hw_ctx(q, hctx, i)
1601 		blk_mq_start_stopped_hw_queue(hctx, async);
1602 }
1603 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1604 
1605 static void blk_mq_run_work_fn(struct work_struct *work)
1606 {
1607 	struct blk_mq_hw_ctx *hctx;
1608 
1609 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1610 
1611 	/*
1612 	 * If we are stopped, don't run the queue.
1613 	 */
1614 	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
1615 		return;
1616 
1617 	__blk_mq_run_hw_queue(hctx);
1618 }
1619 
1620 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1621 					    struct request *rq,
1622 					    bool at_head)
1623 {
1624 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1625 	enum hctx_type type = hctx->type;
1626 
1627 	lockdep_assert_held(&ctx->lock);
1628 
1629 	trace_block_rq_insert(hctx->queue, rq);
1630 
1631 	if (at_head)
1632 		list_add(&rq->queuelist, &ctx->rq_lists[type]);
1633 	else
1634 		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1635 }
1636 
1637 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1638 			     bool at_head)
1639 {
1640 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1641 
1642 	lockdep_assert_held(&ctx->lock);
1643 
1644 	__blk_mq_insert_req_list(hctx, rq, at_head);
1645 	blk_mq_hctx_mark_pending(hctx, ctx);
1646 }
1647 
1648 /*
1649  * Should only be used carefully, when the caller knows we want to
1650  * bypass a potential IO scheduler on the target device.
1651  */
1652 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1653 {
1654 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1655 
1656 	spin_lock(&hctx->lock);
1657 	list_add_tail(&rq->queuelist, &hctx->dispatch);
1658 	spin_unlock(&hctx->lock);
1659 
1660 	if (run_queue)
1661 		blk_mq_run_hw_queue(hctx, false);
1662 }
1663 
1664 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1665 			    struct list_head *list)
1666 
1667 {
1668 	struct request *rq;
1669 	enum hctx_type type = hctx->type;
1670 
1671 	/*
1672 	 * Preemption doesn't flush the plug list, so it's possible that
1673 	 * ctx->cpu is offline now.
1674 	 */
1675 	list_for_each_entry(rq, list, queuelist) {
1676 		BUG_ON(rq->mq_ctx != ctx);
1677 		trace_block_rq_insert(hctx->queue, rq);
1678 	}
1679 
1680 	spin_lock(&ctx->lock);
1681 	list_splice_tail_init(list, &ctx->rq_lists[type]);
1682 	blk_mq_hctx_mark_pending(hctx, ctx);
1683 	spin_unlock(&ctx->lock);
1684 }
1685 
1686 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
1687 {
1688 	struct request *rqa = container_of(a, struct request, queuelist);
1689 	struct request *rqb = container_of(b, struct request, queuelist);
1690 
1691 	if (rqa->mq_ctx < rqb->mq_ctx)
1692 		return -1;
1693 	else if (rqa->mq_ctx > rqb->mq_ctx)
1694 		return 1;
1695 	else if (rqa->mq_hctx < rqb->mq_hctx)
1696 		return -1;
1697 	else if (rqa->mq_hctx > rqb->mq_hctx)
1698 		return 1;
1699 
1700 	return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1701 }
1702 
1703 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1704 {
1705 	struct blk_mq_hw_ctx *this_hctx;
1706 	struct blk_mq_ctx *this_ctx;
1707 	struct request_queue *this_q;
1708 	struct request *rq;
1709 	LIST_HEAD(list);
1710 	LIST_HEAD(rq_list);
1711 	unsigned int depth;
1712 
1713 	list_splice_init(&plug->mq_list, &list);
1714 
1715 	if (plug->rq_count > 2 && plug->multiple_queues)
1716 		list_sort(NULL, &list, plug_rq_cmp);
1717 	plug->rq_count = 0;
1718 
1719 	this_q = NULL;
1720 	this_hctx = NULL;
1721 	this_ctx = NULL;
1722 	depth = 0;
1723 
1724 	while (!list_empty(&list)) {
1725 		rq = list_entry_rq(list.next);
1726 		list_del_init(&rq->queuelist);
1727 		BUG_ON(!rq->q);
1728 		if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
1729 			if (this_hctx) {
1730 				trace_block_unplug(this_q, depth, !from_schedule);
1731 				blk_mq_sched_insert_requests(this_hctx, this_ctx,
1732 								&rq_list,
1733 								from_schedule);
1734 			}
1735 
1736 			this_q = rq->q;
1737 			this_ctx = rq->mq_ctx;
1738 			this_hctx = rq->mq_hctx;
1739 			depth = 0;
1740 		}
1741 
1742 		depth++;
1743 		list_add_tail(&rq->queuelist, &rq_list);
1744 	}
1745 
1746 	/*
1747 	 * If 'this_hctx' is set, we know we have entries to complete
1748 	 * on 'rq_list'. Do those.
1749 	 */
1750 	if (this_hctx) {
1751 		trace_block_unplug(this_q, depth, !from_schedule);
1752 		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1753 						from_schedule);
1754 	}
1755 }
1756 
1757 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1758 {
1759 	blk_init_request_from_bio(rq, bio);
1760 
1761 	blk_account_io_start(rq, true);
1762 }
1763 
1764 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1765 					    struct request *rq,
1766 					    blk_qc_t *cookie, bool last)
1767 {
1768 	struct request_queue *q = rq->q;
1769 	struct blk_mq_queue_data bd = {
1770 		.rq = rq,
1771 		.last = last,
1772 	};
1773 	blk_qc_t new_cookie;
1774 	blk_status_t ret;
1775 
1776 	new_cookie = request_to_qc_t(hctx, rq);
1777 
1778 	/*
1779 	 * If the queue accepted the request we are done. On a hard error the
1780 	 * caller may kill the request. For any other (busy) error, just add
1781 	 * it back to our list as we previously would have done.
1782 	 */
1783 	ret = q->mq_ops->queue_rq(hctx, &bd);
1784 	switch (ret) {
1785 	case BLK_STS_OK:
1786 		blk_mq_update_dispatch_busy(hctx, false);
1787 		*cookie = new_cookie;
1788 		break;
1789 	case BLK_STS_RESOURCE:
1790 	case BLK_STS_DEV_RESOURCE:
1791 		blk_mq_update_dispatch_busy(hctx, true);
1792 		__blk_mq_requeue_request(rq);
1793 		break;
1794 	default:
1795 		blk_mq_update_dispatch_busy(hctx, false);
1796 		*cookie = BLK_QC_T_NONE;
1797 		break;
1798 	}
1799 
1800 	return ret;
1801 }
1802 
1803 blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1804 						struct request *rq,
1805 						blk_qc_t *cookie,
1806 						bool bypass, bool last)
1807 {
1808 	struct request_queue *q = rq->q;
1809 	bool run_queue = true;
1810 	blk_status_t ret = BLK_STS_RESOURCE;
1811 	int srcu_idx;
1812 	bool force = false;
1813 
1814 	hctx_lock(hctx, &srcu_idx);
1815 	/*
1816 	 * hctx_lock is needed before checking quiesced flag.
1817 	 *
1818 	 * When the queue is stopped or quiesced, ignore 'bypass', insert
1819 	 * the request and return BLK_STS_OK to the caller, so the driver
1820 	 * does not try to dispatch it again.
1821 	 */
1822 	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
1823 		run_queue = false;
1824 		bypass = false;
1825 		goto out_unlock;
1826 	}
1827 
1828 	if (unlikely(q->elevator && !bypass))
1829 		goto out_unlock;
1830 
1831 	if (!blk_mq_get_dispatch_budget(hctx))
1832 		goto out_unlock;
1833 
1834 	if (!blk_mq_get_driver_tag(rq)) {
1835 		blk_mq_put_dispatch_budget(hctx);
1836 		goto out_unlock;
1837 	}
1838 
1839 	/*
1840 	 * Always add a request that has been through
1841 	 * .queue_rq() to the hardware dispatch list.
1842 	 */
1843 	force = true;
1844 	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
1845 out_unlock:
1846 	hctx_unlock(hctx, srcu_idx);
1847 	switch (ret) {
1848 	case BLK_STS_OK:
1849 		break;
1850 	case BLK_STS_DEV_RESOURCE:
1851 	case BLK_STS_RESOURCE:
1852 		if (force) {
1853 			blk_mq_request_bypass_insert(rq, run_queue);
1854 			/*
1855 			 * We have to return BLK_STS_OK in the bypass
1856 			 * case (used by DM) to avoid a livelock.
1857 			 * Otherwise, return the real result to indicate
1858 			 * whether the request was direct-issued successfully.
1859 			 */
1860 			ret = bypass ? BLK_STS_OK : ret;
1861 		} else if (!bypass) {
1862 			blk_mq_sched_insert_request(rq, false,
1863 						    run_queue, false);
1864 		}
1865 		break;
1866 	default:
1867 		if (!bypass)
1868 			blk_mq_end_request(rq, ret);
1869 		break;
1870 	}
1871 
1872 	return ret;
1873 }
1874 
1875 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1876 		struct list_head *list)
1877 {
1878 	blk_qc_t unused;
1879 	blk_status_t ret = BLK_STS_OK;
1880 
1881 	while (!list_empty(list)) {
1882 		struct request *rq = list_first_entry(list, struct request,
1883 				queuelist);
1884 
1885 		list_del_init(&rq->queuelist);
1886 		if (ret == BLK_STS_OK)
1887 			ret = blk_mq_try_issue_directly(hctx, rq, &unused,
1888 							false,
1889 							list_empty(list));
1890 		else
1891 			blk_mq_sched_insert_request(rq, false, true, false);
1892 	}
1893 
1894 	/*
1895 	 * If we didn't flush the entire list, we could have told
1896 	 * the driver there was more coming, but that turned out to
1897 	 * be a lie.
1898 	 */
1899 	if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
1900 		hctx->queue->mq_ops->commit_rqs(hctx);
1901 }
1902 
1903 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1904 {
1905 	list_add_tail(&rq->queuelist, &plug->mq_list);
1906 	plug->rq_count++;
1907 	if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
1908 		struct request *tmp;
1909 
1910 		tmp = list_first_entry(&plug->mq_list, struct request,
1911 						queuelist);
1912 		if (tmp->q != rq->q)
1913 			plug->multiple_queues = true;
1914 	}
1915 }
1916 
1917 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1918 {
1919 	const int is_sync = op_is_sync(bio->bi_opf);
1920 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1921 	struct blk_mq_alloc_data data = { .flags = 0};
1922 	struct request *rq;
1923 	struct blk_plug *plug;
1924 	struct request *same_queue_rq = NULL;
1925 	blk_qc_t cookie;
1926 
1927 	blk_queue_bounce(q, &bio);
1928 
1929 	blk_queue_split(q, &bio);
1930 
1931 	if (!bio_integrity_prep(bio))
1932 		return BLK_QC_T_NONE;
1933 
1934 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1935 	    blk_attempt_plug_merge(q, bio, &same_queue_rq))
1936 		return BLK_QC_T_NONE;
1937 
1938 	if (blk_mq_sched_bio_merge(q, bio))
1939 		return BLK_QC_T_NONE;
1940 
1941 	rq_qos_throttle(q, bio);
1942 
1943 	data.cmd_flags = bio->bi_opf;
1944 	rq = blk_mq_get_request(q, bio, &data);
1945 	if (unlikely(!rq)) {
1946 		rq_qos_cleanup(q, bio);
1947 		if (bio->bi_opf & REQ_NOWAIT)
1948 			bio_wouldblock_error(bio);
1949 		return BLK_QC_T_NONE;
1950 	}
1951 
1952 	trace_block_getrq(q, bio, bio->bi_opf);
1953 
1954 	rq_qos_track(q, rq, bio);
1955 
1956 	cookie = request_to_qc_t(data.hctx, rq);
1957 
1958 	plug = current->plug;
1959 	if (unlikely(is_flush_fua)) {
1960 		blk_mq_put_ctx(data.ctx);
1961 		blk_mq_bio_to_request(rq, bio);
1962 
1963 		/* bypass scheduler for flush rq */
1964 		blk_insert_flush(rq);
1965 		blk_mq_run_hw_queue(data.hctx, true);
1966 	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
1967 		/*
1968 		 * Use plugging if we have a ->commit_rqs() hook as well, as
1969 		 * we know the driver uses bd->last in a smart fashion.
1970 		 */
1971 		unsigned int request_count = plug->rq_count;
1972 		struct request *last = NULL;
1973 
1974 		blk_mq_put_ctx(data.ctx);
1975 		blk_mq_bio_to_request(rq, bio);
1976 
1977 		if (!request_count)
1978 			trace_block_plug(q);
1979 		else
1980 			last = list_entry_rq(plug->mq_list.prev);
1981 
1982 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1983 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1984 			blk_flush_plug_list(plug, false);
1985 			trace_block_plug(q);
1986 		}
1987 
1988 		blk_add_rq_to_plug(plug, rq);
1989 	} else if (plug && !blk_queue_nomerges(q)) {
1990 		blk_mq_bio_to_request(rq, bio);
1991 
1992 		/*
1993 		 * We do limited plugging. If the bio can be merged, do that.
1994 		 * Otherwise the existing request in the plug list will be
1995 	 * issued. So the plug list will have one request at most.
1996 		 * The plug list might get flushed before this. If that happens,
1997 		 * the plug list is empty, and same_queue_rq is invalid.
1998 		 */
1999 		if (list_empty(&plug->mq_list))
2000 			same_queue_rq = NULL;
2001 		if (same_queue_rq) {
2002 			list_del_init(&same_queue_rq->queuelist);
2003 			plug->rq_count--;
2004 		}
2005 		blk_add_rq_to_plug(plug, rq);
2006 
2007 		blk_mq_put_ctx(data.ctx);
2008 
2009 		if (same_queue_rq) {
2010 			data.hctx = same_queue_rq->mq_hctx;
2011 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2012 					&cookie, false, true);
2013 		}
2014 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
2015 			!data.hctx->dispatch_busy)) {
2016 		blk_mq_put_ctx(data.ctx);
2017 		blk_mq_bio_to_request(rq, bio);
2018 		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
2019 	} else {
2020 		blk_mq_put_ctx(data.ctx);
2021 		blk_mq_bio_to_request(rq, bio);
2022 		blk_mq_sched_insert_request(rq, false, true, true);
2023 	}
2024 
2025 	return cookie;
2026 }
2027 
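/*
 * Free the static requests backing @tags: give the driver a chance to tear
 * down per-request data via ->exit_request(), then release the pages the
 * requests were carved out of.
 */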
2028 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2029 		     unsigned int hctx_idx)
2030 {
2031 	struct page *page;
2032 
2033 	if (tags->rqs && set->ops->exit_request) {
2034 		int i;
2035 
2036 		for (i = 0; i < tags->nr_tags; i++) {
2037 			struct request *rq = tags->static_rqs[i];
2038 
2039 			if (!rq)
2040 				continue;
2041 			set->ops->exit_request(set, rq, hctx_idx);
2042 			tags->static_rqs[i] = NULL;
2043 		}
2044 	}
2045 
2046 	while (!list_empty(&tags->page_list)) {
2047 		page = list_first_entry(&tags->page_list, struct page, lru);
2048 		list_del_init(&page->lru);
2049 		/*
2050 		 * Remove kmemleak object previously allocated in
2051 		 * blk_mq_init_rq_map().
2052 		 */
2053 		kmemleak_free(page_address(page));
2054 		__free_pages(page, page->private);
2055 	}
2056 }
2057 
2058 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2059 {
2060 	kfree(tags->rqs);
2061 	tags->rqs = NULL;
2062 	kfree(tags->static_rqs);
2063 	tags->static_rqs = NULL;
2064 
2065 	blk_mq_free_tags(tags);
2066 }
2067 
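/*
 * Allocate the tag map plus the rqs/static_rqs pointer arrays for one
 * hardware queue, preferring memory on the node the queue is mapped to.
 */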
2068 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2069 					unsigned int hctx_idx,
2070 					unsigned int nr_tags,
2071 					unsigned int reserved_tags)
2072 {
2073 	struct blk_mq_tags *tags;
2074 	int node;
2075 
2076 	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2077 	if (node == NUMA_NO_NODE)
2078 		node = set->numa_node;
2079 
2080 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2081 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2082 	if (!tags)
2083 		return NULL;
2084 
2085 	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2086 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2087 				 node);
2088 	if (!tags->rqs) {
2089 		blk_mq_free_tags(tags);
2090 		return NULL;
2091 	}
2092 
2093 	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2094 					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2095 					node);
2096 	if (!tags->static_rqs) {
2097 		kfree(tags->rqs);
2098 		blk_mq_free_tags(tags);
2099 		return NULL;
2100 	}
2101 
2102 	return tags;
2103 }
2104 
2105 static size_t order_to_size(unsigned int order)
2106 {
2107 	return (size_t)PAGE_SIZE << order;
2108 }
2109 
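/*
 * Let the driver set up its per-request data via ->init_request() (if
 * provided) and start the request out in the IDLE state.
 */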
2110 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2111 			       unsigned int hctx_idx, int node)
2112 {
2113 	int ret;
2114 
2115 	if (set->ops->init_request) {
2116 		ret = set->ops->init_request(set, rq, hctx_idx, node);
2117 		if (ret)
2118 			return ret;
2119 	}
2120 
2121 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2122 	return 0;
2123 }
2124 
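/*
 * Allocate the static requests for one hardware queue. Requests are carved
 * out of higher-order page allocations where possible, falling back to
 * smaller orders when memory is tight; each request is also run through
 * blk_mq_init_request().
 */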
2125 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2126 		     unsigned int hctx_idx, unsigned int depth)
2127 {
2128 	unsigned int i, j, entries_per_page, max_order = 4;
2129 	size_t rq_size, left;
2130 	int node;
2131 
2132 	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2133 	if (node == NUMA_NO_NODE)
2134 		node = set->numa_node;
2135 
2136 	INIT_LIST_HEAD(&tags->page_list);
2137 
2138 	/*
2139 	 * rq_size is the size of the request plus driver payload, rounded
2140 	 * to the cacheline size
2141 	 */
2142 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
2143 				cache_line_size());
2144 	left = rq_size * depth;
2145 
2146 	for (i = 0; i < depth; ) {
2147 		int this_order = max_order;
2148 		struct page *page;
2149 		int to_do;
2150 		void *p;
2151 
2152 		while (this_order && left < order_to_size(this_order - 1))
2153 			this_order--;
2154 
2155 		do {
2156 			page = alloc_pages_node(node,
2157 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2158 				this_order);
2159 			if (page)
2160 				break;
2161 			if (!this_order--)
2162 				break;
2163 			if (order_to_size(this_order) < rq_size)
2164 				break;
2165 		} while (1);
2166 
2167 		if (!page)
2168 			goto fail;
2169 
2170 		page->private = this_order;
2171 		list_add_tail(&page->lru, &tags->page_list);
2172 
2173 		p = page_address(page);
2174 		/*
2175 		 * Allow kmemleak to scan these pages as they contain pointers
2176 		 * to additional allocations made via e.g. ops->init_request().
2177 		 */
2178 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2179 		entries_per_page = order_to_size(this_order) / rq_size;
2180 		to_do = min(entries_per_page, depth - i);
2181 		left -= to_do * rq_size;
2182 		for (j = 0; j < to_do; j++) {
2183 			struct request *rq = p;
2184 
2185 			tags->static_rqs[i] = rq;
2186 			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2187 				tags->static_rqs[i] = NULL;
2188 				goto fail;
2189 			}
2190 
2191 			p += rq_size;
2192 			i++;
2193 		}
2194 	}
2195 	return 0;
2196 
2197 fail:
2198 	blk_mq_free_rqs(set, tags, hctx_idx);
2199 	return -ENOMEM;
2200 }
2201 
2202 /*
2203  * 'cpu' is going away. Splice any existing rq_list entries from this
2204  * software queue to the hw queue dispatch list, and ensure that it
2205  * gets run.
2206  */
2207 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2208 {
2209 	struct blk_mq_hw_ctx *hctx;
2210 	struct blk_mq_ctx *ctx;
2211 	LIST_HEAD(tmp);
2212 	enum hctx_type type;
2213 
2214 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2215 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2216 	type = hctx->type;
2217 
2218 	spin_lock(&ctx->lock);
2219 	if (!list_empty(&ctx->rq_lists[type])) {
2220 		list_splice_init(&ctx->rq_lists[type], &tmp);
2221 		blk_mq_hctx_clear_pending(hctx, ctx);
2222 	}
2223 	spin_unlock(&ctx->lock);
2224 
2225 	if (list_empty(&tmp))
2226 		return 0;
2227 
2228 	spin_lock(&hctx->lock);
2229 	list_splice_tail_init(&tmp, &hctx->dispatch);
2230 	spin_unlock(&hctx->lock);
2231 
2232 	blk_mq_run_hw_queue(hctx, true);
2233 	return 0;
2234 }
2235 
2236 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2237 {
2238 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2239 					    &hctx->cpuhp_dead);
2240 }
2241 
2242 /* hctx->ctxs will be freed in queue's release handler */
2243 static void blk_mq_exit_hctx(struct request_queue *q,
2244 		struct blk_mq_tag_set *set,
2245 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2246 {
2247 	if (blk_mq_hw_queue_mapped(hctx))
2248 		blk_mq_tag_idle(hctx);
2249 
2250 	if (set->ops->exit_request)
2251 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2252 
2253 	if (set->ops->exit_hctx)
2254 		set->ops->exit_hctx(hctx, hctx_idx);
2255 
2256 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2257 		cleanup_srcu_struct(hctx->srcu);
2258 
2259 	blk_mq_remove_cpuhp(hctx);
2260 	blk_free_flush_queue(hctx->fq);
2261 	sbitmap_free(&hctx->ctx_map);
2262 }
2263 
2264 static void blk_mq_exit_hw_queues(struct request_queue *q,
2265 		struct blk_mq_tag_set *set, int nr_queue)
2266 {
2267 	struct blk_mq_hw_ctx *hctx;
2268 	unsigned int i;
2269 
2270 	queue_for_each_hw_ctx(q, hctx, i) {
2271 		if (i == nr_queue)
2272 			break;
2273 		blk_mq_debugfs_unregister_hctx(hctx);
2274 		blk_mq_exit_hctx(q, set, hctx, i);
2275 	}
2276 }
2277 
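/*
 * Initialize one hardware context: per-hctx locks and lists, the ctx_map,
 * the driver's ->init_hctx() state and the per-hctx flush queue.
 */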
2278 static int blk_mq_init_hctx(struct request_queue *q,
2279 		struct blk_mq_tag_set *set,
2280 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2281 {
2282 	int node;
2283 
2284 	node = hctx->numa_node;
2285 	if (node == NUMA_NO_NODE)
2286 		node = hctx->numa_node = set->numa_node;
2287 
2288 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2289 	spin_lock_init(&hctx->lock);
2290 	INIT_LIST_HEAD(&hctx->dispatch);
2291 	hctx->queue = q;
2292 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2293 
2294 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2295 
2296 	hctx->tags = set->tags[hctx_idx];
2297 
2298 	/*
2299 	 * Allocate space for all possible cpus to avoid allocation at
2300 	 * runtime
2301 	 */
2302 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2303 			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
2304 	if (!hctx->ctxs)
2305 		goto unregister_cpu_notifier;
2306 
2307 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2308 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
2309 		goto free_ctxs;
2310 
2311 	hctx->nr_ctx = 0;
2312 
2313 	spin_lock_init(&hctx->dispatch_wait_lock);
2314 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2315 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2316 
2317 	if (set->ops->init_hctx &&
2318 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2319 		goto free_bitmap;
2320 
2321 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
2322 			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
2323 	if (!hctx->fq)
2324 		goto exit_hctx;
2325 
2326 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
2327 		goto free_fq;
2328 
2329 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2330 		init_srcu_struct(hctx->srcu);
2331 
2332 	return 0;
2333 
2334  free_fq:
2335 	blk_free_flush_queue(hctx->fq);
2336  exit_hctx:
2337 	if (set->ops->exit_hctx)
2338 		set->ops->exit_hctx(hctx, hctx_idx);
2339  free_bitmap:
2340 	sbitmap_free(&hctx->ctx_map);
2341  free_ctxs:
2342 	kfree(hctx->ctxs);
2343  unregister_cpu_notifier:
2344 	blk_mq_remove_cpuhp(hctx);
2345 	return -1;
2346 }
2347 
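/*
 * Initialize the per-CPU software queues (blk_mq_ctx) and, when there is
 * more than one hardware queue, seed each mapped hctx's NUMA node from the
 * CPUs that map to it.
 */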
2348 static void blk_mq_init_cpu_queues(struct request_queue *q,
2349 				   unsigned int nr_hw_queues)
2350 {
2351 	struct blk_mq_tag_set *set = q->tag_set;
2352 	unsigned int i, j;
2353 
2354 	for_each_possible_cpu(i) {
2355 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2356 		struct blk_mq_hw_ctx *hctx;
2357 		int k;
2358 
2359 		__ctx->cpu = i;
2360 		spin_lock_init(&__ctx->lock);
2361 		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2362 			INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2363 
2364 		__ctx->queue = q;
2365 
2366 		/*
2367 		 * Set local node, IFF we have more than one hw queue. If
2368 		 * not, we remain on the home node of the device
2369 		 */
2370 		for (j = 0; j < set->nr_maps; j++) {
2371 			hctx = blk_mq_map_queue_type(q, j, i);
2372 			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2373 				hctx->numa_node = local_memory_node(cpu_to_node(i));
2374 		}
2375 	}
2376 }
2377 
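/* Allocate both the tag map and the static requests for one hw queue. */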
2378 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2379 {
2380 	int ret = 0;
2381 
2382 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2383 					set->queue_depth, set->reserved_tags);
2384 	if (!set->tags[hctx_idx])
2385 		return false;
2386 
2387 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2388 				set->queue_depth);
2389 	if (!ret)
2390 		return true;
2391 
2392 	blk_mq_free_rq_map(set->tags[hctx_idx]);
2393 	set->tags[hctx_idx] = NULL;
2394 	return false;
2395 }
2396 
2397 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2398 					 unsigned int hctx_idx)
2399 {
2400 	if (set->tags && set->tags[hctx_idx]) {
2401 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2402 		blk_mq_free_rq_map(set->tags[hctx_idx]);
2403 		set->tags[hctx_idx] = NULL;
2404 	}
2405 }
2406 
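/*
 * (Re)build the software to hardware queue mapping: assign every possible
 * CPU's ctx to the hctxs chosen by the queue maps, and resize each hctx's
 * ctx_map to the number of software queues that ended up mapped to it.
 */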
2407 static void blk_mq_map_swqueue(struct request_queue *q)
2408 {
2409 	unsigned int i, j, hctx_idx;
2410 	struct blk_mq_hw_ctx *hctx;
2411 	struct blk_mq_ctx *ctx;
2412 	struct blk_mq_tag_set *set = q->tag_set;
2413 
2414 	/*
2415 	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2416 	 */
2417 	mutex_lock(&q->sysfs_lock);
2418 
2419 	queue_for_each_hw_ctx(q, hctx, i) {
2420 		cpumask_clear(hctx->cpumask);
2421 		hctx->nr_ctx = 0;
2422 		hctx->dispatch_from = NULL;
2423 	}
2424 
2425 	/*
2426 	 * Map software to hardware queues.
2427 	 *
2428 	 * If the cpu isn't present, the cpu is mapped to the first hctx.
2429 	 */
2430 	for_each_possible_cpu(i) {
2431 		hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
2432 		/* unmapped hw queue can be remapped after CPU topo changed */
2433 		if (!set->tags[hctx_idx] &&
2434 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2435 			/*
2436 			 * If tags initialization fails for some hctx,
2437 			 * that hctx won't be brought online.  In this
2438 			 * case, remap the current ctx to hctx[0], which
2439 			 * is guaranteed to always have tags allocated.
2440 			 */
2441 			set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
2442 		}
2443 
2444 		ctx = per_cpu_ptr(q->queue_ctx, i);
2445 		for (j = 0; j < set->nr_maps; j++) {
2446 			if (!set->map[j].nr_queues) {
2447 				ctx->hctxs[j] = blk_mq_map_queue_type(q,
2448 						HCTX_TYPE_DEFAULT, i);
2449 				continue;
2450 			}
2451 
2452 			hctx = blk_mq_map_queue_type(q, j, i);
2453 			ctx->hctxs[j] = hctx;
2454 			/*
2455 			 * If the CPU is already set in the mask, then we've
2456 			 * mapped this one already. This can happen if
2457 			 * devices share queues across queue maps.
2458 			 */
2459 			if (cpumask_test_cpu(i, hctx->cpumask))
2460 				continue;
2461 
2462 			cpumask_set_cpu(i, hctx->cpumask);
2463 			hctx->type = j;
2464 			ctx->index_hw[hctx->type] = hctx->nr_ctx;
2465 			hctx->ctxs[hctx->nr_ctx++] = ctx;
2466 
2467 			/*
2468 			 * If the nr_ctx type overflows, we have exceeded the
2469 			 * amount of sw queues we can support.
2470 			 */
2471 			BUG_ON(!hctx->nr_ctx);
2472 		}
2473 
2474 		for (; j < HCTX_MAX_TYPES; j++)
2475 			ctx->hctxs[j] = blk_mq_map_queue_type(q,
2476 					HCTX_TYPE_DEFAULT, i);
2477 	}
2478 
2479 	mutex_unlock(&q->sysfs_lock);
2480 
2481 	queue_for_each_hw_ctx(q, hctx, i) {
2482 		/*
2483 		 * If no software queues are mapped to this hardware queue,
2484 		 * disable it and free the request entries.
2485 		 */
2486 		if (!hctx->nr_ctx) {
2487 			/* Never unmap queue 0.  We need it as a
2488 			 * fallback in case a new remap fails to
2489 			 * allocate tags.
2490 			 */
2491 			if (i && set->tags[i])
2492 				blk_mq_free_map_and_requests(set, i);
2493 
2494 			hctx->tags = NULL;
2495 			continue;
2496 		}
2497 
2498 		hctx->tags = set->tags[i];
2499 		WARN_ON(!hctx->tags);
2500 
2501 		/*
2502 		 * Set the map size to the number of mapped software queues.
2503 		 * This is more accurate and more efficient than looping
2504 		 * over all possibly mapped software queues.
2505 		 */
2506 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2507 
2508 		/*
2509 		 * Initialize batch roundrobin counts
2510 		 */
2511 		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2512 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2513 	}
2514 }
2515 
2516 /*
2517  * Caller needs to ensure that we're either frozen/quiesced, or that
2518  * the queue isn't live yet.
2519  */
2520 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2521 {
2522 	struct blk_mq_hw_ctx *hctx;
2523 	int i;
2524 
2525 	queue_for_each_hw_ctx(q, hctx, i) {
2526 		if (shared)
2527 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
2528 		else
2529 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2530 	}
2531 }
2532 
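/*
 * Propagate the (un)shared tag state to every queue in the tag set,
 * freezing each queue while its hctx flags are updated.
 */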
2533 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2534 					bool shared)
2535 {
2536 	struct request_queue *q;
2537 
2538 	lockdep_assert_held(&set->tag_list_lock);
2539 
2540 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2541 		blk_mq_freeze_queue(q);
2542 		queue_set_hctx_shared(q, shared);
2543 		blk_mq_unfreeze_queue(q);
2544 	}
2545 }
2546 
2547 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2548 {
2549 	struct blk_mq_tag_set *set = q->tag_set;
2550 
2551 	mutex_lock(&set->tag_list_lock);
2552 	list_del_rcu(&q->tag_set_list);
2553 	if (list_is_singular(&set->tag_list)) {
2554 		/* just transitioned to unshared */
2555 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
2556 		/* update existing queue */
2557 		blk_mq_update_tag_set_depth(set, false);
2558 	}
2559 	mutex_unlock(&set->tag_list_lock);
2560 	INIT_LIST_HEAD(&q->tag_set_list);
2561 }
2562 
2563 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2564 				     struct request_queue *q)
2565 {
2566 	mutex_lock(&set->tag_list_lock);
2567 
2568 	/*
2569 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2570 	 */
2571 	if (!list_empty(&set->tag_list) &&
2572 	    !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2573 		set->flags |= BLK_MQ_F_TAG_SHARED;
2574 		/* update existing queue */
2575 		blk_mq_update_tag_set_depth(set, true);
2576 	}
2577 	if (set->flags & BLK_MQ_F_TAG_SHARED)
2578 		queue_set_hctx_shared(q, true);
2579 	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2580 
2581 	mutex_unlock(&set->tag_list_lock);
2582 }
2583 
2584 /* All allocations will be freed in release handler of q->mq_kobj */
2585 static int blk_mq_alloc_ctxs(struct request_queue *q)
2586 {
2587 	struct blk_mq_ctxs *ctxs;
2588 	int cpu;
2589 
2590 	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
2591 	if (!ctxs)
2592 		return -ENOMEM;
2593 
2594 	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2595 	if (!ctxs->queue_ctx)
2596 		goto fail;
2597 
2598 	for_each_possible_cpu(cpu) {
2599 		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
2600 		ctx->ctxs = ctxs;
2601 	}
2602 
2603 	q->mq_kobj = &ctxs->kobj;
2604 	q->queue_ctx = ctxs->queue_ctx;
2605 
2606 	return 0;
2607  fail:
2608 	kfree(ctxs);
2609 	return -ENOMEM;
2610 }
2611 
2612 /*
2613  * This is the actual release handler for mq, but we do it from the
2614  * request queue's release handler to avoid use-after-free and
2615  * headaches, because q->mq_kobj shouldn't have been introduced,
2616  * but we can't group the ctx/kctx kobjects without it.
2617  */
2618 void blk_mq_release(struct request_queue *q)
2619 {
2620 	struct blk_mq_hw_ctx *hctx;
2621 	unsigned int i;
2622 
2623 	/* hctx kobj stays in hctx */
2624 	queue_for_each_hw_ctx(q, hctx, i) {
2625 		if (!hctx)
2626 			continue;
2627 		kobject_put(&hctx->kobj);
2628 	}
2629 
2630 	kfree(q->queue_hw_ctx);
2631 
2632 	/*
2633 	 * Release .mq_kobj and the sw queues' kobjects now because
2634 	 * both share their lifetime with the request queue.
2635 	 */
2636 	blk_mq_sysfs_deinit(q);
2637 }
2638 
2639 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2640 {
2641 	struct request_queue *uninit_q, *q;
2642 
2643 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2644 	if (!uninit_q)
2645 		return ERR_PTR(-ENOMEM);
2646 
2647 	q = blk_mq_init_allocated_queue(set, uninit_q);
2648 	if (IS_ERR(q))
2649 		blk_cleanup_queue(uninit_q);
2650 
2651 	return q;
2652 }
2653 EXPORT_SYMBOL(blk_mq_init_queue);
2654 
2655 /*
2656  * Helper for setting up a queue with mq ops, given queue depth, and
2657  * the passed in mq ops flags.
2658  */
2659 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
2660 					   const struct blk_mq_ops *ops,
2661 					   unsigned int queue_depth,
2662 					   unsigned int set_flags)
2663 {
2664 	struct request_queue *q;
2665 	int ret;
2666 
2667 	memset(set, 0, sizeof(*set));
2668 	set->ops = ops;
2669 	set->nr_hw_queues = 1;
2670 	set->nr_maps = 1;
2671 	set->queue_depth = queue_depth;
2672 	set->numa_node = NUMA_NO_NODE;
2673 	set->flags = set_flags;
2674 
2675 	ret = blk_mq_alloc_tag_set(set);
2676 	if (ret)
2677 		return ERR_PTR(ret);
2678 
2679 	q = blk_mq_init_queue(set);
2680 	if (IS_ERR(q)) {
2681 		blk_mq_free_tag_set(set);
2682 		return q;
2683 	}
2684 
2685 	return q;
2686 }
2687 EXPORT_SYMBOL(blk_mq_init_sq_queue);
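/*
 * Minimal usage sketch (illustrative only; "my_set", "my_mq_ops" and the
 * depth/flags values below are hypothetical, not part of this file):
 *
 *	static struct blk_mq_tag_set my_set;
 *	struct request_queue *q;
 *
 *	q = blk_mq_init_sq_queue(&my_set, &my_mq_ops, 128,
 *				 BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */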
2688 
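/*
 * Size of a hardware context, including the trailing srcu_struct that is
 * only needed for BLK_MQ_F_BLOCKING tag sets.
 */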
2689 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2690 {
2691 	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2692 
2693 	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2694 			   __alignof__(struct blk_mq_hw_ctx)) !=
2695 		     sizeof(struct blk_mq_hw_ctx));
2696 
2697 	if (tag_set->flags & BLK_MQ_F_BLOCKING)
2698 		hw_ctx_size += sizeof(struct srcu_struct);
2699 
2700 	return hw_ctx_size;
2701 }
2702 
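/*
 * Allocate a hardware context on @node and set it up via blk_mq_init_hctx();
 * returns NULL on any failure.
 */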
2703 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
2704 		struct blk_mq_tag_set *set, struct request_queue *q,
2705 		int hctx_idx, int node)
2706 {
2707 	struct blk_mq_hw_ctx *hctx;
2708 
2709 	hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
2710 			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2711 			node);
2712 	if (!hctx)
2713 		return NULL;
2714 
2715 	if (!zalloc_cpumask_var_node(&hctx->cpumask,
2716 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2717 				node)) {
2718 		kfree(hctx);
2719 		return NULL;
2720 	}
2721 
2722 	atomic_set(&hctx->nr_active, 0);
2723 	hctx->numa_node = node;
2724 	hctx->queue_num = hctx_idx;
2725 
2726 	if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
2727 		free_cpumask_var(hctx->cpumask);
2728 		kfree(hctx);
2729 		return NULL;
2730 	}
2731 	blk_mq_hctx_kobj_init(hctx);
2732 
2733 	return hctx;
2734 }
2735 
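/*
 * (Re)allocate the hardware contexts of @q to match set->nr_hw_queues,
 * reusing an existing hctx when it already lives on the right NUMA node and
 * tearing down any hctxs that are no longer needed.
 */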
2736 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2737 						struct request_queue *q)
2738 {
2739 	int i, j, end;
2740 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2741 
2742 	/* protect against switching io scheduler  */
2743 	mutex_lock(&q->sysfs_lock);
2744 	for (i = 0; i < set->nr_hw_queues; i++) {
2745 		int node;
2746 		struct blk_mq_hw_ctx *hctx;
2747 
2748 		node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
2749 		/*
2750 		 * If the hw queue has been mapped to another numa node,
2751 		 * we need to realloc the hctx. If allocation fails, fall back
2752 		 * to using the previous one.
2753 		 */
2754 		if (hctxs[i] && (hctxs[i]->numa_node == node))
2755 			continue;
2756 
2757 		hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
2758 		if (hctx) {
2759 			if (hctxs[i]) {
2760 				blk_mq_exit_hctx(q, set, hctxs[i], i);
2761 				kobject_put(&hctxs[i]->kobj);
2762 			}
2763 			hctxs[i] = hctx;
2764 		} else {
2765 			if (hctxs[i])
2766 				pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
2767 						node,
2768 						hctxs[i]->numa_node);
2769 			else
2770 				break;
2771 		}
2772 	}
2773 	/*
2774 	 * If increasing nr_hw_queues failed, free the newly allocated
2775 	 * hctxs and keep the previous q->nr_hw_queues.
2776 	 */
2777 	if (i != set->nr_hw_queues) {
2778 		j = q->nr_hw_queues;
2779 		end = i;
2780 	} else {
2781 		j = i;
2782 		end = q->nr_hw_queues;
2783 		q->nr_hw_queues = set->nr_hw_queues;
2784 	}
2785 
2786 	for (; j < end; j++) {
2787 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2788 
2789 		if (hctx) {
2790 			if (hctx->tags)
2791 				blk_mq_free_map_and_requests(set, j);
2792 			blk_mq_exit_hctx(q, set, hctx, j);
2793 			kobject_put(&hctx->kobj);
2794 			hctxs[j] = NULL;
2795 
2796 		}
2797 	}
2798 	mutex_unlock(&q->sysfs_lock);
2799 }
2800 
2801 /*
2802  * Maximum number of hardware queues we support. With a single queue map,
2803  * we'll never have more than the number of CPUs (software queues). With
2804  * multiple maps, the tag_set user may have set ->nr_hw_queues larger.
2805  */
2806 static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
2807 {
2808 	if (set->nr_maps == 1)
2809 		return nr_cpu_ids;
2810 
2811 	return max(set->nr_hw_queues, nr_cpu_ids);
2812 }
2813 
2814 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2815 						  struct request_queue *q)
2816 {
2817 	/* mark the queue as mq asap */
2818 	q->mq_ops = set->ops;
2819 
2820 	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2821 					     blk_mq_poll_stats_bkt,
2822 					     BLK_MQ_POLL_STATS_BKTS, q);
2823 	if (!q->poll_cb)
2824 		goto err_exit;
2825 
2826 	if (blk_mq_alloc_ctxs(q))
2827 		goto err_exit;
2828 
2829 	/* init q->mq_kobj and sw queues' kobjects */
2830 	blk_mq_sysfs_init(q);
2831 
2832 	q->nr_queues = nr_hw_queues(set);
2833 	q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
2834 						GFP_KERNEL, set->numa_node);
2835 	if (!q->queue_hw_ctx)
2836 		goto err_sys_init;
2837 
2838 	blk_mq_realloc_hw_ctxs(set, q);
2839 	if (!q->nr_hw_queues)
2840 		goto err_hctxs;
2841 
2842 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2843 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2844 
2845 	q->tag_set = set;
2846 
2847 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2848 	if (set->nr_maps > HCTX_TYPE_POLL &&
2849 	    set->map[HCTX_TYPE_POLL].nr_queues)
2850 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2851 
2852 	q->sg_reserved_size = INT_MAX;
2853 
2854 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2855 	INIT_LIST_HEAD(&q->requeue_list);
2856 	spin_lock_init(&q->requeue_lock);
2857 
2858 	blk_queue_make_request(q, blk_mq_make_request);
2859 
2860 	/*
2861 	 * Do this after blk_queue_make_request() overrides it...
2862 	 */
2863 	q->nr_requests = set->queue_depth;
2864 
2865 	/*
2866 	 * Default to classic polling
2867 	 */
2868 	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
2869 
2870 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2871 	blk_mq_add_queue_tag_set(set, q);
2872 	blk_mq_map_swqueue(q);
2873 
2874 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2875 		int ret;
2876 
2877 		ret = elevator_init_mq(q);
2878 		if (ret)
2879 			return ERR_PTR(ret);
2880 	}
2881 
2882 	return q;
2883 
2884 err_hctxs:
2885 	kfree(q->queue_hw_ctx);
2886 err_sys_init:
2887 	blk_mq_sysfs_deinit(q);
2888 err_exit:
2889 	q->mq_ops = NULL;
2890 	return ERR_PTR(-ENOMEM);
2891 }
2892 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2893 
2894 void blk_mq_free_queue(struct request_queue *q)
2895 {
2896 	struct blk_mq_tag_set	*set = q->tag_set;
2897 
2898 	blk_mq_del_queue_tag_set(q);
2899 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2900 }
2901 
2902 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2903 {
2904 	int i;
2905 
2906 	for (i = 0; i < set->nr_hw_queues; i++)
2907 		if (!__blk_mq_alloc_rq_map(set, i))
2908 			goto out_unwind;
2909 
2910 	return 0;
2911 
2912 out_unwind:
2913 	while (--i >= 0)
2914 		blk_mq_free_rq_map(set->tags[i]);
2915 
2916 	return -ENOMEM;
2917 }
2918 
2919 /*
2920  * Allocate the request maps associated with this tag_set. Note that this
2921  * may reduce the depth asked for, if memory is tight. set->queue_depth
2922  * will be updated to reflect the allocated depth.
2923  */
2924 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2925 {
2926 	unsigned int depth;
2927 	int err;
2928 
2929 	depth = set->queue_depth;
2930 	do {
2931 		err = __blk_mq_alloc_rq_maps(set);
2932 		if (!err)
2933 			break;
2934 
2935 		set->queue_depth >>= 1;
2936 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2937 			err = -ENOMEM;
2938 			break;
2939 		}
2940 	} while (set->queue_depth);
2941 
2942 	if (!set->queue_depth || err) {
2943 		pr_err("blk-mq: failed to allocate request map\n");
2944 		return -ENOMEM;
2945 	}
2946 
2947 	if (depth != set->queue_depth)
2948 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2949 						depth, set->queue_depth);
2950 
2951 	return 0;
2952 }
2953 
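/*
 * Build the CPU to hardware queue maps, either through the driver's
 * ->map_queues() callback or the default blk_mq_map_queues() spread.
 */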
2954 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2955 {
2956 	if (set->ops->map_queues && !is_kdump_kernel()) {
2957 		int i;
2958 
2959 		/*
2960 		 * transport .map_queues is usually done in the following
2961 		 * way:
2962 		 *
2963 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2964 		 * 	mask = get_cpu_mask(queue)
2965 		 * 	for_each_cpu(cpu, mask)
2966 		 * 		set->map[x].mq_map[cpu] = queue;
2967 		 * }
2968 		 *
2969 		 * When we need to remap, the table has to be cleared to
2970 		 * kill stale mappings, since one CPU may not be mapped
2971 		 * to any hw queue.
2972 		 */
2973 		for (i = 0; i < set->nr_maps; i++)
2974 			blk_mq_clear_mq_map(&set->map[i]);
2975 
2976 		return set->ops->map_queues(set);
2977 	} else {
2978 		BUG_ON(set->nr_maps > 1);
2979 		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2980 	}
2981 }
2982 
2983 /*
2984  * Alloc a tag set to be associated with one or more request queues.
2985  * May fail with EINVAL for various error conditions. May adjust the
2986  * requested depth down, if it's too large. In that case, the
2987  * adjusted value will be stored in set->queue_depth.
2988  */
2989 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2990 {
2991 	int i, ret;
2992 
2993 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2994 
2995 	if (!set->nr_hw_queues)
2996 		return -EINVAL;
2997 	if (!set->queue_depth)
2998 		return -EINVAL;
2999 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3000 		return -EINVAL;
3001 
3002 	if (!set->ops->queue_rq)
3003 		return -EINVAL;
3004 
3005 	if (!set->ops->get_budget ^ !set->ops->put_budget)
3006 		return -EINVAL;
3007 
3008 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3009 		pr_info("blk-mq: reduced tag depth to %u\n",
3010 			BLK_MQ_MAX_DEPTH);
3011 		set->queue_depth = BLK_MQ_MAX_DEPTH;
3012 	}
3013 
3014 	if (!set->nr_maps)
3015 		set->nr_maps = 1;
3016 	else if (set->nr_maps > HCTX_MAX_TYPES)
3017 		return -EINVAL;
3018 
3019 	/*
3020 	 * If a crashdump is active, then we are potentially in a very
3021 	 * memory constrained environment. Limit us to 1 queue and
3022 	 * 64 tags to prevent using too much memory.
3023 	 */
3024 	if (is_kdump_kernel()) {
3025 		set->nr_hw_queues = 1;
3026 		set->nr_maps = 1;
3027 		set->queue_depth = min(64U, set->queue_depth);
3028 	}
3029 	/*
3030 	 * There is no use for more h/w queues than cpus if we just have
3031 	 * a single map
3032 	 */
3033 	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3034 		set->nr_hw_queues = nr_cpu_ids;
3035 
3036 	set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
3037 				 GFP_KERNEL, set->numa_node);
3038 	if (!set->tags)
3039 		return -ENOMEM;
3040 
3041 	ret = -ENOMEM;
3042 	for (i = 0; i < set->nr_maps; i++) {
3043 		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3044 						  sizeof(set->map[i].mq_map[0]),
3045 						  GFP_KERNEL, set->numa_node);
3046 		if (!set->map[i].mq_map)
3047 			goto out_free_mq_map;
3048 		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3049 	}
3050 
3051 	ret = blk_mq_update_queue_map(set);
3052 	if (ret)
3053 		goto out_free_mq_map;
3054 
3055 	ret = blk_mq_alloc_rq_maps(set);
3056 	if (ret)
3057 		goto out_free_mq_map;
3058 
3059 	mutex_init(&set->tag_list_lock);
3060 	INIT_LIST_HEAD(&set->tag_list);
3061 
3062 	return 0;
3063 
3064 out_free_mq_map:
3065 	for (i = 0; i < set->nr_maps; i++) {
3066 		kfree(set->map[i].mq_map);
3067 		set->map[i].mq_map = NULL;
3068 	}
3069 	kfree(set->tags);
3070 	set->tags = NULL;
3071 	return ret;
3072 }
3073 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
3074 
3075 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3076 {
3077 	int i, j;
3078 
3079 	for (i = 0; i < nr_hw_queues(set); i++)
3080 		blk_mq_free_map_and_requests(set, i);
3081 
3082 	for (j = 0; j < set->nr_maps; j++) {
3083 		kfree(set->map[j].mq_map);
3084 		set->map[j].mq_map = NULL;
3085 	}
3086 
3087 	kfree(set->tags);
3088 	set->tags = NULL;
3089 }
3090 EXPORT_SYMBOL(blk_mq_free_tag_set);
3091 
3092 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3093 {
3094 	struct blk_mq_tag_set *set = q->tag_set;
3095 	struct blk_mq_hw_ctx *hctx;
3096 	int i, ret;
3097 
3098 	if (!set)
3099 		return -EINVAL;
3100 
3101 	if (q->nr_requests == nr)
3102 		return 0;
3103 
3104 	blk_mq_freeze_queue(q);
3105 	blk_mq_quiesce_queue(q);
3106 
3107 	ret = 0;
3108 	queue_for_each_hw_ctx(q, hctx, i) {
3109 		if (!hctx->tags)
3110 			continue;
3111 		/*
3112 		 * If we're using an MQ scheduler, just update the scheduler
3113 		 * queue depth. This is similar to what the old code would do.
3114 		 */
3115 		if (!hctx->sched_tags) {
3116 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3117 							false);
3118 		} else {
3119 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3120 							nr, true);
3121 		}
3122 		if (ret)
3123 			break;
3124 	}
3125 
3126 	if (!ret)
3127 		q->nr_requests = nr;
3128 
3129 	blk_mq_unquiesce_queue(q);
3130 	blk_mq_unfreeze_queue(q);
3131 
3132 	return ret;
3133 }
3134 
3135 /*
3136  * request_queue and elevator_type pair.
3137  * It is just used by __blk_mq_update_nr_hw_queues to cache
3138  * the elevator_type associated with a request_queue.
3139  */
3140 struct blk_mq_qe_pair {
3141 	struct list_head node;
3142 	struct request_queue *q;
3143 	struct elevator_type *type;
3144 };
3145 
3146 /*
3147  * Cache the elevator_type in qe pair list and switch the
3148  * io scheduler to 'none'
3149  */
3150 static bool blk_mq_elv_switch_none(struct list_head *head,
3151 		struct request_queue *q)
3152 {
3153 	struct blk_mq_qe_pair *qe;
3154 
3155 	if (!q->elevator)
3156 		return true;
3157 
3158 	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3159 	if (!qe)
3160 		return false;
3161 
3162 	INIT_LIST_HEAD(&qe->node);
3163 	qe->q = q;
3164 	qe->type = q->elevator->type;
3165 	list_add(&qe->node, head);
3166 
3167 	mutex_lock(&q->sysfs_lock);
3168 	/*
3169 	 * After elevator_switch_mq, the previous elevator_queue will be
3170 	 * released by elevator_release. The reference to the io scheduler
3171 	 * module taken by elevator_get will also be put. So we need to
3172 	 * take a reference to the io scheduler module here to prevent it
3173 	 * from being removed.
3174 	 */
3175 	__module_get(qe->type->elevator_owner);
3176 	elevator_switch_mq(q, NULL);
3177 	mutex_unlock(&q->sysfs_lock);
3178 
3179 	return true;
3180 }
3181 
3182 static void blk_mq_elv_switch_back(struct list_head *head,
3183 		struct request_queue *q)
3184 {
3185 	struct blk_mq_qe_pair *qe;
3186 	struct elevator_type *t = NULL;
3187 
3188 	list_for_each_entry(qe, head, node)
3189 		if (qe->q == q) {
3190 			t = qe->type;
3191 			break;
3192 		}
3193 
3194 	if (!t)
3195 		return;
3196 
3197 	list_del(&qe->node);
3198 	kfree(qe);
3199 
3200 	mutex_lock(&q->sysfs_lock);
3201 	elevator_switch_mq(q, t);
3202 	mutex_unlock(&q->sysfs_lock);
3203 }
3204 
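/*
 * Core of blk_mq_update_nr_hw_queues(): freeze every queue in the set,
 * switch their elevators to 'none', reallocate hardware contexts and queue
 * maps for the new count, then restore the elevators and unfreeze.
 */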
3205 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3206 							int nr_hw_queues)
3207 {
3208 	struct request_queue *q;
3209 	LIST_HEAD(head);
3210 	int prev_nr_hw_queues;
3211 
3212 	lockdep_assert_held(&set->tag_list_lock);
3213 
3214 	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3215 		nr_hw_queues = nr_cpu_ids;
3216 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
3217 		return;
3218 
3219 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3220 		blk_mq_freeze_queue(q);
3221 	/*
3222 	 * Sync with blk_mq_queue_tag_busy_iter.
3223 	 */
3224 	synchronize_rcu();
3225 	/*
3226 	 * Switch IO scheduler to 'none', cleaning up the data associated
3227 	 * with the previous scheduler. We will switch back once we are done
3228 	 * updating the new sw to hw queue mappings.
3229 	 */
3230 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3231 		if (!blk_mq_elv_switch_none(&head, q))
3232 			goto switch_back;
3233 
3234 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3235 		blk_mq_debugfs_unregister_hctxs(q);
3236 		blk_mq_sysfs_unregister(q);
3237 	}
3238 
3239 	prev_nr_hw_queues = set->nr_hw_queues;
3240 	set->nr_hw_queues = nr_hw_queues;
3241 	blk_mq_update_queue_map(set);
3242 fallback:
3243 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3244 		blk_mq_realloc_hw_ctxs(set, q);
3245 		if (q->nr_hw_queues != set->nr_hw_queues) {
3246 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
3247 					nr_hw_queues, prev_nr_hw_queues);
3248 			set->nr_hw_queues = prev_nr_hw_queues;
3249 			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3250 			goto fallback;
3251 		}
3252 		blk_mq_map_swqueue(q);
3253 	}
3254 
3255 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3256 		blk_mq_sysfs_register(q);
3257 		blk_mq_debugfs_register_hctxs(q);
3258 	}
3259 
3260 switch_back:
3261 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3262 		blk_mq_elv_switch_back(&head, q);
3263 
3264 	list_for_each_entry(q, &set->tag_list, tag_set_list)
3265 		blk_mq_unfreeze_queue(q);
3266 }
3267 
3268 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3269 {
3270 	mutex_lock(&set->tag_list_lock);
3271 	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3272 	mutex_unlock(&set->tag_list_lock);
3273 }
3274 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
3275 
3276 /* Enable polling stats and return whether they were already enabled. */
3277 static bool blk_poll_stats_enable(struct request_queue *q)
3278 {
3279 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3280 	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3281 		return true;
3282 	blk_stat_add_callback(q, q->poll_cb);
3283 	return false;
3284 }
3285 
3286 static void blk_mq_poll_stats_start(struct request_queue *q)
3287 {
3288 	/*
3289 	 * We don't arm the callback if polling stats are not enabled or the
3290 	 * callback is already active.
3291 	 */
3292 	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3293 	    blk_stat_is_active(q->poll_cb))
3294 		return;
3295 
3296 	blk_stat_activate_msecs(q->poll_cb, 100);
3297 }
3298 
3299 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3300 {
3301 	struct request_queue *q = cb->data;
3302 	int bucket;
3303 
3304 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3305 		if (cb->stat[bucket].nr_samples)
3306 			q->poll_stat[bucket] = cb->stat[bucket];
3307 	}
3308 }
3309 
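/*
 * Estimate how long to sleep before polling: half of the mean completion
 * time recorded for requests of this size and direction.
 */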
3310 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3311 				       struct blk_mq_hw_ctx *hctx,
3312 				       struct request *rq)
3313 {
3314 	unsigned long ret = 0;
3315 	int bucket;
3316 
3317 	/*
3318 	 * If stats collection isn't on, don't sleep but turn it on for
3319 	 * future users
3320 	 */
3321 	if (!blk_poll_stats_enable(q))
3322 		return 0;
3323 
3324 	/*
3325 	 * As an optimistic guess, use half of the mean service time
3326 	 * for this type of request. We can (and should) make this smarter.
3327 	 * For instance, if the completion latencies are tight, we can
3328 	 * get closer than just half the mean. This is especially
3329 	 * important on devices where the completion latencies are longer
3330 	 * than ~10 usec. We do use the stats for the relevant IO size
3331 	 * if available which does lead to better estimates.
3332 	 */
3333 	bucket = blk_mq_poll_stats_bkt(rq);
3334 	if (bucket < 0)
3335 		return ret;
3336 
3337 	if (q->poll_stat[bucket].nr_samples)
3338 		ret = (q->poll_stat[bucket].mean + 1) / 2;
3339 
3340 	return ret;
3341 }
3342 
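/*
 * Hybrid polling: sleep for (roughly) half of the expected completion time
 * before busy-polling, to save CPU without giving up much latency.
 */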
3343 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3344 				     struct blk_mq_hw_ctx *hctx,
3345 				     struct request *rq)
3346 {
3347 	struct hrtimer_sleeper hs;
3348 	enum hrtimer_mode mode;
3349 	unsigned int nsecs;
3350 	ktime_t kt;
3351 
3352 	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3353 		return false;
3354 
3355 	/*
3356 	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
3357 	 *
3358 	 *  0:	use half of prev avg
3359 	 * >0:	use this specific value
3360 	 */
3361 	if (q->poll_nsec > 0)
3362 		nsecs = q->poll_nsec;
3363 	else
3364 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3365 
3366 	if (!nsecs)
3367 		return false;
3368 
3369 	rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3370 
3371 	/*
3372 	 * This will be replaced with the stats tracking code, using
3373 	 * 'avg_completion_time / 2' as the pre-sleep target.
3374 	 */
3375 	kt = nsecs;
3376 
3377 	mode = HRTIMER_MODE_REL;
3378 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3379 	hrtimer_set_expires(&hs.timer, kt);
3380 
3381 	hrtimer_init_sleeper(&hs, current);
3382 	do {
3383 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3384 			break;
3385 		set_current_state(TASK_UNINTERRUPTIBLE);
3386 		hrtimer_start_expires(&hs.timer, mode);
3387 		if (hs.task)
3388 			io_schedule();
3389 		hrtimer_cancel(&hs.timer);
3390 		mode = HRTIMER_MODE_ABS;
3391 	} while (hs.task && !signal_pending(current));
3392 
3393 	__set_current_state(TASK_RUNNING);
3394 	destroy_hrtimer_on_stack(&hs.timer);
3395 	return true;
3396 }
3397 
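/*
 * Resolve the polling cookie back to its request and, if hybrid polling is
 * enabled, do the pre-poll sleep for it.
 */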
3398 static bool blk_mq_poll_hybrid(struct request_queue *q,
3399 			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3400 {
3401 	struct request *rq;
3402 
3403 	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3404 		return false;
3405 
3406 	if (!blk_qc_t_is_internal(cookie))
3407 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3408 	else {
3409 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3410 		/*
3411 		 * With scheduling, if the request has completed, we'll
3412 		 * get a NULL return here, as we clear the sched tag when
3413 		 * that happens. The request still remains valid, like always,
3414 		 * so we should be safe with just the NULL check.
3415 		 */
3416 		if (!rq)
3417 			return false;
3418 	}
3419 
3420 	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
3421 }
3422 
3423 /**
3424  * blk_poll - poll for IO completions
3425  * @q:  the queue
3426  * @cookie: cookie passed back at IO submission time
3427  * @spin: whether to spin for completions
3428  *
3429  * Description:
3430  *    Poll for completions on the passed in queue. Returns number of
3431  *    completed entries found. If @spin is true, then blk_poll will continue
3432  *    looping until at least one completion is found, unless the task is
3433  *    otherwise marked running (or we need to reschedule).
3434  */
3435 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3436 {
3437 	struct blk_mq_hw_ctx *hctx;
3438 	long state;
3439 
3440 	if (!blk_qc_t_valid(cookie) ||
3441 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3442 		return 0;
3443 
3444 	if (current->plug)
3445 		blk_flush_plug_list(current->plug, false);
3446 
3447 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3448 
3449 	/*
3450 	 * If we sleep, have the caller restart the poll loop to reset
3451 	 * the state. Like for the other success return cases, the
3452 	 * caller is responsible for checking if the IO completed. If
3453 	 * the IO isn't complete, we'll get called again and will go
3454 	 * straight to the busy poll loop.
3455 	 */
3456 	if (blk_mq_poll_hybrid(q, hctx, cookie))
3457 		return 1;
3458 
3459 	hctx->poll_considered++;
3460 
3461 	state = current->state;
3462 	do {
3463 		int ret;
3464 
3465 		hctx->poll_invoked++;
3466 
3467 		ret = q->mq_ops->poll(hctx);
3468 		if (ret > 0) {
3469 			hctx->poll_success++;
3470 			__set_current_state(TASK_RUNNING);
3471 			return ret;
3472 		}
3473 
3474 		if (signal_pending_state(state, current))
3475 			__set_current_state(TASK_RUNNING);
3476 
3477 		if (current->state == TASK_RUNNING)
3478 			return 1;
3479 		if (ret < 0 || !spin)
3480 			break;
3481 		cpu_relax();
3482 	} while (!need_resched());
3483 
3484 	__set_current_state(TASK_RUNNING);
3485 	return 0;
3486 }
3487 EXPORT_SYMBOL_GPL(blk_poll);
3488 
3489 unsigned int blk_mq_rq_cpu(struct request *rq)
3490 {
3491 	return rq->mq_ctx->cpu;
3492 }
3493 EXPORT_SYMBOL(blk_mq_rq_cpu);
3494 
3495 static int __init blk_mq_init(void)
3496 {
3497 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3498 				blk_mq_hctx_notify_dead);
3499 	return 0;
3500 }
3501 subsys_initcall(blk_mq_init);
3502