// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
static DEFINE_MUTEX(blk_mq_cpuhp_lock);

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
		blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			 struct io_comp_batch *iob, unsigned int flags);

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
	struct mq_inflight *mi = priv;

	if (rq->part && blk_do_io_stat(rq) &&
	    (!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
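
/*
 * Illustrative only: a minimal sketch (unused by this file) of the
 * freeze/unfreeze pattern the API above supports.  A driver brackets an
 * update that must not race with new or in-flight requests; the elided
 * middle step is whatever queue data structure change the caller needs.
 */
static void __maybe_unused blk_mq_example_freeze_for_update(
		struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* waits for q_usage_counter to hit zero */
	/* ... safely modify queue data structures here ... */
	blk_mq_unfreeze_queue(q);	/* resurrects the counter, wakes waiters */
}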

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @set: tag_set to wait on
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set.  This
 * function only waits for the quiesce on those request_queues that had
 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
	if (set->flags & BLK_MQ_F_BLOCKING)
		synchronize_srcu(set->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback function from being invoked.  Once this function returns, no
 * dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	/* nothing to wait for non-mq queues */
	if (queue_is_mq(q))
		blk_mq_wait_quiesce_done(q->tag_set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
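
/*
 * Illustrative only: a hedged sketch (unused by this file) contrasting
 * quiesce with freeze.  Quiescing only stops ->queue_rq() dispatch; it
 * neither drains completions nor blocks request allocation, so it is the
 * lighter tool when a driver must fence dispatch around a state change.
 */
static void __maybe_unused blk_mq_example_quiesce_window(
		struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* waits for in-progress dispatch sections */
	/* ... driver state change that must not race with ->queue_rq() ... */
	blk_mq_unquiesce_queue(q);	/* re-runs hw queues to catch up */
}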

void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_quiesce_queue_nowait(q);
	}
	mutex_unlock(&set->tag_list_lock);

	blk_mq_wait_quiesce_done(set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);

void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_unquiesce_queue(q);
	}
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
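
/*
 * Illustrative only: a sketch (unused by this file) of the tagset-wide
 * variant above.  A driver that owns the whole tag_set, e.g. across a
 * controller reset, quiesces every queue with one call pair instead of
 * walking tag_list itself.
 */
static void __maybe_unused blk_mq_example_tagset_window(
		struct blk_mq_tag_set *set)
{
	blk_mq_quiesce_tagset(set);	/* also waits for the quiesce to settle */
	/* ... tear down and re-establish the controller association ... */
	blk_mq_unquiesce_tagset(set);
}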

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

/* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{
	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;

#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	if (blk_queue_rq_alloc_time(rq->q))
		rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
	else
		rq->alloc_time_ns = 0;
#endif
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (data->rq_flags & RQF_SCHED_TAGS) {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}
	rq->timeout = 0;

	rq->part = NULL;
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (e->type->ops.prepare_request)
			e->type->ops.prepare_request(rq);
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);

	if (q->elevator) {
		/*
		 * All requests use scheduler tags when an I/O scheduler is
		 * enabled for the queue.
		 */
		data->rq_flags |= RQF_SCHED_TAGS;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list.
		 */
		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
		    !blk_op_is_passthrough(data->cmd_flags)) {
			struct elevator_mq_ops *ops = &q->elevator->type->ops;

			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);

			data->rq_flags |= RQF_USE_SCHED;
			if (ops->limit_depth)
				ops->limit_depth(data->cmd_flags, data);
		}
	} else {
		blk_mq_tag_busy(data->hctx);
	}

	if (data->flags & BLK_MQ_REQ_RESERVED)
		data->rq_flags |= RQF_RESV;

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data);
		if (rq) {
			blk_mq_rq_time_init(rq, alloc_time_ns);
			return rq;
		}
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	return rq;
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
					    struct blk_plug *plug,
					    blk_opf_t opf,
					    blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= plug->nr_ios,
		.cached_rq	= &plug->cached_rq,
	};
	struct request *rq;

	if (blk_queue_enter(q, flags))
		return NULL;

	plug->nr_ios = 1;

	rq = __blk_mq_alloc_requests(&data);
	if (unlikely(!rq))
		blk_queue_exit(q);
	return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;

	if (rq_list_empty(plug->cached_rq)) {
		if (plug->nr_ios == 1)
			return NULL;
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (!rq)
			return NULL;
	} else {
		rq = rq_list_peek(&plug->cached_rq);
		if (!rq || rq->q != q)
			return NULL;

		if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
			return NULL;
		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
			return NULL;

		plug->cached_rq = rq_list_next(rq);
		blk_mq_rq_time_init(rq, 0);
	}

	rq->cmd_flags = opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_cached_request(q, opf, flags);
	if (!rq) {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.flags		= flags,
			.cmd_flags	= opf,
			.nr_tags	= 1,
		};
		int ret;

		ret = blk_queue_enter(q, flags);
		if (ret)
			return ERR_PTR(ret);

		rq = __blk_mq_alloc_requests(&data);
		if (!rq)
			goto out_queue_exit;
	}
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
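
/*
 * Illustrative only: a sketch (unused by this file) of the allocation API
 * above as a passthrough caller would use it.  Error handling is reduced
 * to the minimum and the request is freed instead of being executed.
 */
static int __maybe_unused blk_mq_example_alloc(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EWOULDBLOCK when no tag is free */
	/* ... prepare rq, then typically execute it via blk_execute_rq() ... */
	blk_mq_free_request(rq);
	return 0;
}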

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		goto out_queue_exit;
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (q->elevator)
		data.rq_flags |= RQF_SCHED_TAGS;
	else
		blk_mq_tag_busy(data.hctx);

	if (flags & BLK_MQ_REQ_RESERVED)
		data.rq_flags |= RQF_RESV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void blk_mq_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (rq->rq_flags & RQF_USE_SCHED) {
		q->elevator->type->ops.finish_request(rq);
		/*
		 * For a postflush request that may need to be
		 * completed twice, clear this flag to avoid a
		 * double finish_request() on the rq.
		 */
		rq->rq_flags &= ~RQF_USE_SCHED;
	}
}

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_finish_request(rq);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(__force unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), (__force u32)req_op(req),
		blk_op_str(req_op(req)),
		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	blk_crypto_rq_put_keyslot(req);

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	if (!req->end_io) {
		req->bio = NULL;
		req->__data_len = 0;
	}
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *	except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
		__blk_crypto_rq_put_keyslot(req);

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
		     !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
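
/*
 * Illustrative only: a sketch (unused by this file) of the canonical
 * partial-completion pattern built on blk_update_request().  When no
 * bytes remain the request itself is ended; blk_mq_end_request() below
 * is exactly this shortcut for the common full-completion case.
 */
static void __maybe_unused blk_mq_example_partial_done(struct request *rq,
		unsigned int bytes, blk_status_t error)
{
	if (!blk_update_request(rq, error, bytes))
		__blk_mq_end_request(rq, error);	/* no data left, finish rq */
	/* else: leftover segments remain, complete them in a later call */
}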

static inline void blk_account_io_done(struct request *req, u64 now)
{
	trace_block_io_done(req);

	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_local_dec(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

static inline void blk_account_io_start(struct request *req)
{
	trace_block_io_start(req);

	if (blk_do_io_stat(req)) {
		/*
		 * All non-passthrough requests are created from a bio with one
		 * exception: when a flush command that is part of a flush sequence
		 * generated by the state machine in blk-flush.c is cloned onto the
		 * lower device by dm-multipath we can get here without a bio.
		 */
		if (req->bio)
			req->part = req->bio->bi_bdev;
		else
			req->part = req->q->disk->part0;

		part_stat_lock();
		update_io_ticks(req->part, jiffies, false);
		part_stat_local_inc(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS)
		blk_stat_add(rq, now);

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	blk_mq_finish_request(rq);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
1062f794f335SJens Axboe #define TAG_COMP_BATCH 32
1063f794f335SJens Axboe
blk_mq_flush_tag_batch(struct blk_mq_hw_ctx * hctx,int * tag_array,int nr_tags)1064f794f335SJens Axboe static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1065f794f335SJens Axboe int *tag_array, int nr_tags)
1066f794f335SJens Axboe {
1067f794f335SJens Axboe struct request_queue *q = hctx->queue;
1068f794f335SJens Axboe
10693b87c6eaSMing Lei /*
10703b87c6eaSMing Lei * All requests should have been marked as RQF_MQ_INFLIGHT, so
10713b87c6eaSMing Lei * update hctx->nr_active in batch
10723b87c6eaSMing Lei */
10733b87c6eaSMing Lei if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
10743b87c6eaSMing Lei __blk_mq_sub_active_requests(hctx, nr_tags);
10753b87c6eaSMing Lei
1076f794f335SJens Axboe blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1077f794f335SJens Axboe percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1078f794f335SJens Axboe }
1079f794f335SJens Axboe
blk_mq_end_request_batch(struct io_comp_batch * iob)1080f794f335SJens Axboe void blk_mq_end_request_batch(struct io_comp_batch *iob)
1081f794f335SJens Axboe {
1082f794f335SJens Axboe int tags[TAG_COMP_BATCH], nr_tags = 0;
108302f7eab0SJens Axboe struct blk_mq_hw_ctx *cur_hctx = NULL;
1084f794f335SJens Axboe struct request *rq;
1085f794f335SJens Axboe u64 now = 0;
1086f794f335SJens Axboe
1087f794f335SJens Axboe if (iob->need_ts)
1088f794f335SJens Axboe now = ktime_get_ns();
1089f794f335SJens Axboe
1090f794f335SJens Axboe while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1091f794f335SJens Axboe prefetch(rq->bio);
1092f794f335SJens Axboe prefetch(rq->rq_next);
1093f794f335SJens Axboe
10945581a5ddSJens Axboe blk_complete_request(rq);
1095f794f335SJens Axboe if (iob->need_ts)
1096f794f335SJens Axboe __blk_mq_end_request_acct(rq, now);
1097f794f335SJens Axboe
1098e5c0ca13SChengming Zhou blk_mq_finish_request(rq);
1099e5c0ca13SChengming Zhou
110098b26a0eSJens Axboe rq_qos_done(rq->q, rq);
110198b26a0eSJens Axboe
1102ab3e1d3bSJens Axboe /*
1103ab3e1d3bSJens Axboe * If the end_io handler returns NONE, then it still has
1104ab3e1d3bSJens Axboe * ownership of the request.
1105ab3e1d3bSJens Axboe */
1106ab3e1d3bSJens Axboe if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1107ab3e1d3bSJens Axboe continue;
1108ab3e1d3bSJens Axboe
1109f794f335SJens Axboe WRITE_ONCE(rq->state, MQ_RQ_IDLE);
11100a467d0fSJens Axboe if (!req_ref_put_and_test(rq))
1111f794f335SJens Axboe continue;
1112f794f335SJens Axboe
1113f794f335SJens Axboe blk_crypto_free_request(rq);
1114f794f335SJens Axboe blk_pm_mark_last_busy(rq);
1115f794f335SJens Axboe
111602f7eab0SJens Axboe if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
111702f7eab0SJens Axboe if (cur_hctx)
111802f7eab0SJens Axboe blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1119f794f335SJens Axboe nr_tags = 0;
112002f7eab0SJens Axboe cur_hctx = rq->mq_hctx;
1121f794f335SJens Axboe }
1122f794f335SJens Axboe tags[nr_tags++] = rq->tag;
1123f794f335SJens Axboe }
1124f794f335SJens Axboe
1125f794f335SJens Axboe if (nr_tags)
112602f7eab0SJens Axboe blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1127f794f335SJens Axboe }
1128f794f335SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
1129f794f335SJens Axboe
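/*
 * Driver-side sketch of batched completion (hypothetical mydrv_* names,
 * loosely modeled on how NVMe uses this interface): the poll or interrupt
 * path gathers requests with blk_mq_add_to_batch(), and the batch callback
 * does per-request teardown before handing the whole list to
 * blk_mq_end_request_batch():
 *
 *	static void mydrv_complete_batch(struct io_comp_batch *iob)
 *	{
 *		struct request *rq;
 *
 *		rq_list_for_each(&iob->req_list, rq)
 *			mydrv_unmap_data(blk_mq_rq_to_pdu(rq));
 *		blk_mq_end_request_batch(iob);
 *	}
 *
 *	// in the completion path:
 *	if (!blk_mq_add_to_batch(rq, iob, status, mydrv_complete_batch))
 *		blk_mq_complete_request(rq);	// fall back to one-at-a-time
 */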
1130f9ab4918SSebastian Andrzej Siewior static void blk_complete_reqs(struct llist_head *list)
1131c3077b5dSChristoph Hellwig {
1132f9ab4918SSebastian Andrzej Siewior struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1133f9ab4918SSebastian Andrzej Siewior struct request *rq, *next;
1134c3077b5dSChristoph Hellwig
1135f9ab4918SSebastian Andrzej Siewior llist_for_each_entry_safe(rq, next, entry, ipi_list)
1136c3077b5dSChristoph Hellwig rq->q->mq_ops->complete(rq);
1137c3077b5dSChristoph Hellwig }
1138c3077b5dSChristoph Hellwig
1139f9ab4918SSebastian Andrzej Siewior static __latent_entropy void blk_done_softirq(struct softirq_action *h)
1140115243f5SChristoph Hellwig {
1141f9ab4918SSebastian Andrzej Siewior blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1142c3077b5dSChristoph Hellwig }
1143c3077b5dSChristoph Hellwig
1144c3077b5dSChristoph Hellwig static int blk_softirq_cpu_dead(unsigned int cpu)
1145c3077b5dSChristoph Hellwig {
1146f9ab4918SSebastian Andrzej Siewior blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1147c3077b5dSChristoph Hellwig return 0;
1148c3077b5dSChristoph Hellwig }
1149c3077b5dSChristoph Hellwig
115030a91cb4SChristoph Hellwig static void __blk_mq_complete_request_remote(void *data)
1151320ae51fSJens Axboe {
1152f9ab4918SSebastian Andrzej Siewior __raise_softirq_irqoff(BLOCK_SOFTIRQ);
115336e76539SMing Lei }
115436e76539SMing Lei
115596339526SChristoph Hellwig static inline bool blk_mq_complete_need_ipi(struct request *rq)
115696339526SChristoph Hellwig {
115796339526SChristoph Hellwig int cpu = raw_smp_processor_id();
115896339526SChristoph Hellwig
115996339526SChristoph Hellwig if (!IS_ENABLED(CONFIG_SMP) ||
116096339526SChristoph Hellwig !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
116196339526SChristoph Hellwig return false;
116271425189SSebastian Andrzej Siewior /*
116371425189SSebastian Andrzej Siewior * With force threaded interrupts enabled, raising softirq from an SMP
116471425189SSebastian Andrzej Siewior * function call will always result in waking the ksoftirqd thread.
116571425189SSebastian Andrzej Siewior * This is probably worse than completing the request on a different
116671425189SSebastian Andrzej Siewior * cache domain.
116771425189SSebastian Andrzej Siewior */
116891cc470eSTanner Love if (force_irqthreads())
116971425189SSebastian Andrzej Siewior return false;
117096339526SChristoph Hellwig
117196339526SChristoph Hellwig /* same CPU or cache domain? Complete locally */
117296339526SChristoph Hellwig if (cpu == rq->mq_ctx->cpu ||
117396339526SChristoph Hellwig (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
117496339526SChristoph Hellwig cpus_share_cache(cpu, rq->mq_ctx->cpu)))
117596339526SChristoph Hellwig return false;
117696339526SChristoph Hellwig
117796339526SChristoph Hellwig /* don't try to IPI to an offline CPU */
117896339526SChristoph Hellwig return cpu_online(rq->mq_ctx->cpu);
117996339526SChristoph Hellwig }
118096339526SChristoph Hellwig
1181f9ab4918SSebastian Andrzej Siewior static void blk_mq_complete_send_ipi(struct request *rq)
1182f9ab4918SSebastian Andrzej Siewior {
1183f9ab4918SSebastian Andrzej Siewior unsigned int cpu;
1184f9ab4918SSebastian Andrzej Siewior
1185f9ab4918SSebastian Andrzej Siewior cpu = rq->mq_ctx->cpu;
1186660e802cSChengming Zhou if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1187660e802cSChengming Zhou smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
1188f9ab4918SSebastian Andrzej Siewior }
1189f9ab4918SSebastian Andrzej Siewior
1190f9ab4918SSebastian Andrzej Siewior static void blk_mq_raise_softirq(struct request *rq)
1191f9ab4918SSebastian Andrzej Siewior {
1192f9ab4918SSebastian Andrzej Siewior struct llist_head *list;
1193f9ab4918SSebastian Andrzej Siewior
1194f9ab4918SSebastian Andrzej Siewior preempt_disable();
1195f9ab4918SSebastian Andrzej Siewior list = this_cpu_ptr(&blk_cpu_done);
1196f9ab4918SSebastian Andrzej Siewior if (llist_add(&rq->ipi_list, list))
1197f9ab4918SSebastian Andrzej Siewior raise_softirq(BLOCK_SOFTIRQ);
1198f9ab4918SSebastian Andrzej Siewior preempt_enable();
1199f9ab4918SSebastian Andrzej Siewior }
1200f9ab4918SSebastian Andrzej Siewior
120140d09b53SChristoph Hellwig bool blk_mq_complete_request_remote(struct request *rq)
120240d09b53SChristoph Hellwig {
120340d09b53SChristoph Hellwig WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
120440d09b53SChristoph Hellwig
12054ab32bf3SJens Axboe /*
1206f168420cSLiu Song * For a request whose hctx has only one ctx mapping,
1207f168420cSLiu Song * or for a polled request, always complete locally;
1208f168420cSLiu Song * it's pointless to redirect the completion.
12094ab32bf3SJens Axboe */
121030654614SEd Tsai if ((rq->mq_hctx->nr_ctx == 1 &&
121130654614SEd Tsai rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1212f168420cSLiu Song rq->cmd_flags & REQ_POLLED)
121340d09b53SChristoph Hellwig return false;
1214320ae51fSJens Axboe
121540d09b53SChristoph Hellwig if (blk_mq_complete_need_ipi(rq)) {
1216f9ab4918SSebastian Andrzej Siewior blk_mq_complete_send_ipi(rq);
1217f9ab4918SSebastian Andrzej Siewior return true;
12183d6efbf6SChristoph Hellwig }
121940d09b53SChristoph Hellwig
1220f9ab4918SSebastian Andrzej Siewior if (rq->q->nr_hw_queues == 1) {
1221f9ab4918SSebastian Andrzej Siewior blk_mq_raise_softirq(rq);
122240d09b53SChristoph Hellwig return true;
1223320ae51fSJens Axboe }
1224f9ab4918SSebastian Andrzej Siewior return false;
1225f9ab4918SSebastian Andrzej Siewior }
122640d09b53SChristoph Hellwig EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
122740d09b53SChristoph Hellwig
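/*
 * This is exported so a driver can pair the remote-completion decision with
 * a cheap local fast path, avoiding the ->complete() indirection when the
 * request can be finished right here. Sketch (hypothetical mydrv_* helper;
 * the loop driver follows this pattern):
 *
 *	if (!blk_mq_complete_request_remote(rq))
 *		mydrv_complete_rq(rq);
 */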
1228320ae51fSJens Axboe /**
122915f73f5bSChristoph Hellwig * blk_mq_complete_request - end I/O on a request
123015f73f5bSChristoph Hellwig * @rq: the request being processed
1231320ae51fSJens Axboe *
123215f73f5bSChristoph Hellwig * Description:
123315f73f5bSChristoph Hellwig * Complete a request by scheduling the ->complete_rq operation.
123415f73f5bSChristoph Hellwig **/
123515f73f5bSChristoph Hellwig void blk_mq_complete_request(struct request *rq)
1236320ae51fSJens Axboe {
123740d09b53SChristoph Hellwig if (!blk_mq_complete_request_remote(rq))
123896339526SChristoph Hellwig rq->q->mq_ops->complete(rq);
1239320ae51fSJens Axboe }
124015f73f5bSChristoph Hellwig EXPORT_SYMBOL(blk_mq_complete_request);
124130a91cb4SChristoph Hellwig
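/*
 * Typical caller, for illustration (hypothetical mydrv_* names): a hard IRQ
 * handler hands the request back to the block layer, and the real work then
 * happens in the driver's ->complete() callback on the CPU chosen above.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct request *rq = mydrv_pop_completed(data);
 *
 *		if (!rq)
 *			return IRQ_NONE;
 *		blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 */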
124230a91cb4SChristoph Hellwig /**
1243105663f7SAndré Almeida * blk_mq_start_request - Start processing a request
1244105663f7SAndré Almeida * @rq: Pointer to request to be started
1245105663f7SAndré Almeida *
1246105663f7SAndré Almeida * Function used by device drivers to notify the block layer that a request
1247105663f7SAndré Almeida * is going to be processed now, so the block layer can do proper
1248105663f7SAndré Almeida * initializations such as starting the timeout timer.
1249105663f7SAndré Almeida */
1250e2490073SChristoph Hellwig void blk_mq_start_request(struct request *rq)
1251320ae51fSJens Axboe {
1252320ae51fSJens Axboe struct request_queue *q = rq->q;
1253320ae51fSJens Axboe
1254a54895faSChristoph Hellwig trace_block_rq_issue(rq);
1255320ae51fSJens Axboe
1256cf43e6beSJens Axboe if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
12574cddeacaSTejun Heo rq->io_start_time_ns = ktime_get_ns();
12583d244306SHou Tao rq->stats_sectors = blk_rq_sectors(rq);
1259cf43e6beSJens Axboe rq->rq_flags |= RQF_STATS;
1260a7905043SJosef Bacik rq_qos_issue(q, rq);
1261cf43e6beSJens Axboe }
1262cf43e6beSJens Axboe
12631d9bd516STejun Heo WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1264538b7534SJens Axboe
1265538b7534SJens Axboe blk_add_timer(rq);
126612f5b931SKeith Busch WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
126749f5baa5SChristoph Hellwig
126854d4e6abSMax Gurtovoy #ifdef CONFIG_BLK_DEV_INTEGRITY
126954d4e6abSMax Gurtovoy if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
127054d4e6abSMax Gurtovoy q->integrity.profile->prepare_fn(rq);
127154d4e6abSMax Gurtovoy #endif
12723e08773cSChristoph Hellwig if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1273f6c80cffSKeith Busch WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
1274320ae51fSJens Axboe }
1275e2490073SChristoph Hellwig EXPORT_SYMBOL(blk_mq_start_request);
1276320ae51fSJens Axboe
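/*
 * Illustrative ->queue_rq() skeleton (hypothetical mydrv_* names): the
 * request must be started before it is issued to hardware, so the timeout
 * timer and the accounting above are armed first.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!mydrv_submit(hctx->driver_data, blk_mq_rq_to_pdu(rq)))
 *			return BLK_STS_RESOURCE;	// dispatch will requeue
 *		return BLK_STS_OK;
 *	}
 */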
1277a327c341SMing Lei /*
1278a327c341SMing Lei * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1279a327c341SMing Lei * queues. This is important for md arrays to benefit from merging
1280a327c341SMing Lei * requests.
1281a327c341SMing Lei */
1282a327c341SMing Lei static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1283a327c341SMing Lei {
1284a327c341SMing Lei if (plug->multiple_queues)
1285a327c341SMing Lei return BLK_MAX_REQUEST_COUNT * 2;
1286a327c341SMing Lei return BLK_MAX_REQUEST_COUNT;
1287a327c341SMing Lei }
1288a327c341SMing Lei
1289a327c341SMing Lei static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1290a327c341SMing Lei {
1291a327c341SMing Lei struct request *last = rq_list_peek(&plug->mq_list);
1292a327c341SMing Lei
1293a327c341SMing Lei if (!plug->rq_count) {
1294a327c341SMing Lei trace_block_plug(rq->q);
1295a327c341SMing Lei } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1296a327c341SMing Lei (!blk_queue_nomerges(rq->q) &&
1297a327c341SMing Lei blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1298a327c341SMing Lei blk_mq_flush_plug_list(plug, false);
1299878eb6e4SAl Viro last = NULL;
1300a327c341SMing Lei trace_block_plug(rq->q);
1301a327c341SMing Lei }
1302a327c341SMing Lei
1303a327c341SMing Lei if (!plug->multiple_queues && last && last->q != rq->q)
1304a327c341SMing Lei plug->multiple_queues = true;
1305c6b7a3a2SMing Lei /*
1306c6b7a3a2SMing Lei * Any request allocated from sched tags can't be issued to
1307c6b7a3a2SMing Lei * ->queue_rqs() directly
1308c6b7a3a2SMing Lei */
1309c6b7a3a2SMing Lei if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1310a327c341SMing Lei plug->has_elevator = true;
1311a327c341SMing Lei rq->rq_next = NULL;
1312a327c341SMing Lei rq_list_add(&plug->mq_list, rq);
1313a327c341SMing Lei plug->rq_count++;
1314a327c341SMing Lei }
1315a327c341SMing Lei
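/*
 * The plug itself is owned by the submitter; the usual shape, for
 * reference (blk_start_plug()/blk_finish_plug() live in blk-core.c):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);	// held back on plug->mq_list
 *	blk_finish_plug(&plug);	// issued via blk_mq_flush_plug_list()
 */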
13164054cff9SChristoph Hellwig /**
13174054cff9SChristoph Hellwig * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
13184054cff9SChristoph Hellwig * @rq: request to insert
13194054cff9SChristoph Hellwig * @at_head: insert request at head or tail of queue
13204054cff9SChristoph Hellwig *
13214054cff9SChristoph Hellwig * Description:
13224054cff9SChristoph Hellwig * Insert a fully prepared request at the back of the I/O scheduler queue
13234054cff9SChristoph Hellwig * for execution. Don't wait for completion.
13244054cff9SChristoph Hellwig *
13254054cff9SChristoph Hellwig * Note:
13264054cff9SChristoph Hellwig * This function will invoke @rq->end_io directly if the queue is dead.
13274054cff9SChristoph Hellwig */
1328e2e53086SChristoph Hellwig void blk_execute_rq_nowait(struct request *rq, bool at_head)
13294054cff9SChristoph Hellwig {
1330f0dbe6e8SChristoph Hellwig struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1331f0dbe6e8SChristoph Hellwig
1332ae948fd6SChristoph Hellwig WARN_ON(irqs_disabled());
1333ae948fd6SChristoph Hellwig WARN_ON(!blk_rq_is_passthrough(rq));
13344054cff9SChristoph Hellwig
1335ae948fd6SChristoph Hellwig blk_account_io_start(rq);
1336110fdb44SPankaj Raghav
1337110fdb44SPankaj Raghav /*
1338110fdb44SPankaj Raghav * As plugging can be enabled for passthrough requests on a zoned
1339110fdb44SPankaj Raghav * device, directly accessing the plug instead of using blk_mq_plug()
1340110fdb44SPankaj Raghav * should not have any consequences.
1341110fdb44SPankaj Raghav */
1342f0dbe6e8SChristoph Hellwig if (current->plug && !at_head) {
1343ae948fd6SChristoph Hellwig blk_add_rq_to_plug(current->plug, rq);
1344f0dbe6e8SChristoph Hellwig return;
1345f0dbe6e8SChristoph Hellwig }
1346f0dbe6e8SChristoph Hellwig
1347710fa378SChristoph Hellwig blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
134865a558f6SBart Van Assche blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
13494054cff9SChristoph Hellwig }
13504054cff9SChristoph Hellwig EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
13514054cff9SChristoph Hellwig
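/*
 * Illustrative asynchronous passthrough submission (hypothetical mydrv_*
 * names; the end_io handler decides ownership by returning RQ_END_IO_NONE
 * or RQ_END_IO_FREE):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->end_io = mydrv_pt_end_io;
 *	rq->end_io_data = ctx;
 *	blk_execute_rq_nowait(rq, false);
 */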
135232ac5a9bSChristoph Hellwig struct blk_rq_wait {
135332ac5a9bSChristoph Hellwig struct completion done;
135432ac5a9bSChristoph Hellwig blk_status_t ret;
135532ac5a9bSChristoph Hellwig };
135632ac5a9bSChristoph Hellwig
1357de671d61SJens Axboe static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
135832ac5a9bSChristoph Hellwig {
135932ac5a9bSChristoph Hellwig struct blk_rq_wait *wait = rq->end_io_data;
136032ac5a9bSChristoph Hellwig
136132ac5a9bSChristoph Hellwig wait->ret = ret;
136232ac5a9bSChristoph Hellwig complete(&wait->done);
1363de671d61SJens Axboe return RQ_END_IO_NONE;
136432ac5a9bSChristoph Hellwig }
136532ac5a9bSChristoph Hellwig
1366c6e99ea4SKanchan Joshi bool blk_rq_is_poll(struct request *rq)
13674054cff9SChristoph Hellwig {
13684054cff9SChristoph Hellwig if (!rq->mq_hctx)
13694054cff9SChristoph Hellwig return false;
13704054cff9SChristoph Hellwig if (rq->mq_hctx->type != HCTX_TYPE_POLL)
13714054cff9SChristoph Hellwig return false;
13724054cff9SChristoph Hellwig return true;
13734054cff9SChristoph Hellwig }
1374c6e99ea4SKanchan Joshi EXPORT_SYMBOL_GPL(blk_rq_is_poll);
13754054cff9SChristoph Hellwig
13764054cff9SChristoph Hellwig static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
13774054cff9SChristoph Hellwig {
13784054cff9SChristoph Hellwig do {
1379f6c80cffSKeith Busch blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
13804054cff9SChristoph Hellwig cond_resched();
13814054cff9SChristoph Hellwig } while (!completion_done(wait));
13824054cff9SChristoph Hellwig }
13834054cff9SChristoph Hellwig
13844054cff9SChristoph Hellwig /**
13854054cff9SChristoph Hellwig * blk_execute_rq - insert a request into queue for execution
13864054cff9SChristoph Hellwig * @rq: request to insert
13874054cff9SChristoph Hellwig * @at_head: insert request at head or tail of queue
13884054cff9SChristoph Hellwig *
13894054cff9SChristoph Hellwig * Description:
13904054cff9SChristoph Hellwig * Insert a fully prepared request at the back of the I/O scheduler queue
13914054cff9SChristoph Hellwig * for execution and wait for completion.
13924054cff9SChristoph Hellwig * Return: The blk_status_t result provided to blk_mq_end_request().
13934054cff9SChristoph Hellwig */
1394b84ba30bSChristoph Hellwig blk_status_t blk_execute_rq(struct request *rq, bool at_head)
13954054cff9SChristoph Hellwig {
1396f0dbe6e8SChristoph Hellwig struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
139732ac5a9bSChristoph Hellwig struct blk_rq_wait wait = {
139832ac5a9bSChristoph Hellwig .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
139932ac5a9bSChristoph Hellwig };
14004054cff9SChristoph Hellwig
1401ae948fd6SChristoph Hellwig WARN_ON(irqs_disabled());
1402ae948fd6SChristoph Hellwig WARN_ON(!blk_rq_is_passthrough(rq));
1403ae948fd6SChristoph Hellwig
14044054cff9SChristoph Hellwig rq->end_io_data = &wait;
1405ae948fd6SChristoph Hellwig rq->end_io = blk_end_sync_rq;
14064054cff9SChristoph Hellwig
1407ae948fd6SChristoph Hellwig blk_account_io_start(rq);
1408710fa378SChristoph Hellwig blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1409f0dbe6e8SChristoph Hellwig blk_mq_run_hw_queue(hctx, false);
14104054cff9SChristoph Hellwig
1411ae948fd6SChristoph Hellwig if (blk_rq_is_poll(rq)) {
141232ac5a9bSChristoph Hellwig blk_rq_poll_completion(rq, &wait.done);
1413ae948fd6SChristoph Hellwig } else {
1414ae948fd6SChristoph Hellwig /*
1415ae948fd6SChristoph Hellwig * Prevent hang_check timer from firing at us during very long
1416ae948fd6SChristoph Hellwig * I/O
1417ae948fd6SChristoph Hellwig */
1418ae948fd6SChristoph Hellwig unsigned long hang_check = sysctl_hung_task_timeout_secs;
1419ae948fd6SChristoph Hellwig
1420ae948fd6SChristoph Hellwig if (hang_check)
142132ac5a9bSChristoph Hellwig while (!wait_for_completion_io_timeout(&wait.done,
14224054cff9SChristoph Hellwig hang_check * (HZ/2)))
14234054cff9SChristoph Hellwig ;
14244054cff9SChristoph Hellwig else
142532ac5a9bSChristoph Hellwig wait_for_completion_io(&wait.done);
1426ae948fd6SChristoph Hellwig }
14274054cff9SChristoph Hellwig
142832ac5a9bSChristoph Hellwig return wait.ret;
14294054cff9SChristoph Hellwig }
14304054cff9SChristoph Hellwig EXPORT_SYMBOL(blk_execute_rq);
14314054cff9SChristoph Hellwig
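/*
 * Illustrative synchronous counterpart: allocate, execute, translate the
 * status for callers that want a plain errno, then free.
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_status_to_errno(blk_execute_rq(rq, false));
 *	blk_mq_free_request(rq);
 */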
1432ed0791b2SChristoph Hellwig static void __blk_mq_requeue_request(struct request *rq)
1433320ae51fSJens Axboe {
1434320ae51fSJens Axboe struct request_queue *q = rq->q;
1435320ae51fSJens Axboe
1436923218f6SMing Lei blk_mq_put_driver_tag(rq);
1437923218f6SMing Lei
1438a54895faSChristoph Hellwig trace_block_rq_requeue(rq);
1439a7905043SJosef Bacik rq_qos_requeue(q, rq);
144049f5baa5SChristoph Hellwig
144112f5b931SKeith Busch if (blk_mq_request_started(rq)) {
144212f5b931SKeith Busch WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1443da661267SChristoph Hellwig rq->rq_flags &= ~RQF_TIMED_OUT;
1444320ae51fSJens Axboe }
1445e2490073SChristoph Hellwig }
1446320ae51fSJens Axboe
14472b053acaSBart Van Assche void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1448ed0791b2SChristoph Hellwig {
1449214a4418SChristoph Hellwig struct request_queue *q = rq->q;
14509a67aa52SChristoph Hellwig unsigned long flags;
1451214a4418SChristoph Hellwig
1452ed0791b2SChristoph Hellwig __blk_mq_requeue_request(rq);
1453ed0791b2SChristoph Hellwig
1454105976f5SMing Lei /* this request will be re-inserted to io scheduler queue */
1455105976f5SMing Lei blk_mq_sched_requeue_request(rq);
1456105976f5SMing Lei
14579a67aa52SChristoph Hellwig spin_lock_irqsave(&q->requeue_lock, flags);
14589a67aa52SChristoph Hellwig list_add_tail(&rq->queuelist, &q->requeue_list);
14599a67aa52SChristoph Hellwig spin_unlock_irqrestore(&q->requeue_lock, flags);
1460214a4418SChristoph Hellwig
1461214a4418SChristoph Hellwig if (kick_requeue_list)
1462214a4418SChristoph Hellwig blk_mq_kick_requeue_list(q);
1463ed0791b2SChristoph Hellwig }
1464ed0791b2SChristoph Hellwig EXPORT_SYMBOL(blk_mq_requeue_request);
1465ed0791b2SChristoph Hellwig
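/*
 * Typical use from a driver that hit a transient condition after the
 * request was started (illustrative; dm-rq follows this shape): hand the
 * request back without kicking the list, then kick it after a delay so
 * the retry doesn't spin.
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(rq->q, 100);	// delay in msecs
 */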
14666fca6a61SChristoph Hellwig static void blk_mq_requeue_work(struct work_struct *work)
14676fca6a61SChristoph Hellwig {
14686fca6a61SChristoph Hellwig struct request_queue *q =
14692849450aSMike Snitzer container_of(work, struct request_queue, requeue_work.work);
14706fca6a61SChristoph Hellwig LIST_HEAD(rq_list);
14719a67aa52SChristoph Hellwig LIST_HEAD(flush_list);
14729a67aa52SChristoph Hellwig struct request *rq;
14736fca6a61SChristoph Hellwig
147418e9781dSJens Axboe spin_lock_irq(&q->requeue_lock);
14756fca6a61SChristoph Hellwig list_splice_init(&q->requeue_list, &rq_list);
14769a67aa52SChristoph Hellwig list_splice_init(&q->flush_list, &flush_list);
147718e9781dSJens Axboe spin_unlock_irq(&q->requeue_lock);
14786fca6a61SChristoph Hellwig
14799a67aa52SChristoph Hellwig while (!list_empty(&rq_list)) {
14809a67aa52SChristoph Hellwig rq = list_entry(rq_list.next, struct request, queuelist);
1481a1e948b8SChristoph Hellwig /*
1482a1e948b8SChristoph Hellwig * If RQF_DONTPREP is set, the request has been started by the
1483a1e948b8SChristoph Hellwig * driver already and might have driver-specific data allocated
1484a1e948b8SChristoph Hellwig * already. Insert it into the hctx dispatch list to avoid
1485a1e948b8SChristoph Hellwig * block layer merges for the request.
1486a1e948b8SChristoph Hellwig */
1487a1e948b8SChristoph Hellwig if (rq->rq_flags & RQF_DONTPREP) {
14886fca6a61SChristoph Hellwig list_del_init(&rq->queuelist);
14892b597613SChristoph Hellwig blk_mq_request_bypass_insert(rq, 0);
14909a67aa52SChristoph Hellwig } else {
1491a1e948b8SChristoph Hellwig list_del_init(&rq->queuelist);
1492710fa378SChristoph Hellwig blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
14936fca6a61SChristoph Hellwig }
14946fca6a61SChristoph Hellwig }
14956fca6a61SChristoph Hellwig
14969a67aa52SChristoph Hellwig while (!list_empty(&flush_list)) {
14979a67aa52SChristoph Hellwig rq = list_entry(flush_list.next, struct request, queuelist);
14986fca6a61SChristoph Hellwig list_del_init(&rq->queuelist);
1499710fa378SChristoph Hellwig blk_mq_insert_request(rq, 0);
15006fca6a61SChristoph Hellwig }
15016fca6a61SChristoph Hellwig
150252d7f1b5SBart Van Assche blk_mq_run_hw_queues(q, false);
15036fca6a61SChristoph Hellwig }
15046fca6a61SChristoph Hellwig
15056fca6a61SChristoph Hellwig void blk_mq_kick_requeue_list(struct request_queue *q)
15066fca6a61SChristoph Hellwig {
1507ae943d20SBart Van Assche kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
15086fca6a61SChristoph Hellwig }
15096fca6a61SChristoph Hellwig EXPORT_SYMBOL(blk_mq_kick_requeue_list);
15106fca6a61SChristoph Hellwig
15112849450aSMike Snitzer void blk_mq_delay_kick_requeue_list(struct request_queue *q,
15122849450aSMike Snitzer unsigned long msecs)
15132849450aSMike Snitzer {
1514d4acf365SBart Van Assche kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
15152849450aSMike Snitzer msecs_to_jiffies(msecs));
15162849450aSMike Snitzer }
15172849450aSMike Snitzer EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
15182849450aSMike Snitzer
1519511f6025SMing Lei static bool blk_is_flush_data_rq(struct request *rq)
1520511f6025SMing Lei {
1521511f6025SMing Lei return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1522511f6025SMing Lei }
1523511f6025SMing Lei
15242dd6532eSJohn Garry static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1525ae879912SJens Axboe {
1526ae879912SJens Axboe /*
15278ab30a33SJohn Garry * If we find a request that isn't idle we know the queue is busy
15288ab30a33SJohn Garry * as it's checked in the iter.
15298ab30a33SJohn Garry * Return false to stop the iteration.
1530511f6025SMing Lei *
1531511f6025SMing Lei * In case of queue quiesce, don't count a completed flush data
1532511f6025SMing Lei * request as inflight: the flush sequence is suspended, so the
1533511f6025SMing Lei * flush data request is invisible to the driver, just like the
1534511f6025SMing Lei * other requests left pending by the quiesce.
1535ae879912SJens Axboe */
1536511f6025SMing Lei if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1537511f6025SMing Lei blk_is_flush_data_rq(rq) &&
1538511f6025SMing Lei blk_mq_request_completed(rq))) {
1539ae879912SJens Axboe bool *busy = priv;
1540ae879912SJens Axboe
1541ae879912SJens Axboe *busy = true;
1542ae879912SJens Axboe return false;
1543ae879912SJens Axboe }
1544ae879912SJens Axboe
1545ae879912SJens Axboe return true;
1546ae879912SJens Axboe }
1547ae879912SJens Axboe
15483c94d83cSJens Axboe bool blk_mq_queue_inflight(struct request_queue *q)
1549ae879912SJens Axboe {
1550ae879912SJens Axboe bool busy = false;
1551ae879912SJens Axboe
15523c94d83cSJens Axboe blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1553ae879912SJens Axboe return busy;
1554ae879912SJens Axboe }
15553c94d83cSJens Axboe EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1556ae879912SJens Axboe
15579bdb4833SJohn Garry static void blk_mq_rq_timed_out(struct request *req)
1558320ae51fSJens Axboe {
1559da661267SChristoph Hellwig req->rq_flags |= RQF_TIMED_OUT;
1560d1210d5aSChristoph Hellwig if (req->q->mq_ops->timeout) {
1561d1210d5aSChristoph Hellwig enum blk_eh_timer_return ret;
156287ee7b11SJens Axboe
15639bdb4833SJohn Garry ret = req->q->mq_ops->timeout(req);
1564d1210d5aSChristoph Hellwig if (ret == BLK_EH_DONE)
1565d1210d5aSChristoph Hellwig return;
1566d1210d5aSChristoph Hellwig WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
156787ee7b11SJens Axboe }
1568d1210d5aSChristoph Hellwig
1569d1210d5aSChristoph Hellwig blk_add_timer(req);
157087ee7b11SJens Axboe }
157187ee7b11SJens Axboe
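/*
 * The driver-side half of this, for illustration (hypothetical mydrv_*
 * names): a ->timeout() callback either finishes the request itself and
 * returns BLK_EH_DONE, or asks for the timer to be re-armed.
 *
 *	static enum blk_eh_timer_return mydrv_timeout(struct request *rq)
 *	{
 *		if (mydrv_abort_cmd(blk_mq_rq_to_pdu(rq))) {
 *			blk_mq_complete_request(rq);
 *			return BLK_EH_DONE;
 *		}
 *		return BLK_EH_RESET_TIMER;
 *	}
 */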
157282c22947SDavid Jeffery struct blk_expired_data {
157382c22947SDavid Jeffery bool has_timedout_rq;
157482c22947SDavid Jeffery unsigned long next;
157582c22947SDavid Jeffery unsigned long timeout_start;
157682c22947SDavid Jeffery };
157782c22947SDavid Jeffery
157882c22947SDavid Jeffery static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
157912f5b931SKeith Busch {
158012f5b931SKeith Busch unsigned long deadline;
158112f5b931SKeith Busch
158212f5b931SKeith Busch if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
158312f5b931SKeith Busch return false;
1584da661267SChristoph Hellwig if (rq->rq_flags & RQF_TIMED_OUT)
1585da661267SChristoph Hellwig return false;
158612f5b931SKeith Busch
1587079076b3SChristoph Hellwig deadline = READ_ONCE(rq->deadline);
158882c22947SDavid Jeffery if (time_after_eq(expired->timeout_start, deadline))
158912f5b931SKeith Busch return true;
159012f5b931SKeith Busch
159182c22947SDavid Jeffery if (expired->next == 0)
159282c22947SDavid Jeffery expired->next = deadline;
159382c22947SDavid Jeffery else if (time_after(expired->next, deadline))
159482c22947SDavid Jeffery expired->next = deadline;
159512f5b931SKeith Busch return false;
159612f5b931SKeith Busch }
159712f5b931SKeith Busch
15982e315dc0SMing Lei void blk_mq_put_rq_ref(struct request *rq)
15992e315dc0SMing Lei {
1600de671d61SJens Axboe if (is_flush_rq(rq)) {
1601de671d61SJens Axboe if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1602de671d61SJens Axboe blk_mq_free_request(rq);
1603de671d61SJens Axboe } else if (req_ref_put_and_test(rq)) {
16042e315dc0SMing Lei __blk_mq_free_request(rq);
16052e315dc0SMing Lei }
1606de671d61SJens Axboe }
16072e315dc0SMing Lei
16082dd6532eSJohn Garry static bool blk_mq_check_expired(struct request *rq, void *priv)
1609320ae51fSJens Axboe {
161082c22947SDavid Jeffery struct blk_expired_data *expired = priv;
161181481eb4SChristoph Hellwig
161212f5b931SKeith Busch /*
1613c797b40cSMing Lei * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1614c797b40cSMing Lei * be reallocated underneath the timeout handler's processing, then
1615c797b40cSMing Lei * the expire check is reliable. If the request is not expired, then
1616c797b40cSMing Lei * it was completed and reallocated as a new request after returning
1617c797b40cSMing Lei * from blk_mq_check_expired().
161812f5b931SKeith Busch */
161982c22947SDavid Jeffery if (blk_mq_req_expired(rq, expired)) {
162082c22947SDavid Jeffery expired->has_timedout_rq = true;
162182c22947SDavid Jeffery return false;
162282c22947SDavid Jeffery }
162382c22947SDavid Jeffery return true;
162482c22947SDavid Jeffery }
162582c22947SDavid Jeffery
162682c22947SDavid Jeffery static bool blk_mq_handle_expired(struct request *rq, void *priv)
162782c22947SDavid Jeffery {
162882c22947SDavid Jeffery struct blk_expired_data *expired = priv;
162982c22947SDavid Jeffery
163082c22947SDavid Jeffery if (blk_mq_req_expired(rq, expired))
16319bdb4833SJohn Garry blk_mq_rq_timed_out(rq);
16327baa8572SJens Axboe return true;
16331d9bd516STejun Heo }
16341d9bd516STejun Heo
1635287922ebSChristoph Hellwig static void blk_mq_timeout_work(struct work_struct *work)
163681481eb4SChristoph Hellwig {
1637287922ebSChristoph Hellwig struct request_queue *q =
1638287922ebSChristoph Hellwig container_of(work, struct request_queue, timeout_work);
163982c22947SDavid Jeffery struct blk_expired_data expired = {
164082c22947SDavid Jeffery .timeout_start = jiffies,
164182c22947SDavid Jeffery };
16421d9bd516STejun Heo struct blk_mq_hw_ctx *hctx;
16434f481208SMing Lei unsigned long i;
1644320ae51fSJens Axboe
164571f79fb3SGabriel Krisman Bertazi /* A deadlock might occur if a request is stuck requiring a
164671f79fb3SGabriel Krisman Bertazi * timeout at the same time a queue freeze is waiting for
164771f79fb3SGabriel Krisman Bertazi * completion, since the timeout code would not be able to
164871f79fb3SGabriel Krisman Bertazi * acquire the queue reference here.
164971f79fb3SGabriel Krisman Bertazi *
165071f79fb3SGabriel Krisman Bertazi * That's why we don't use blk_queue_enter here; instead, we use
165171f79fb3SGabriel Krisman Bertazi * percpu_ref_tryget directly, because we need to be able to
165271f79fb3SGabriel Krisman Bertazi * obtain a reference even in the short window between the queue
165371f79fb3SGabriel Krisman Bertazi * starting to freeze, by dropping the first reference in
16541671d522SMing Lei * blk_freeze_queue_start, and the moment the last request is
165571f79fb3SGabriel Krisman Bertazi * consumed, marked by the instant q_usage_counter reaches
165671f79fb3SGabriel Krisman Bertazi * zero.
165771f79fb3SGabriel Krisman Bertazi */
165871f79fb3SGabriel Krisman Bertazi if (!percpu_ref_tryget(&q->q_usage_counter))
1659287922ebSChristoph Hellwig return;
1660287922ebSChristoph Hellwig
166182c22947SDavid Jeffery /* check if there is any timed-out request */
166282c22947SDavid Jeffery blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
166382c22947SDavid Jeffery if (expired.has_timedout_rq) {
166482c22947SDavid Jeffery /*
166582c22947SDavid Jeffery * Before walking tags, we must ensure any submit started
166682c22947SDavid Jeffery * before the current time has finished. Since the submit
166782c22947SDavid Jeffery * uses srcu or rcu, wait for a synchronization point to
166882c22947SDavid Jeffery * ensure all running submits have finished
166982c22947SDavid Jeffery */
1670483239c7SChristoph Hellwig blk_mq_wait_quiesce_done(q->tag_set);
1671320ae51fSJens Axboe
167282c22947SDavid Jeffery expired.next = 0;
167382c22947SDavid Jeffery blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
167482c22947SDavid Jeffery }
167582c22947SDavid Jeffery
167682c22947SDavid Jeffery if (expired.next != 0) {
167782c22947SDavid Jeffery mod_timer(&q->timeout, expired.next);
16780d2602caSJens Axboe } else {
1679fcd36c36SBart Van Assche /*
1680fcd36c36SBart Van Assche * Request timeouts are handled as a forward rolling timer. If
1681fcd36c36SBart Van Assche * we end up here it means that no requests are pending and
1682fcd36c36SBart Van Assche * also that no request has been pending for a while. Mark
1683fcd36c36SBart Van Assche * each hctx as idle.
1684fcd36c36SBart Van Assche */
1685f054b56cSMing Lei queue_for_each_hw_ctx(q, hctx, i) {
1686f054b56cSMing Lei /* the hctx may be unmapped, so check it here */
1687f054b56cSMing Lei if (blk_mq_hw_queue_mapped(hctx))
16880d2602caSJens Axboe blk_mq_tag_idle(hctx);
16890d2602caSJens Axboe }
1690320ae51fSJens Axboe }
1691287922ebSChristoph Hellwig blk_queue_exit(q);
1692f054b56cSMing Lei }
1693320ae51fSJens Axboe
169488459642SOmar Sandoval struct flush_busy_ctx_data {
169588459642SOmar Sandoval struct blk_mq_hw_ctx *hctx;
169688459642SOmar Sandoval struct list_head *list;
169788459642SOmar Sandoval };
169888459642SOmar Sandoval
169988459642SOmar Sandoval static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
170088459642SOmar Sandoval {
170188459642SOmar Sandoval struct flush_busy_ctx_data *flush_data = data;
170288459642SOmar Sandoval struct blk_mq_hw_ctx *hctx = flush_data->hctx;
170388459642SOmar Sandoval struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1704c16d6b5aSMing Lei enum hctx_type type = hctx->type;
170588459642SOmar Sandoval
170688459642SOmar Sandoval spin_lock(&ctx->lock);
1707c16d6b5aSMing Lei list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1708e9a99a63SOmar Sandoval sbitmap_clear_bit(sb, bitnr);
170988459642SOmar Sandoval spin_unlock(&ctx->lock);
171088459642SOmar Sandoval return true;
171188459642SOmar Sandoval }
171288459642SOmar Sandoval
1713320ae51fSJens Axboe /*
17141429d7c9SJens Axboe * Process software queues that have been marked busy, splicing them
17151429d7c9SJens Axboe * to the for-dispatch
17161429d7c9SJens Axboe */
17172c3ad667SJens Axboe void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
17181429d7c9SJens Axboe {
171988459642SOmar Sandoval struct flush_busy_ctx_data data = {
172088459642SOmar Sandoval .hctx = hctx,
172188459642SOmar Sandoval .list = list,
172288459642SOmar Sandoval };
17231429d7c9SJens Axboe
172488459642SOmar Sandoval sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
17251429d7c9SJens Axboe }
17262c3ad667SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
17271429d7c9SJens Axboe
1728b347689fSMing Lei struct dispatch_rq_data {
1729b347689fSMing Lei struct blk_mq_hw_ctx *hctx;
1730b347689fSMing Lei struct request *rq;
1731b347689fSMing Lei };
1732b347689fSMing Lei
1733b347689fSMing Lei static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1734b347689fSMing Lei void *data)
1735b347689fSMing Lei {
1736b347689fSMing Lei struct dispatch_rq_data *dispatch_data = data;
1737b347689fSMing Lei struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1738b347689fSMing Lei struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1739c16d6b5aSMing Lei enum hctx_type type = hctx->type;
1740b347689fSMing Lei
1741b347689fSMing Lei spin_lock(&ctx->lock);
1742c16d6b5aSMing Lei if (!list_empty(&ctx->rq_lists[type])) {
1743c16d6b5aSMing Lei dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1744b347689fSMing Lei list_del_init(&dispatch_data->rq->queuelist);
1745c16d6b5aSMing Lei if (list_empty(&ctx->rq_lists[type]))
1746b347689fSMing Lei sbitmap_clear_bit(sb, bitnr);
1747b347689fSMing Lei }
1748b347689fSMing Lei spin_unlock(&ctx->lock);
1749b347689fSMing Lei
1750b347689fSMing Lei return !dispatch_data->rq;
1751b347689fSMing Lei }
1752b347689fSMing Lei
1753b347689fSMing Lei struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1754b347689fSMing Lei struct blk_mq_ctx *start)
1755b347689fSMing Lei {
1756f31967f0SJens Axboe unsigned off = start ? start->index_hw[hctx->type] : 0;
1757b347689fSMing Lei struct dispatch_rq_data data = {
1758b347689fSMing Lei .hctx = hctx,
1759b347689fSMing Lei .rq = NULL,
1760b347689fSMing Lei };
1761b347689fSMing Lei
1762b347689fSMing Lei __sbitmap_for_each_set(&hctx->ctx_map, off,
1763b347689fSMing Lei dispatch_rq_from_ctx, &data);
1764b347689fSMing Lei
1765b347689fSMing Lei return data.rq;
1766b347689fSMing Lei }
1767b347689fSMing Lei
1768a808a9d5SJens Axboe static bool __blk_mq_alloc_driver_tag(struct request *rq)
1769703fd1c0SJens Axboe {
1770ae0f1a73SJohn Garry struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1771570e9b73SMing Lei unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1772570e9b73SMing Lei int tag;
1773570e9b73SMing Lei
1774568f2700SMing Lei blk_mq_tag_busy(rq->mq_hctx);
1775568f2700SMing Lei
1776570e9b73SMing Lei if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1777ae0f1a73SJohn Garry bt = &rq->mq_hctx->tags->breserved_tags;
1778570e9b73SMing Lei tag_offset = 0;
177928500850SMing Lei } else {
1780570e9b73SMing Lei if (!hctx_may_queue(rq->mq_hctx, bt))
1781570e9b73SMing Lei return false;
178228500850SMing Lei }
178328500850SMing Lei
1784570e9b73SMing Lei tag = __sbitmap_queue_get(bt);
1785570e9b73SMing Lei if (tag == BLK_MQ_NO_TAG)
1786570e9b73SMing Lei return false;
1787570e9b73SMing Lei
1788570e9b73SMing Lei rq->tag = tag + tag_offset;
1789570e9b73SMing Lei return true;
1790570e9b73SMing Lei }
1791570e9b73SMing Lei
1792a808a9d5SJens Axboe bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1793570e9b73SMing Lei {
1794a808a9d5SJens Axboe if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1795568f2700SMing Lei return false;
1796568f2700SMing Lei
179751db1c37SMing Lei if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1798568f2700SMing Lei !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1799568f2700SMing Lei rq->rq_flags |= RQF_MQ_INFLIGHT;
1800bccf5e26SJohn Garry __blk_mq_inc_active_requests(hctx);
1801568f2700SMing Lei }
1802568f2700SMing Lei hctx->tags->rqs[rq->tag] = rq;
1803570e9b73SMing Lei return true;
1804570e9b73SMing Lei }
1805570e9b73SMing Lei
1806eb619fdbSJens Axboe static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1807eb619fdbSJens Axboe int flags, void *key)
1808da55f2ccSOmar Sandoval {
1809da55f2ccSOmar Sandoval struct blk_mq_hw_ctx *hctx;
1810da55f2ccSOmar Sandoval
1811da55f2ccSOmar Sandoval hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1812da55f2ccSOmar Sandoval
18135815839bSMing Lei spin_lock(&hctx->dispatch_wait_lock);
1814e8618575SJens Axboe if (!list_empty(&wait->entry)) {
1815e8618575SJens Axboe struct sbitmap_queue *sbq;
1816e8618575SJens Axboe
1817eb619fdbSJens Axboe list_del_init(&wait->entry);
1818ae0f1a73SJohn Garry sbq = &hctx->tags->bitmap_tags;
1819e8618575SJens Axboe atomic_dec(&sbq->ws_active);
1820e8618575SJens Axboe }
18215815839bSMing Lei spin_unlock(&hctx->dispatch_wait_lock);
18225815839bSMing Lei
1823da55f2ccSOmar Sandoval blk_mq_run_hw_queue(hctx, true);
1824da55f2ccSOmar Sandoval return 1;
1825da55f2ccSOmar Sandoval }
1826da55f2ccSOmar Sandoval
1827f906a6a0SJens Axboe /*
1828f906a6a0SJens Axboe * Mark us as waiting for a tag. For shared tags, this involves hooking us
1829ee3e4de5SBart Van Assche * into the tag wakeups. For non-shared tags, we can simply mark ourselves
1830ee3e4de5SBart Van Assche * as needing a restart. In both cases, take care to check the condition
1831f906a6a0SJens Axboe * again after marking us as waiting.
1832f906a6a0SJens Axboe */
18332278d69fSMing Lei static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1834eb619fdbSJens Axboe struct request *rq)
1835da55f2ccSOmar Sandoval {
183698b99e94SKemeng Shi struct sbitmap_queue *sbq;
18375815839bSMing Lei struct wait_queue_head *wq;
1838f906a6a0SJens Axboe wait_queue_entry_t *wait;
1839f906a6a0SJens Axboe bool ret;
1840da55f2ccSOmar Sandoval
184147df9ce9SKemeng Shi if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
184247df9ce9SKemeng Shi !(blk_mq_is_shared_tags(hctx->flags))) {
1843684b7324SYufen Yu blk_mq_sched_mark_restart_hctx(hctx);
1844c27d53fbSBart Van Assche
1845c27d53fbSBart Van Assche /*
1846c27d53fbSBart Van Assche * It's possible that a tag was freed in the window between the
1847c27d53fbSBart Van Assche * allocation failure and adding the hardware queue to the wait
1848c27d53fbSBart Van Assche * queue.
1849c27d53fbSBart Van Assche *
1850c27d53fbSBart Van Assche * Don't clear RESTART here, someone else could have set it.
1851c27d53fbSBart Van Assche * At most this will cost an extra queue run.
1852c27d53fbSBart Van Assche */
18538ab6bb9eSMing Lei return blk_mq_get_driver_tag(rq);
1854c27d53fbSBart Van Assche }
1855c27d53fbSBart Van Assche
18562278d69fSMing Lei wait = &hctx->dispatch_wait;
1857eb619fdbSJens Axboe if (!list_empty_careful(&wait->entry))
1858da55f2ccSOmar Sandoval return false;
1859da55f2ccSOmar Sandoval
186098b99e94SKemeng Shi if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
186198b99e94SKemeng Shi sbq = &hctx->tags->breserved_tags;
186298b99e94SKemeng Shi else
186398b99e94SKemeng Shi sbq = &hctx->tags->bitmap_tags;
1864e8618575SJens Axboe wq = &bt_wait_ptr(sbq, hctx)->wait;
18655815839bSMing Lei
18665815839bSMing Lei spin_lock_irq(&wq->lock);
18675815839bSMing Lei spin_lock(&hctx->dispatch_wait_lock);
1868eb619fdbSJens Axboe if (!list_empty(&wait->entry)) {
18695815839bSMing Lei spin_unlock(&hctx->dispatch_wait_lock);
18705815839bSMing Lei spin_unlock_irq(&wq->lock);
1871eb619fdbSJens Axboe return false;
1872eb619fdbSJens Axboe }
1873eb619fdbSJens Axboe
1874e8618575SJens Axboe atomic_inc(&sbq->ws_active);
18755815839bSMing Lei wait->flags &= ~WQ_FLAG_EXCLUSIVE;
18765815839bSMing Lei __add_wait_queue(wq, wait);
1877da55f2ccSOmar Sandoval
1878da55f2ccSOmar Sandoval /*
18796d8b0162SMing Lei * Add an explicit barrier since blk_mq_get_driver_tag() may
18806d8b0162SMing Lei * not imply one in case of failure.
18816d8b0162SMing Lei *
18826d8b0162SMing Lei * Order adding us to wait queue and allocating driver tag.
18836d8b0162SMing Lei *
18846d8b0162SMing Lei * The pair is the one implied in sbitmap_queue_wake_up() which
18856d8b0162SMing Lei * orders clearing sbitmap tag bits and waitqueue_active() in
18866d8b0162SMing Lei * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless
18876d8b0162SMing Lei *
18886d8b0162SMing Lei * Otherwise, re-order of adding wait queue and getting driver tag
18896d8b0162SMing Lei * may cause __sbitmap_queue_wake_up() to wake up nothing because
18906d8b0162SMing Lei * the waitqueue_active() may not observe us in wait queue.
18916d8b0162SMing Lei */
18926d8b0162SMing Lei smp_mb();
18936d8b0162SMing Lei
18946d8b0162SMing Lei /*
1895eb619fdbSJens Axboe * It's possible that a tag was freed in the window between the
1896eb619fdbSJens Axboe * allocation failure and adding the hardware queue to the wait
1897eb619fdbSJens Axboe * queue.
1898da55f2ccSOmar Sandoval */
18998ab6bb9eSMing Lei ret = blk_mq_get_driver_tag(rq);
1900f906a6a0SJens Axboe if (!ret) {
19015815839bSMing Lei spin_unlock(&hctx->dispatch_wait_lock);
19025815839bSMing Lei spin_unlock_irq(&wq->lock);
1903eb619fdbSJens Axboe return false;
1904eb619fdbSJens Axboe }
1905eb619fdbSJens Axboe
1906eb619fdbSJens Axboe /*
1907eb619fdbSJens Axboe * We got a tag, remove ourselves from the wait queue to ensure
1908eb619fdbSJens Axboe * someone else gets the wakeup.
1909eb619fdbSJens Axboe */
1910eb619fdbSJens Axboe list_del_init(&wait->entry);
1911e8618575SJens Axboe atomic_dec(&sbq->ws_active);
19125815839bSMing Lei spin_unlock(&hctx->dispatch_wait_lock);
19135815839bSMing Lei spin_unlock_irq(&wq->lock);
1914c27d53fbSBart Van Assche
1915da55f2ccSOmar Sandoval return true;
1916da55f2ccSOmar Sandoval }
1917da55f2ccSOmar Sandoval
19186e768717SMing Lei #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
19196e768717SMing Lei #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
19206e768717SMing Lei /*
19216e768717SMing Lei * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
19226e768717SMing Lei * - EWMA is a simple way to compute a running average value
19236e768717SMing Lei * - a weight (7/8 and 1/8) is applied so that it can decrease exponentially
19246e768717SMing Lei * - a factor of 4 is applied to avoid getting a too small (i.e. zero)
19256e768717SMing Lei * result; the exact factor doesn't matter since EWMA decreases exponentially
19266e768717SMing Lei */
19276e768717SMing Lei static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
19286e768717SMing Lei {
19296e768717SMing Lei unsigned int ewma;
19306e768717SMing Lei
19316e768717SMing Lei ewma = hctx->dispatch_busy;
19326e768717SMing Lei
19336e768717SMing Lei if (!ewma && !busy)
19346e768717SMing Lei return;
19356e768717SMing Lei
19366e768717SMing Lei ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
19376e768717SMing Lei if (busy)
19386e768717SMing Lei ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
19396e768717SMing Lei ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
19406e768717SMing Lei
19416e768717SMing Lei hctx->dispatch_busy = ewma;
19426e768717SMing Lei }
19436e768717SMing Lei
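/*
 * Worked example of the integer math above: starting from ewma == 0,
 * consecutive busy updates yield (0*7 + 16)/8 = 2, (2*7 + 16)/8 = 3,
 * (3*7 + 16)/8 = 4, ..., converging on 16, while consecutive idle updates
 * decay via ewma = ewma*7/8 back toward 0. Readers of dispatch_busy mostly
 * just test it for zero vs non-zero, so the absolute value matters little.
 */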
194486ff7c2aSMing Lei #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
194586ff7c2aSMing Lei
1946c92a4103SJohannes Thumshirn static void blk_mq_handle_dev_resource(struct request *rq,
1947c92a4103SJohannes Thumshirn struct list_head *list)
1948c92a4103SJohannes Thumshirn {
1949c92a4103SJohannes Thumshirn list_add(&rq->queuelist, list);
1950c92a4103SJohannes Thumshirn __blk_mq_requeue_request(rq);
1951c92a4103SJohannes Thumshirn }
1952c92a4103SJohannes Thumshirn
19530512a75bSKeith Busch static void blk_mq_handle_zone_resource(struct request *rq,
19540512a75bSKeith Busch struct list_head *zone_list)
19550512a75bSKeith Busch {
19560512a75bSKeith Busch /*
19570512a75bSKeith Busch * If we end up here it is because we cannot dispatch a request to a
19580512a75bSKeith Busch * specific zone due to LLD level zone-write locking or other zone
19590512a75bSKeith Busch * related resource not being available. In this case, set the request
19600512a75bSKeith Busch * aside in zone_list for retrying it later.
19610512a75bSKeith Busch */
19620512a75bSKeith Busch list_add(&rq->queuelist, zone_list);
19630512a75bSKeith Busch __blk_mq_requeue_request(rq);
19640512a75bSKeith Busch }
19650512a75bSKeith Busch
196675383524SMing Lei enum prep_dispatch {
196775383524SMing Lei PREP_DISPATCH_OK,
196875383524SMing Lei PREP_DISPATCH_NO_TAG,
196975383524SMing Lei PREP_DISPATCH_NO_BUDGET,
197075383524SMing Lei };
197175383524SMing Lei
197275383524SMing Lei static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
197375383524SMing Lei bool need_budget)
1974f04c3df3SJens Axboe {
197575383524SMing Lei struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
19762a5a24aaSMing Lei int budget_token = -1;
1977f04c3df3SJens Axboe
19782a5a24aaSMing Lei if (need_budget) {
19792a5a24aaSMing Lei budget_token = blk_mq_get_dispatch_budget(rq->q);
19802a5a24aaSMing Lei if (budget_token < 0) {
19815fe56de7SJohn Garry blk_mq_put_driver_tag(rq);
198275383524SMing Lei return PREP_DISPATCH_NO_BUDGET;
19835fe56de7SJohn Garry }
19842a5a24aaSMing Lei blk_mq_set_rq_budget_token(rq, budget_token);
19852a5a24aaSMing Lei }
19860bca799bSMing Lei
19878ab6bb9eSMing Lei if (!blk_mq_get_driver_tag(rq)) {
19883c782d67SJens Axboe /*
1989da55f2ccSOmar Sandoval * The initial allocation attempt failed, so we need to
1990eb619fdbSJens Axboe * rerun the hardware queue when a tag is freed. The
1991eb619fdbSJens Axboe * waitqueue takes care of that. If the queue is run
1992eb619fdbSJens Axboe * before we add this entry back on the dispatch list,
1993eb619fdbSJens Axboe * we'll re-run it below.
19943c782d67SJens Axboe */
19952278d69fSMing Lei if (!blk_mq_mark_tag_wait(hctx, rq)) {
1996f906a6a0SJens Axboe /*
19971fd40b5eSMing Lei * All budgets not got from this function will be put
19981fd40b5eSMing Lei * together during handling partial dispatch
1999f906a6a0SJens Axboe */
20001fd40b5eSMing Lei if (need_budget)
20012a5a24aaSMing Lei blk_mq_put_dispatch_budget(rq->q, budget_token);
200275383524SMing Lei return PREP_DISPATCH_NO_TAG;
200375383524SMing Lei }
200475383524SMing Lei }
200575383524SMing Lei
200675383524SMing Lei return PREP_DISPATCH_OK;
200775383524SMing Lei }
200875383524SMing Lei
20091fd40b5eSMing Lei /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
20101fd40b5eSMing Lei static void blk_mq_release_budgets(struct request_queue *q,
20112a5a24aaSMing Lei struct list_head *list)
20121fd40b5eSMing Lei {
20132a5a24aaSMing Lei struct request *rq;
20141fd40b5eSMing Lei
20152a5a24aaSMing Lei list_for_each_entry(rq, list, queuelist) {
20162a5a24aaSMing Lei int budget_token = blk_mq_get_rq_budget_token(rq);
20172a5a24aaSMing Lei
20182a5a24aaSMing Lei if (budget_token >= 0)
20192a5a24aaSMing Lei blk_mq_put_dispatch_budget(q, budget_token);
20202a5a24aaSMing Lei }
20211fd40b5eSMing Lei }
20221fd40b5eSMing Lei
20231429d7c9SJens Axboe /*
202434c9f547SKemeng Shi * blk_mq_commit_rqs will notify the driver that there are no more requests,
202534c9f547SKemeng Shi * just as bd->last does. (See the comment on commit_rqs in struct
202634c9f547SKemeng Shi * blk_mq_ops for details.)
202734c9f547SKemeng Shi * Note that we must call this explicitly in two unusual cases:
202834c9f547SKemeng Shi * 1) did not queue everything initially scheduled to queue
202934c9f547SKemeng Shi * 2) the last attempt to queue a request failed
203034c9f547SKemeng Shi */
203134c9f547SKemeng Shi static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
203234c9f547SKemeng Shi bool from_schedule)
203334c9f547SKemeng Shi {
203434c9f547SKemeng Shi if (hctx->queue->mq_ops->commit_rqs && queued) {
203534c9f547SKemeng Shi trace_block_unplug(hctx->queue, queued, !from_schedule);
203634c9f547SKemeng Shi hctx->queue->mq_ops->commit_rqs(hctx);
203734c9f547SKemeng Shi }
203834c9f547SKemeng Shi }
203934c9f547SKemeng Shi
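/*
 * Driver-side sketch of this contract (hypothetical mydrv_* names):
 * submission queue entries are staged per ->queue_rq() call, and the
 * doorbell is only rung when bd->last is set or from ->commit_rqs(),
 * which covers the two unusual cases listed above.
 *
 *	static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)
 *	{
 *		mydrv_ring_doorbell(hctx->driver_data);
 *	}
 */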
204034c9f547SKemeng Shi /*
20411429d7c9SJens Axboe * Returns true if we did some work AND can potentially do more.
20421429d7c9SJens Axboe */
2043445874e8SMing Lei bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
20441fd40b5eSMing Lei unsigned int nr_budgets)
20451429d7c9SJens Axboe {
204675383524SMing Lei enum prep_dispatch prep;
2047445874e8SMing Lei struct request_queue *q = hctx->queue;
2048f1ce99f7SKemeng Shi struct request *rq;
20494ea58fe4SKemeng Shi int queued;
2050703fd1c0SJens Axboe blk_status_t ret = BLK_STS_OK;
2051703fd1c0SJens Axboe LIST_HEAD(zone_list);
20529586e67bSNaohiro Aota bool needs_resource = false;
20531429d7c9SJens Axboe
20541429d7c9SJens Axboe if (list_empty(list))
2055f04c3df3SJens Axboe return false;
2056f04c3df3SJens Axboe
2057f04c3df3SJens Axboe /*
2058f04c3df3SJens Axboe * Now process all the entries, sending them to the driver.
2059f04c3df3SJens Axboe */
20604ea58fe4SKemeng Shi queued = 0;
2061f04c3df3SJens Axboe do {
2062f04c3df3SJens Axboe struct blk_mq_queue_data bd;
2063f04c3df3SJens Axboe
2064f04c3df3SJens Axboe rq = list_first_entry(list, struct request, queuelist);
2065f04c3df3SJens Axboe
2066445874e8SMing Lei WARN_ON_ONCE(hctx != rq->mq_hctx);
20671fd40b5eSMing Lei prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
206875383524SMing Lei if (prep != PREP_DISPATCH_OK)
2069bd166ef1SJens Axboe break;
2070de148297SMing Lei
2071f04c3df3SJens Axboe list_del_init(&rq->queuelist);
2072f04c3df3SJens Axboe
2073f04c3df3SJens Axboe bd.rq = rq;
2074f1ce99f7SKemeng Shi bd.last = list_empty(list);
2075f04c3df3SJens Axboe
20761fd40b5eSMing Lei /*
20771fd40b5eSMing Lei * Once the request is queued to the LLD, there is no need to
20781fd40b5eSMing Lei * cover the budget any more.
20791fd40b5eSMing Lei */
20801fd40b5eSMing Lei if (nr_budgets)
20811fd40b5eSMing Lei nr_budgets--;
2082f04c3df3SJens Axboe ret = q->mq_ops->queue_rq(hctx, &bd);
20837bf13729SMing Lei switch (ret) {
20847bf13729SMing Lei case BLK_STS_OK:
20857bf13729SMing Lei queued++;
2086f04c3df3SJens Axboe break;
20877bf13729SMing Lei case BLK_STS_RESOURCE:
20889586e67bSNaohiro Aota needs_resource = true;
20899586e67bSNaohiro Aota fallthrough;
20907bf13729SMing Lei case BLK_STS_DEV_RESOURCE:
20917bf13729SMing Lei blk_mq_handle_dev_resource(rq, list);
20927bf13729SMing Lei goto out;
20937bf13729SMing Lei case BLK_STS_ZONE_RESOURCE:
20940512a75bSKeith Busch /*
20950512a75bSKeith Busch * Move the request to zone_list and keep going through
20960512a75bSKeith Busch * the dispatch list to find more requests the drive can
20970512a75bSKeith Busch * accept.
20980512a75bSKeith Busch */
20990512a75bSKeith Busch blk_mq_handle_zone_resource(rq, &zone_list);
21009586e67bSNaohiro Aota needs_resource = true;
21010512a75bSKeith Busch break;
21027bf13729SMing Lei default:
2103e21ee5a6SHannes Reinecke blk_mq_end_request(rq, ret);
2104fc17b653SChristoph Hellwig }
210581380ca1SOmar Sandoval } while (!list_empty(list));
21067bf13729SMing Lei out:
21070512a75bSKeith Busch if (!list_empty(&zone_list))
21080512a75bSKeith Busch list_splice_tail_init(&zone_list, list);
21090512a75bSKeith Busch
2110632bfb63Syangerkun /* If we didn't flush the entire list, we could have told the driver
2111632bfb63Syangerkun * there was more coming, but that turned out to be a lie.
2112632bfb63Syangerkun */
2113e4ef2e05SKemeng Shi if (!list_empty(list) || ret != BLK_STS_OK)
2114e4ef2e05SKemeng Shi blk_mq_commit_rqs(hctx, queued, false);
2115e4ef2e05SKemeng Shi
2116f04c3df3SJens Axboe /*
2117f04c3df3SJens Axboe * Any items that need requeuing? Stuff them into hctx->dispatch,
2118f04c3df3SJens Axboe * that is where we will continue on next queue run.
2119f04c3df3SJens Axboe */
2120f04c3df3SJens Axboe if (!list_empty(list)) {
212186ff7c2aSMing Lei bool needs_restart;
212275383524SMing Lei /* For non-shared tags, the RESTART check will suffice */
212375383524SMing Lei bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
212447df9ce9SKemeng Shi ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
212547df9ce9SKemeng Shi blk_mq_is_shared_tags(hctx->flags));
212686ff7c2aSMing Lei
21272a5a24aaSMing Lei if (nr_budgets)
21282a5a24aaSMing Lei blk_mq_release_budgets(q, list);
2129f04c3df3SJens Axboe
2130f04c3df3SJens Axboe spin_lock(&hctx->lock);
213101e99aecSMing Lei list_splice_tail_init(list, &hctx->dispatch);
2132f04c3df3SJens Axboe spin_unlock(&hctx->lock);
2133f04c3df3SJens Axboe
2134f04c3df3SJens Axboe /*
2135d7d8535fSMing Lei * Order adding requests to hctx->dispatch and checking
2136d7d8535fSMing Lei * SCHED_RESTART flag. The pair of this smp_mb() is the one
2137d7d8535fSMing Lei * in blk_mq_sched_restart(). Avoid restart code path to
2138d7d8535fSMing Lei * miss the new added requests to hctx->dispatch, meantime
2139d7d8535fSMing Lei * SCHED_RESTART is observed here.
2140d7d8535fSMing Lei */
2141d7d8535fSMing Lei smp_mb();
2142d7d8535fSMing Lei
2143d7d8535fSMing Lei /*
2144710c785fSBart Van Assche * If SCHED_RESTART was set by the caller of this function and
2145710c785fSBart Van Assche * it is no longer set that means that it was cleared by another
2146710c785fSBart Van Assche * thread and hence that a queue rerun is needed.
2147f04c3df3SJens Axboe *
2148eb619fdbSJens Axboe * If 'no_tag' is set, that means that we failed getting
2149eb619fdbSJens Axboe * a driver tag with an I/O scheduler attached. If our dispatch
2150eb619fdbSJens Axboe * waitqueue is no longer active, ensure that we run the queue
2151eb619fdbSJens Axboe * AFTER adding our entries back to the list.
2152bd166ef1SJens Axboe *
2153710c785fSBart Van Assche * If no I/O scheduler has been configured it is possible that
2154710c785fSBart Van Assche * the hardware queue got stopped and restarted before requests
2155710c785fSBart Van Assche * were pushed back onto the dispatch list. Rerun the queue to
2156710c785fSBart Van Assche * avoid starvation. Notes:
2157710c785fSBart Van Assche * - blk_mq_run_hw_queue() checks whether or not a queue has
2158710c785fSBart Van Assche * been stopped before rerunning a queue.
2159710c785fSBart Van Assche * - Some but not all block drivers stop a queue before
2160fc17b653SChristoph Hellwig * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2161710c785fSBart Van Assche * and dm-rq.
216286ff7c2aSMing Lei *
216386ff7c2aSMing Lei * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
216486ff7c2aSMing Lei * bit is set, run queue after a delay to avoid IO stalls
2165ab3cee37SDouglas Anderson 		 * that could otherwise occur if the queue is idle. We'll do
21669586e67bSNaohiro Aota 		 * the same if we couldn't get a budget or couldn't lock a zone
21679586e67bSNaohiro Aota 		 * and SCHED_RESTART is set.
2168bd166ef1SJens Axboe */
216986ff7c2aSMing Lei needs_restart = blk_mq_sched_needs_restart(hctx);
21709586e67bSNaohiro Aota if (prep == PREP_DISPATCH_NO_BUDGET)
21719586e67bSNaohiro Aota needs_resource = true;
217286ff7c2aSMing Lei if (!needs_restart ||
2173eb619fdbSJens Axboe (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2174f04c3df3SJens Axboe blk_mq_run_hw_queue(hctx, true);
21756d5e8d21SMiaohe Lin else if (needs_resource)
217686ff7c2aSMing Lei blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
21771f57f8d4SJens Axboe
21786e768717SMing Lei blk_mq_update_dispatch_busy(hctx, true);
21791f57f8d4SJens Axboe return false;
21804ea58fe4SKemeng Shi }
2181f04c3df3SJens Axboe
21824ea58fe4SKemeng Shi blk_mq_update_dispatch_busy(hctx, false);
21834ea58fe4SKemeng Shi return true;
2184f04c3df3SJens Axboe }
2185f04c3df3SJens Axboe
2186f82ddf19SMing Lei static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2187f82ddf19SMing Lei {
2188f82ddf19SMing Lei int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2189f82ddf19SMing Lei
2190f82ddf19SMing Lei if (cpu >= nr_cpu_ids)
2191f82ddf19SMing Lei cpu = cpumask_first(hctx->cpumask);
2192f82ddf19SMing Lei return cpu;
2193f82ddf19SMing Lei }
2194f82ddf19SMing Lei
2195506e931fSJens Axboe /*
2196506e931fSJens Axboe * It'd be great if the workqueue API had a way to pass
2197506e931fSJens Axboe * in a mask and had some smarts for more clever placement.
2198506e931fSJens Axboe * For now we just round-robin here, switching for every
2199506e931fSJens Axboe * BLK_MQ_CPU_WORK_BATCH queued items.
2200506e931fSJens Axboe */
2201506e931fSJens Axboe static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2202506e931fSJens Axboe {
22037bed4595SMing Lei bool tried = false;
2204476f8c98SMing Lei int next_cpu = hctx->next_cpu;
22057bed4595SMing Lei
2206b657d7e6SChristoph Hellwig if (hctx->queue->nr_hw_queues == 1)
2207b657d7e6SChristoph Hellwig return WORK_CPU_UNBOUND;
2208506e931fSJens Axboe
2209506e931fSJens Axboe if (--hctx->next_cpu_batch <= 0) {
22107bed4595SMing Lei select_cpu:
2211476f8c98SMing Lei next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
221220e4d813SChristoph Hellwig cpu_online_mask);
2213506e931fSJens Axboe if (next_cpu >= nr_cpu_ids)
2214f82ddf19SMing Lei next_cpu = blk_mq_first_mapped_cpu(hctx);
2215506e931fSJens Axboe hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2216506e931fSJens Axboe }
2217506e931fSJens Axboe
22187bed4595SMing Lei /*
22197bed4595SMing Lei 	 * Do an unbound schedule if we can't find an online CPU for this
22207bed4595SMing Lei 	 * hctx, which should only happen when handling the CPU DEAD path.
22217bed4595SMing Lei */
2222476f8c98SMing Lei if (!cpu_online(next_cpu)) {
22237bed4595SMing Lei if (!tried) {
22247bed4595SMing Lei tried = true;
22257bed4595SMing Lei goto select_cpu;
22267bed4595SMing Lei }
22277bed4595SMing Lei
22287bed4595SMing Lei /*
22297bed4595SMing Lei 		 * Make sure to re-select the CPU next time once CPUs in
22307bed4595SMing Lei 		 * hctx->cpumask become online again.
22317bed4595SMing Lei */
2232476f8c98SMing Lei hctx->next_cpu = next_cpu;
22337bed4595SMing Lei hctx->next_cpu_batch = 1;
22347bed4595SMing Lei return WORK_CPU_UNBOUND;
22357bed4595SMing Lei }
2236476f8c98SMing Lei
2237476f8c98SMing Lei hctx->next_cpu = next_cpu;
2238476f8c98SMing Lei return next_cpu;
2239b657d7e6SChristoph Hellwig }
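/*
 * Worked example of the round-robin batching above, assuming
 * BLK_MQ_CPU_WORK_BATCH is 8 (its current value) and hctx->cpumask
 * spans online CPUs 0-3: the first eight queue runs are scheduled on
 * CPU 0, the next eight on CPU 1, and so on, wrapping back to CPU 0
 * after CPU 3.
 */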
2240b657d7e6SChristoph Hellwig
2241105663f7SAndré Almeida /**
2242105663f7SAndré Almeida * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2243105663f7SAndré Almeida * @hctx: Pointer to the hardware queue to run.
2244fa94ba8aSMinwoo Im * @msecs: Milliseconds of delay to wait before running the queue.
2245105663f7SAndré Almeida *
2246105663f7SAndré Almeida * Run a hardware queue asynchronously with a delay of @msecs.
2247105663f7SAndré Almeida */
22487587a5aeSBart Van Assche void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
22497587a5aeSBart Van Assche {
22501aa8d875SChristoph Hellwig if (unlikely(blk_mq_hctx_stopped(hctx)))
22511aa8d875SChristoph Hellwig return;
22521aa8d875SChristoph Hellwig kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
22531aa8d875SChristoph Hellwig msecs_to_jiffies(msecs));
22547587a5aeSBart Van Assche }
22557587a5aeSBart Van Assche EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
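/*
 * Illustrative sketch (hypothetical driver; the "mydev" helpers are
 * assumptions, not kernel APIs): a ->queue_rq() path that hits a
 * transient shortage with no completion event to rearm the queue can
 * pair BLK_STS_DEV_RESOURCE with a delayed rerun:
 *
 *	if (!mydev_dma_channel_free(dev)) {
 *		blk_mq_delay_run_hw_queue(hctx, 3);
 *		return BLK_STS_DEV_RESOURCE;
 *	}
 */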
22567587a5aeSBart Van Assche
2257679b1874SMuchun Song static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)
2258679b1874SMuchun Song {
2259679b1874SMuchun Song bool need_run;
2260679b1874SMuchun Song
2261679b1874SMuchun Song /*
2262679b1874SMuchun Song 	 * When the queue is quiesced, we may be switching the io scheduler,
2263679b1874SMuchun Song 	 * updating nr_hw_queues, or doing other things; we can't run the queue
2264679b1874SMuchun Song 	 * any more, and even blk_mq_hctx_has_pending() can't be called safely.
2265679b1874SMuchun Song 	 *
2266679b1874SMuchun Song 	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
2267679b1874SMuchun Song 	 * quiesced.
2268679b1874SMuchun Song */
2269679b1874SMuchun Song __blk_mq_run_dispatch_ops(hctx->queue, false,
2270679b1874SMuchun Song need_run = !blk_queue_quiesced(hctx->queue) &&
2271679b1874SMuchun Song blk_mq_hctx_has_pending(hctx));
2272679b1874SMuchun Song return need_run;
2273679b1874SMuchun Song }
2274679b1874SMuchun Song
2275105663f7SAndré Almeida /**
2276105663f7SAndré Almeida * blk_mq_run_hw_queue - Start to run a hardware queue.
2277105663f7SAndré Almeida * @hctx: Pointer to the hardware queue to run.
2278105663f7SAndré Almeida * @async: If we want to run the queue asynchronously.
2279105663f7SAndré Almeida *
2280105663f7SAndré Almeida * Check if the request queue is not in a quiesced state and if there are
2281105663f7SAndré Almeida * pending requests to be sent. If this is true, run the queue to send requests
2282105663f7SAndré Almeida * to hardware.
2283105663f7SAndré Almeida */
2284626fb735SJohn Garry void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
22857587a5aeSBart Van Assche {
228624f5a90fSMing Lei bool need_run;
228724f5a90fSMing Lei
228824f5a90fSMing Lei /*
22894d5bba5bSChristoph Hellwig * We can't run the queue inline with interrupts disabled.
22904d5bba5bSChristoph Hellwig */
22914d5bba5bSChristoph Hellwig WARN_ON_ONCE(!async && in_interrupt());
22924d5bba5bSChristoph Hellwig
229365a558f6SBart Van Assche might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
229465a558f6SBart Van Assche
2295679b1874SMuchun Song need_run = blk_mq_hw_queue_need_run(hctx);
2296679b1874SMuchun Song if (!need_run) {
2297679b1874SMuchun Song unsigned long flags;
2298679b1874SMuchun Song
22994d5bba5bSChristoph Hellwig /*
2300679b1874SMuchun Song 		 * Synchronize with blk_mq_unquiesce_queue(): because we checked
2301679b1874SMuchun Song 		 * locklessly above whether the hw queue is quiesced, we need to
2302679b1874SMuchun Song 		 * take ->queue_lock to make sure we see the up-to-date status
2303679b1874SMuchun Song 		 * and do not miss rerunning the hw queue.
230424f5a90fSMing Lei */
2305679b1874SMuchun Song spin_lock_irqsave(&hctx->queue->queue_lock, flags);
2306679b1874SMuchun Song need_run = blk_mq_hw_queue_need_run(hctx);
2307679b1874SMuchun Song spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
230824f5a90fSMing Lei
23091aa8d875SChristoph Hellwig if (!need_run)
23101aa8d875SChristoph Hellwig return;
2311679b1874SMuchun Song }
23121aa8d875SChristoph Hellwig
231365a558f6SBart Van Assche if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
23141aa8d875SChristoph Hellwig blk_mq_delay_run_hw_queue(hctx, 0);
23151aa8d875SChristoph Hellwig return;
23161aa8d875SChristoph Hellwig }
23171aa8d875SChristoph Hellwig
23184d5bba5bSChristoph Hellwig blk_mq_run_dispatch_ops(hctx->queue,
23194d5bba5bSChristoph Hellwig blk_mq_sched_dispatch_requests(hctx));
2320320ae51fSJens Axboe }
23215b727272SOmar Sandoval EXPORT_SYMBOL(blk_mq_run_hw_queue);
2322320ae51fSJens Axboe
2323b6e68ee8SJan Kara /*
2324b6e68ee8SJan Kara  * Return the preferred queue to dispatch from (if any) for a
2325b6e68ee8SJan Kara  * non-mq-aware IO scheduler.
2326b6e68ee8SJan Kara */
2327b6e68ee8SJan Kara static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2328b6e68ee8SJan Kara {
23295d05426eSMing Lei struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2330b6e68ee8SJan Kara /*
2331b6e68ee8SJan Kara * If the IO scheduler does not respect hardware queues when
2332b6e68ee8SJan Kara * dispatching, we just don't bother with multiple HW queues and
2333b6e68ee8SJan Kara * dispatch from hctx for the current CPU since running multiple queues
2334b6e68ee8SJan Kara * just causes lock contention inside the scheduler and pointless cache
2335b6e68ee8SJan Kara * bouncing.
2336b6e68ee8SJan Kara */
233751ab80f0SBart Van Assche struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
23385d05426eSMing Lei
2339b6e68ee8SJan Kara if (!blk_mq_hctx_stopped(hctx))
2340b6e68ee8SJan Kara return hctx;
2341b6e68ee8SJan Kara return NULL;
2342b6e68ee8SJan Kara }
2343b6e68ee8SJan Kara
2344105663f7SAndré Almeida /**
234524f7bb88SMauro Carvalho Chehab * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2346105663f7SAndré Almeida * @q: Pointer to the request queue to run.
2347105663f7SAndré Almeida * @async: If we want to run the queue asynchronously.
2348105663f7SAndré Almeida */
2349b94ec296SMike Snitzer void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2350320ae51fSJens Axboe {
2351b6e68ee8SJan Kara struct blk_mq_hw_ctx *hctx, *sq_hctx;
23524f481208SMing Lei unsigned long i;
2353320ae51fSJens Axboe
2354b6e68ee8SJan Kara sq_hctx = NULL;
23554d337cebSMing Lei if (blk_queue_sq_sched(q))
2356b6e68ee8SJan Kara sq_hctx = blk_mq_get_sq_hctx(q);
2357320ae51fSJens Axboe queue_for_each_hw_ctx(q, hctx, i) {
235879f720a7SJens Axboe if (blk_mq_hctx_stopped(hctx))
2359320ae51fSJens Axboe continue;
2360b6e68ee8SJan Kara /*
2361b6e68ee8SJan Kara 		 * Dispatch from this hctx if there's no hctx preferred by the
2362b6e68ee8SJan Kara 		 * IO scheduler or if it has requests that bypass the
2363b6e68ee8SJan Kara 		 * scheduler.
2364b6e68ee8SJan Kara */
2365b6e68ee8SJan Kara if (!sq_hctx || sq_hctx == hctx ||
2366b6e68ee8SJan Kara !list_empty_careful(&hctx->dispatch))
2367b94ec296SMike Snitzer blk_mq_run_hw_queue(hctx, async);
2368320ae51fSJens Axboe }
2369320ae51fSJens Axboe }
2370b94ec296SMike Snitzer EXPORT_SYMBOL(blk_mq_run_hw_queues);
2371320ae51fSJens Axboe
2372fd001443SBart Van Assche /**
2373b9151e7bSDouglas Anderson * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2374b9151e7bSDouglas Anderson * @q: Pointer to the request queue to run.
2375fa94ba8aSMinwoo Im * @msecs: Milliseconds of delay to wait before running the queues.
2376b9151e7bSDouglas Anderson */
2377b9151e7bSDouglas Anderson void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2378b9151e7bSDouglas Anderson {
2379b6e68ee8SJan Kara struct blk_mq_hw_ctx *hctx, *sq_hctx;
23804f481208SMing Lei unsigned long i;
2381b9151e7bSDouglas Anderson
2382b6e68ee8SJan Kara sq_hctx = NULL;
23834d337cebSMing Lei if (blk_queue_sq_sched(q))
2384b6e68ee8SJan Kara sq_hctx = blk_mq_get_sq_hctx(q);
2385b9151e7bSDouglas Anderson queue_for_each_hw_ctx(q, hctx, i) {
2386b9151e7bSDouglas Anderson if (blk_mq_hctx_stopped(hctx))
2387b9151e7bSDouglas Anderson continue;
2388b6e68ee8SJan Kara /*
23898f5fea65SDavid Jeffery * If there is already a run_work pending, leave the
23908f5fea65SDavid Jeffery * pending delay untouched. Otherwise, a hctx can stall
23918f5fea65SDavid Jeffery * if another hctx is re-delaying the other's work
23928f5fea65SDavid Jeffery * before the work executes.
23938f5fea65SDavid Jeffery */
23948f5fea65SDavid Jeffery if (delayed_work_pending(&hctx->run_work))
23958f5fea65SDavid Jeffery continue;
23968f5fea65SDavid Jeffery /*
2397b6e68ee8SJan Kara 		 * Dispatch from this hctx if there's no hctx preferred by the
2398b6e68ee8SJan Kara 		 * IO scheduler or if it has requests that bypass the
2399b6e68ee8SJan Kara 		 * scheduler.
2400b6e68ee8SJan Kara */
2401b6e68ee8SJan Kara if (!sq_hctx || sq_hctx == hctx ||
2402b6e68ee8SJan Kara !list_empty_careful(&hctx->dispatch))
2403b9151e7bSDouglas Anderson blk_mq_delay_run_hw_queue(hctx, msecs);
2404b9151e7bSDouglas Anderson }
2405b9151e7bSDouglas Anderson }
2406b9151e7bSDouglas Anderson EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2407b9151e7bSDouglas Anderson
240839a70c76SMing Lei /*
240939a70c76SMing Lei  * This function is often used by drivers to pause .queue_rq() when
241039a70c76SMing Lei  * there aren't enough resources or some condition isn't satisfied, in
24114d606219SBart Van Assche  * which case BLK_STS_RESOURCE is usually returned.
241239a70c76SMing Lei *
241339a70c76SMing Lei * We do not guarantee that dispatch can be drained or blocked
241439a70c76SMing Lei * after blk_mq_stop_hw_queue() returns. Please use
241539a70c76SMing Lei * blk_mq_quiesce_queue() for that requirement.
241639a70c76SMing Lei */
2417320ae51fSJens Axboe void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2418320ae51fSJens Axboe {
2419641a9ed6SMing Lei cancel_delayed_work(&hctx->run_work);
2420641a9ed6SMing Lei
2421641a9ed6SMing Lei set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2422320ae51fSJens Axboe }
2423320ae51fSJens Axboe EXPORT_SYMBOL(blk_mq_stop_hw_queue);
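/*
 * Illustrative sketch (hypothetical driver; the "mydev" names are
 * assumptions): a ->queue_rq() implementation that pauses dispatch
 * while the device is out of internal slots, as described above:
 *
 *	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct mydev *dev = hctx->queue->queuedata;
 *
 *		if (!mydev_get_slot(dev)) {
 *			blk_mq_stop_hw_queue(hctx);
 *			return BLK_STS_RESOURCE;
 *		}
 *		return mydev_issue(dev, bd->rq);
 *	}
 */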
2424320ae51fSJens Axboe
242539a70c76SMing Lei /*
242639a70c76SMing Lei  * This function is often used by drivers to pause .queue_rq() when
242739a70c76SMing Lei  * there aren't enough resources or some condition isn't satisfied, in
24284d606219SBart Van Assche  * which case BLK_STS_RESOURCE is usually returned.
242939a70c76SMing Lei *
243039a70c76SMing Lei * We do not guarantee that dispatch can be drained or blocked
243139a70c76SMing Lei * after blk_mq_stop_hw_queues() returns. Please use
243239a70c76SMing Lei * blk_mq_quiesce_queue() for that requirement.
243339a70c76SMing Lei */
24342719aa21SJens Axboe void blk_mq_stop_hw_queues(struct request_queue *q)
24352719aa21SJens Axboe {
2436641a9ed6SMing Lei struct blk_mq_hw_ctx *hctx;
24374f481208SMing Lei unsigned long i;
2438641a9ed6SMing Lei
2439641a9ed6SMing Lei queue_for_each_hw_ctx(q, hctx, i)
2440641a9ed6SMing Lei blk_mq_stop_hw_queue(hctx);
2441280d45f6SChristoph Hellwig }
2442280d45f6SChristoph Hellwig EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2443280d45f6SChristoph Hellwig
2444320ae51fSJens Axboe void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2445320ae51fSJens Axboe {
2446320ae51fSJens Axboe clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2447e4043dcfSJens Axboe
244865a558f6SBart Van Assche blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2449320ae51fSJens Axboe }
2450320ae51fSJens Axboe EXPORT_SYMBOL(blk_mq_start_hw_queue);
2451320ae51fSJens Axboe
24522f268556SChristoph Hellwig void blk_mq_start_hw_queues(struct request_queue *q)
24532f268556SChristoph Hellwig {
24542f268556SChristoph Hellwig struct blk_mq_hw_ctx *hctx;
24554f481208SMing Lei unsigned long i;
24562f268556SChristoph Hellwig
24572f268556SChristoph Hellwig queue_for_each_hw_ctx(q, hctx, i)
24582f268556SChristoph Hellwig blk_mq_start_hw_queue(hctx);
24592f268556SChristoph Hellwig }
24602f268556SChristoph Hellwig EXPORT_SYMBOL(blk_mq_start_hw_queues);
24612f268556SChristoph Hellwig
2462ae911c5eSJens Axboe void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2463ae911c5eSJens Axboe {
2464ae911c5eSJens Axboe if (!blk_mq_hctx_stopped(hctx))
2465ae911c5eSJens Axboe return;
2466ae911c5eSJens Axboe
2467ae911c5eSJens Axboe clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2468e95080fbSMuchun Song /*
2469e95080fbSMuchun Song * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
2470e95080fbSMuchun Song 	 * clearing of BLK_MQ_S_STOPPED above and the checking of the
2471e95080fbSMuchun Song 	 * dispatch list in the subsequent routine.
2472e95080fbSMuchun Song */
2473e95080fbSMuchun Song smp_mb__after_atomic();
2474ae911c5eSJens Axboe blk_mq_run_hw_queue(hctx, async);
2475ae911c5eSJens Axboe }
2476ae911c5eSJens Axboe EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2477ae911c5eSJens Axboe
24781b4a3258SChristoph Hellwig void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2479320ae51fSJens Axboe {
2480320ae51fSJens Axboe struct blk_mq_hw_ctx *hctx;
24814f481208SMing Lei unsigned long i;
2482320ae51fSJens Axboe
2483ae911c5eSJens Axboe queue_for_each_hw_ctx(q, hctx, i)
248465a558f6SBart Van Assche blk_mq_start_stopped_hw_queue(hctx, async ||
248565a558f6SBart Van Assche (hctx->flags & BLK_MQ_F_BLOCKING));
2486320ae51fSJens Axboe }
2487320ae51fSJens Axboe EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
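/*
 * Continuing the hypothetical "mydev" sketch from above: once the
 * completion path frees a slot, queues stopped in ->queue_rq() can be
 * restarted asynchronously:
 *
 *	static void mydev_complete_rq(struct mydev *dev, struct request *rq)
 *	{
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *		mydev_put_slot(dev);
 *		blk_mq_start_stopped_hw_queues(dev->queue, true);
 *	}
 */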
2488320ae51fSJens Axboe
248970f4db63SChristoph Hellwig static void blk_mq_run_work_fn(struct work_struct *work)
2490320ae51fSJens Axboe {
2491c20a1a2cSChristoph Hellwig struct blk_mq_hw_ctx *hctx =
2492c20a1a2cSChristoph Hellwig container_of(work, struct blk_mq_hw_ctx, run_work.work);
2493320ae51fSJens Axboe
24944d5bba5bSChristoph Hellwig blk_mq_run_dispatch_ops(hctx->queue,
24954d5bba5bSChristoph Hellwig blk_mq_sched_dispatch_requests(hctx));
2496320ae51fSJens Axboe }
2497320ae51fSJens Axboe
2498105663f7SAndré Almeida /**
2499105663f7SAndré Almeida  * blk_mq_request_bypass_insert - Insert a request into the dispatch list.
2500105663f7SAndré Almeida * @rq: Pointer to request to be inserted.
25012b597613SChristoph Hellwig * @flags: BLK_MQ_INSERT_*
2502105663f7SAndré Almeida *
2503157f377bSJens Axboe * Should only be used carefully, when the caller knows we want to
2504157f377bSJens Axboe * bypass a potential IO scheduler on the target device.
2505157f377bSJens Axboe */
2506360f2648SChristoph Hellwig static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2507157f377bSJens Axboe {
2508ea4f995eSJens Axboe struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2509157f377bSJens Axboe
2510157f377bSJens Axboe spin_lock(&hctx->lock);
25112b597613SChristoph Hellwig if (flags & BLK_MQ_INSERT_AT_HEAD)
251201e99aecSMing Lei list_add(&rq->queuelist, &hctx->dispatch);
251301e99aecSMing Lei else
2514157f377bSJens Axboe list_add_tail(&rq->queuelist, &hctx->dispatch);
2515157f377bSJens Axboe spin_unlock(&hctx->lock);
2516157f377bSJens Axboe }
2517157f377bSJens Axboe
251805a93117SChristoph Hellwig static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
251905a93117SChristoph Hellwig struct blk_mq_ctx *ctx, struct list_head *list,
252005a93117SChristoph Hellwig bool run_queue_async)
2521320ae51fSJens Axboe {
25223f0cedc7SMing Lei struct request *rq;
2523c16d6b5aSMing Lei enum hctx_type type = hctx->type;
25243f0cedc7SMing Lei
2525320ae51fSJens Axboe /*
252694aa228cSChristoph Hellwig * Try to issue requests directly if the hw queue isn't busy to save an
252794aa228cSChristoph Hellwig * extra enqueue & dequeue to the sw queue.
252894aa228cSChristoph Hellwig */
252994aa228cSChristoph Hellwig if (!hctx->dispatch_busy && !run_queue_async) {
253094aa228cSChristoph Hellwig blk_mq_run_dispatch_ops(hctx->queue,
253194aa228cSChristoph Hellwig blk_mq_try_issue_list_directly(hctx, list));
253294aa228cSChristoph Hellwig if (list_empty(list))
253394aa228cSChristoph Hellwig goto out;
253494aa228cSChristoph Hellwig }
253594aa228cSChristoph Hellwig
253694aa228cSChristoph Hellwig /*
2537320ae51fSJens Axboe 	 * Preemption doesn't flush the plug list, so it's possible that
2538320ae51fSJens Axboe 	 * ctx->cpu is offline now.
2539320ae51fSJens Axboe */
25403f0cedc7SMing Lei list_for_each_entry(rq, list, queuelist) {
2541e57690feSJens Axboe BUG_ON(rq->mq_ctx != ctx);
2542a54895faSChristoph Hellwig trace_block_rq_insert(rq);
254365a558f6SBart Van Assche if (rq->cmd_flags & REQ_NOWAIT)
254465a558f6SBart Van Assche run_queue_async = true;
2545320ae51fSJens Axboe }
25463f0cedc7SMing Lei
25473f0cedc7SMing Lei spin_lock(&ctx->lock);
2548c16d6b5aSMing Lei list_splice_tail_init(list, &ctx->rq_lists[type]);
2549cfd0c552SMing Lei blk_mq_hctx_mark_pending(hctx, ctx);
2550320ae51fSJens Axboe spin_unlock(&ctx->lock);
255194aa228cSChristoph Hellwig out:
255294aa228cSChristoph Hellwig blk_mq_run_hw_queue(hctx, run_queue_async);
2553320ae51fSJens Axboe }
2554320ae51fSJens Axboe
2555710fa378SChristoph Hellwig static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
25562bd215dfSChristoph Hellwig {
25572bd215dfSChristoph Hellwig struct request_queue *q = rq->q;
25582bd215dfSChristoph Hellwig struct blk_mq_ctx *ctx = rq->mq_ctx;
25592bd215dfSChristoph Hellwig struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
25602bd215dfSChristoph Hellwig
256153548d2aSChristoph Hellwig if (blk_rq_is_passthrough(rq)) {
256253548d2aSChristoph Hellwig /*
256353548d2aSChristoph Hellwig 		 * Passthrough requests have to be added to hctx->dispatch
256453548d2aSChristoph Hellwig 		 * directly. The device may be in a situation where it can't
256553548d2aSChristoph Hellwig 		 * handle FS requests, and always returns BLK_STS_RESOURCE for
256653548d2aSChristoph Hellwig 		 * them, which gets them added to hctx->dispatch.
256753548d2aSChristoph Hellwig *
256853548d2aSChristoph Hellwig * If a passthrough request is required to unblock the queues,
256953548d2aSChristoph Hellwig * and it is added to the scheduler queue, there is no chance to
257053548d2aSChristoph Hellwig * dispatch it given we prioritize requests in hctx->dispatch.
257153548d2aSChristoph Hellwig */
25722b597613SChristoph Hellwig blk_mq_request_bypass_insert(rq, flags);
2573be4c4278SBart Van Assche } else if (req_op(rq) == REQ_OP_FLUSH) {
25742bd215dfSChristoph Hellwig /*
25752bd215dfSChristoph Hellwig 		 * First, a normal IO request is inserted into the scheduler
25762bd215dfSChristoph Hellwig 		 * queue or sw queue, while we add the flush request to the
25772bd215dfSChristoph Hellwig 		 * dispatch queue (hctx->dispatch) directly. There is at most
25782bd215dfSChristoph Hellwig 		 * one in-flight flush request per hw queue, so it doesn't
25792bd215dfSChristoph Hellwig 		 * matter whether the flush request is added to the tail or
25802bd215dfSChristoph Hellwig 		 * front of the dispatch queue.
25812bd215dfSChristoph Hellwig 		 *
25822bd215dfSChristoph Hellwig 		 * Second, in case of NCQ, a flush request is a non-NCQ
25832bd215dfSChristoph Hellwig 		 * command, and queueing it will fail when there is any
25842bd215dfSChristoph Hellwig 		 * in-flight normal IO request (NCQ command). Adding the flush
25852bd215dfSChristoph Hellwig 		 * rq to the front of hctx->dispatch tends to add extra latency
25862bd215dfSChristoph Hellwig 		 * to the flush rq because of S_SCHED_RESTART, compared with
25872bd215dfSChristoph Hellwig 		 * adding it to the tail of the dispatch queue; that in turn
25882bd215dfSChristoph Hellwig 		 * increases the chance of flush merging, so fewer flush
25892bd215dfSChristoph Hellwig 		 * requests will be issued to the controller. It is observed
25902bd215dfSChristoph Hellwig 		 * that ~10% of the time is saved in blktests block/004 on a
25912bd215dfSChristoph Hellwig 		 * disk attached to an AHCI/NCQ drive when adding the flush rq
25922bd215dfSChristoph Hellwig 		 * to the front of hctx->dispatch.
25932bd215dfSChristoph Hellwig 		 *
25942bd215dfSChristoph Hellwig */
25952b597613SChristoph Hellwig blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
259653548d2aSChristoph Hellwig } else if (q->elevator) {
25972bd215dfSChristoph Hellwig LIST_HEAD(list);
25982bd215dfSChristoph Hellwig
259953548d2aSChristoph Hellwig WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
260053548d2aSChristoph Hellwig
26012bd215dfSChristoph Hellwig list_add(&rq->queuelist, &list);
260293fffe16SChristoph Hellwig q->elevator->type->ops.insert_requests(hctx, &list, flags);
26032bd215dfSChristoph Hellwig } else {
26044ec5c055SChristoph Hellwig trace_block_rq_insert(rq);
26054ec5c055SChristoph Hellwig
26062bd215dfSChristoph Hellwig spin_lock(&ctx->lock);
2607710fa378SChristoph Hellwig if (flags & BLK_MQ_INSERT_AT_HEAD)
26084ec5c055SChristoph Hellwig list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
26094ec5c055SChristoph Hellwig else
26104ec5c055SChristoph Hellwig list_add_tail(&rq->queuelist,
26114ec5c055SChristoph Hellwig &ctx->rq_lists[hctx->type]);
2612a88db1e0SChristoph Hellwig blk_mq_hctx_mark_pending(hctx, ctx);
26132bd215dfSChristoph Hellwig spin_unlock(&ctx->lock);
26142bd215dfSChristoph Hellwig }
2615320ae51fSJens Axboe }
2616320ae51fSJens Axboe
261714ccb66bSChristoph Hellwig static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
261814ccb66bSChristoph Hellwig unsigned int nr_segs)
2619320ae51fSJens Axboe {
262093f221aeSEric Biggers int err;
262193f221aeSEric Biggers
2622f924cddeSChristoph Hellwig if (bio->bi_opf & REQ_RAHEAD)
2623f924cddeSChristoph Hellwig rq->cmd_flags |= REQ_FAILFAST_MASK;
2624f924cddeSChristoph Hellwig
2625f924cddeSChristoph Hellwig rq->__sector = bio->bi_iter.bi_sector;
262614ccb66bSChristoph Hellwig blk_rq_bio_prep(rq, bio, nr_segs);
262793f221aeSEric Biggers
262893f221aeSEric Biggers /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
262993f221aeSEric Biggers err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
263093f221aeSEric Biggers WARN_ON_ONCE(err);
26314b570521SJens Axboe
2632b5af37abSKonstantin Khlebnikov blk_account_io_start(rq);
2633320ae51fSJens Axboe }
2634320ae51fSJens Axboe
26350f95549cSMike Snitzer static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
26363e08773cSChristoph Hellwig struct request *rq, bool last)
2637f984df1fSShaohua Li {
2638f984df1fSShaohua Li struct request_queue *q = rq->q;
2639f984df1fSShaohua Li struct blk_mq_queue_data bd = {
2640f984df1fSShaohua Li .rq = rq,
2641be94f058SJens Axboe .last = last,
2642f984df1fSShaohua Li };
2643f06345adSJens Axboe blk_status_t ret;
26440f95549cSMike Snitzer
26450f95549cSMike Snitzer /*
26460f95549cSMike Snitzer 	 * If queueing succeeded (BLK_STS_OK), we are done. On error, the
26470f95549cSMike Snitzer 	 * caller may kill the request. Any other error (busy), just add it
26480f95549cSMike Snitzer 	 * to our list as we previously would have done.
26490f95549cSMike Snitzer */
26500f95549cSMike Snitzer ret = q->mq_ops->queue_rq(hctx, &bd);
26510f95549cSMike Snitzer switch (ret) {
26520f95549cSMike Snitzer case BLK_STS_OK:
26536ce3dd6eSMing Lei blk_mq_update_dispatch_busy(hctx, false);
26540f95549cSMike Snitzer break;
26550f95549cSMike Snitzer case BLK_STS_RESOURCE:
265686ff7c2aSMing Lei case BLK_STS_DEV_RESOURCE:
26576ce3dd6eSMing Lei blk_mq_update_dispatch_busy(hctx, true);
26580f95549cSMike Snitzer __blk_mq_requeue_request(rq);
26590f95549cSMike Snitzer break;
26600f95549cSMike Snitzer default:
26616ce3dd6eSMing Lei blk_mq_update_dispatch_busy(hctx, false);
26620f95549cSMike Snitzer break;
26630f95549cSMike Snitzer }
26640f95549cSMike Snitzer
26650f95549cSMike Snitzer return ret;
26660f95549cSMike Snitzer }
26670f95549cSMike Snitzer
26682b71b877SChristoph Hellwig static bool blk_mq_get_budget_and_tag(struct request *rq)
26690f95549cSMike Snitzer {
26702a5a24aaSMing Lei int budget_token;
2671d964f04aSMing Lei
26722b71b877SChristoph Hellwig budget_token = blk_mq_get_dispatch_budget(rq->q);
26732a5a24aaSMing Lei if (budget_token < 0)
26742b71b877SChristoph Hellwig return false;
26752a5a24aaSMing Lei blk_mq_set_rq_budget_token(rq, budget_token);
26768ab6bb9eSMing Lei if (!blk_mq_get_driver_tag(rq)) {
26772b71b877SChristoph Hellwig blk_mq_put_dispatch_budget(rq->q, budget_token);
26782b71b877SChristoph Hellwig return false;
267988022d72SMing Lei }
26802b71b877SChristoph Hellwig return true;
26817f556a44SJianchao Wang }
2682fd9c40f6SBart Van Assche
2683105663f7SAndré Almeida /**
2684105663f7SAndré Almeida * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2685105663f7SAndré Almeida * @hctx: Pointer of the associated hardware queue.
2686105663f7SAndré Almeida * @rq: Pointer to request to be sent.
2687105663f7SAndré Almeida *
2688105663f7SAndré Almeida  * If the device has enough resources to accept a new request now, send the
2689105663f7SAndré Almeida  * request directly to the device driver. Else, insert it into the
2690105663f7SAndré Almeida  * hctx->dispatch list, so we can try to send it again in the future.
2691105663f7SAndré Almeida  * Requests inserted into this list have higher priority.
2692105663f7SAndré Almeida */
2693fd9c40f6SBart Van Assche static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
26943e08773cSChristoph Hellwig struct request *rq)
2695fd9c40f6SBart Van Assche {
2696e1f44ac0SChristoph Hellwig blk_status_t ret;
2697fd9c40f6SBart Van Assche
2698e1f44ac0SChristoph Hellwig if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2699710fa378SChristoph Hellwig blk_mq_insert_request(rq, 0);
2700fe0d9800SMuchun Song blk_mq_run_hw_queue(hctx, false);
2701e1f44ac0SChristoph Hellwig return;
2702e1f44ac0SChristoph Hellwig }
2703e1f44ac0SChristoph Hellwig
2704dd6216bbSChristoph Hellwig if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2705710fa378SChristoph Hellwig blk_mq_insert_request(rq, 0);
270665a558f6SBart Van Assche blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2707e1f44ac0SChristoph Hellwig return;
2708e1f44ac0SChristoph Hellwig }
2709e1f44ac0SChristoph Hellwig
2710e1f44ac0SChristoph Hellwig ret = __blk_mq_issue_directly(hctx, rq, true);
2711e1f44ac0SChristoph Hellwig switch (ret) {
2712e1f44ac0SChristoph Hellwig case BLK_STS_OK:
2713e1f44ac0SChristoph Hellwig break;
2714e1f44ac0SChristoph Hellwig case BLK_STS_RESOURCE:
2715e1f44ac0SChristoph Hellwig case BLK_STS_DEV_RESOURCE:
27162b597613SChristoph Hellwig blk_mq_request_bypass_insert(rq, 0);
27172394395cSChristoph Hellwig blk_mq_run_hw_queue(hctx, false);
2718e1f44ac0SChristoph Hellwig break;
2719e1f44ac0SChristoph Hellwig default:
27207f556a44SJianchao Wang blk_mq_end_request(rq, ret);
2721e1f44ac0SChristoph Hellwig break;
2722e1f44ac0SChristoph Hellwig }
27237f556a44SJianchao Wang }
27247f556a44SJianchao Wang
272506c8c691SChristoph Hellwig static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2726fd9c40f6SBart Van Assche {
2727e1f44ac0SChristoph Hellwig struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2728e1f44ac0SChristoph Hellwig
2729e1f44ac0SChristoph Hellwig if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2730710fa378SChristoph Hellwig blk_mq_insert_request(rq, 0);
2731fe0d9800SMuchun Song blk_mq_run_hw_queue(hctx, false);
2732e1f44ac0SChristoph Hellwig return BLK_STS_OK;
2733e1f44ac0SChristoph Hellwig }
2734e1f44ac0SChristoph Hellwig
2735e1f44ac0SChristoph Hellwig if (!blk_mq_get_budget_and_tag(rq))
2736e1f44ac0SChristoph Hellwig return BLK_STS_RESOURCE;
2737e1f44ac0SChristoph Hellwig return __blk_mq_issue_directly(hctx, rq, last);
27385eb6126eSChristoph Hellwig }
27395eb6126eSChristoph Hellwig
27403e368fb0SKemeng Shi static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2741b84c5b50SChristoph Hellwig {
2742b84c5b50SChristoph Hellwig struct blk_mq_hw_ctx *hctx = NULL;
2743b84c5b50SChristoph Hellwig struct request *rq;
2744b84c5b50SChristoph Hellwig int queued = 0;
27450d617a83SKemeng Shi blk_status_t ret = BLK_STS_OK;
2746b84c5b50SChristoph Hellwig
2747b84c5b50SChristoph Hellwig while ((rq = rq_list_pop(&plug->mq_list))) {
2748b84c5b50SChristoph Hellwig bool last = rq_list_empty(plug->mq_list);
2749b84c5b50SChristoph Hellwig
2750b84c5b50SChristoph Hellwig if (hctx != rq->mq_hctx) {
275134c9f547SKemeng Shi if (hctx) {
275234c9f547SKemeng Shi blk_mq_commit_rqs(hctx, queued, false);
275334c9f547SKemeng Shi queued = 0;
275434c9f547SKemeng Shi }
2755b84c5b50SChristoph Hellwig hctx = rq->mq_hctx;
2756b84c5b50SChristoph Hellwig }
2757b84c5b50SChristoph Hellwig
2758b84c5b50SChristoph Hellwig ret = blk_mq_request_issue_directly(rq, last);
2759b84c5b50SChristoph Hellwig switch (ret) {
2760b84c5b50SChristoph Hellwig case BLK_STS_OK:
2761b84c5b50SChristoph Hellwig queued++;
2762b84c5b50SChristoph Hellwig break;
2763b84c5b50SChristoph Hellwig case BLK_STS_RESOURCE:
2764b84c5b50SChristoph Hellwig case BLK_STS_DEV_RESOURCE:
27652b597613SChristoph Hellwig blk_mq_request_bypass_insert(rq, 0);
27662394395cSChristoph Hellwig blk_mq_run_hw_queue(hctx, false);
27670d617a83SKemeng Shi goto out;
2768b84c5b50SChristoph Hellwig default:
2769b84c5b50SChristoph Hellwig blk_mq_end_request(rq, ret);
2770b84c5b50SChristoph Hellwig break;
2771b84c5b50SChristoph Hellwig }
2772b84c5b50SChristoph Hellwig }
2773b84c5b50SChristoph Hellwig
27740d617a83SKemeng Shi out:
27750d617a83SKemeng Shi if (ret != BLK_STS_OK)
277634c9f547SKemeng Shi blk_mq_commit_rqs(hctx, queued, false);
2777b84c5b50SChristoph Hellwig }
2778b84c5b50SChristoph Hellwig
2779518579a9SKeith Busch static void __blk_mq_flush_plug_list(struct request_queue *q,
2780518579a9SKeith Busch struct blk_plug *plug)
2781518579a9SKeith Busch {
2782518579a9SKeith Busch if (blk_queue_quiesced(q))
2783518579a9SKeith Busch return;
2784518579a9SKeith Busch q->mq_ops->queue_rqs(&plug->mq_list);
2785518579a9SKeith Busch }
2786518579a9SKeith Busch
278726fed4acSJens Axboe static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
278826fed4acSJens Axboe {
278926fed4acSJens Axboe struct blk_mq_hw_ctx *this_hctx = NULL;
279026fed4acSJens Axboe struct blk_mq_ctx *this_ctx = NULL;
279126fed4acSJens Axboe struct request *requeue_list = NULL;
279234e0a279SJan Kara struct request **requeue_lastp = &requeue_list;
279326fed4acSJens Axboe unsigned int depth = 0;
2794d97217e7SMing Lei bool is_passthrough = false;
279526fed4acSJens Axboe LIST_HEAD(list);
279626fed4acSJens Axboe
279726fed4acSJens Axboe do {
279826fed4acSJens Axboe struct request *rq = rq_list_pop(&plug->mq_list);
279926fed4acSJens Axboe
280026fed4acSJens Axboe if (!this_hctx) {
280126fed4acSJens Axboe this_hctx = rq->mq_hctx;
280226fed4acSJens Axboe this_ctx = rq->mq_ctx;
2803d97217e7SMing Lei is_passthrough = blk_rq_is_passthrough(rq);
2804d97217e7SMing Lei } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2805d97217e7SMing Lei is_passthrough != blk_rq_is_passthrough(rq)) {
280634e0a279SJan Kara rq_list_add_tail(&requeue_lastp, rq);
280726fed4acSJens Axboe continue;
280826fed4acSJens Axboe }
280934e0a279SJan Kara list_add(&rq->queuelist, &list);
281026fed4acSJens Axboe depth++;
281126fed4acSJens Axboe } while (!rq_list_empty(plug->mq_list));
281226fed4acSJens Axboe
281326fed4acSJens Axboe plug->mq_list = requeue_list;
281426fed4acSJens Axboe trace_block_unplug(this_hctx->queue, depth, !from_sched);
281505a93117SChristoph Hellwig
281605a93117SChristoph Hellwig percpu_ref_get(&this_hctx->queue->q_usage_counter);
2817d97217e7SMing Lei /* passthrough requests should never be issued to the I/O scheduler */
28182293cae7SMing Lei if (is_passthrough) {
28192293cae7SMing Lei spin_lock(&this_hctx->lock);
28202293cae7SMing Lei list_splice_tail_init(&list, &this_hctx->dispatch);
28212293cae7SMing Lei spin_unlock(&this_hctx->lock);
28222293cae7SMing Lei blk_mq_run_hw_queue(this_hctx, from_sched);
28232293cae7SMing Lei } else if (this_hctx->queue->elevator) {
282405a93117SChristoph Hellwig this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
282593fffe16SChristoph Hellwig &list, 0);
282605a93117SChristoph Hellwig blk_mq_run_hw_queue(this_hctx, from_sched);
282705a93117SChristoph Hellwig } else {
282805a93117SChristoph Hellwig blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
282905a93117SChristoph Hellwig }
283005a93117SChristoph Hellwig percpu_ref_put(&this_hctx->queue->q_usage_counter);
283126fed4acSJens Axboe }
283226fed4acSJens Axboe
2833b84c5b50SChristoph Hellwig void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2834b84c5b50SChristoph Hellwig {
28353c67d44dSJens Axboe struct request *rq;
2836b84c5b50SChristoph Hellwig
283770904263SRoss Lagerwall /*
283870904263SRoss Lagerwall * We may have been called recursively midway through handling
283970904263SRoss Lagerwall * plug->mq_list via a schedule() in the driver's queue_rq() callback.
284070904263SRoss Lagerwall * To avoid mq_list changing under our feet, clear rq_count early and
284170904263SRoss Lagerwall * bail out specifically if rq_count is 0 rather than checking
284270904263SRoss Lagerwall * whether the mq_list is empty.
284370904263SRoss Lagerwall */
284470904263SRoss Lagerwall if (plug->rq_count == 0)
2845b84c5b50SChristoph Hellwig return;
2846b84c5b50SChristoph Hellwig plug->rq_count = 0;
2847b84c5b50SChristoph Hellwig
2848b84c5b50SChristoph Hellwig if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
28493c67d44dSJens Axboe struct request_queue *q;
28503c67d44dSJens Axboe
28513c67d44dSJens Axboe rq = rq_list_peek(&plug->mq_list);
28523c67d44dSJens Axboe q = rq->q;
28533c67d44dSJens Axboe
28543c67d44dSJens Axboe /*
28553c67d44dSJens Axboe * Peek first request and see if we have a ->queue_rqs() hook.
28563c67d44dSJens Axboe * If we do, we can dispatch the whole plug list in one go. We
28573c67d44dSJens Axboe * already know at this point that all requests belong to the
28583c67d44dSJens Axboe * same queue, caller must ensure that's the case.
28593c67d44dSJens Axboe *
28603c67d44dSJens Axboe * Since we pass off the full list to the driver at this point,
28613c67d44dSJens Axboe * we do not increment the active request count for the queue.
28623c67d44dSJens Axboe * Bypass shared tags for now because of that.
28633c67d44dSJens Axboe */
28643c67d44dSJens Axboe if (q->mq_ops->queue_rqs &&
28653c67d44dSJens Axboe !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
28663c67d44dSJens Axboe blk_mq_run_dispatch_ops(q,
2867518579a9SKeith Busch __blk_mq_flush_plug_list(q, plug));
28683c67d44dSJens Axboe if (rq_list_empty(plug->mq_list))
28693c67d44dSJens Axboe return;
28703c67d44dSJens Axboe }
287173f3760eSMing Lei
287273f3760eSMing Lei blk_mq_run_dispatch_ops(q,
28733e368fb0SKemeng Shi blk_mq_plug_issue_direct(plug));
2874b84c5b50SChristoph Hellwig if (rq_list_empty(plug->mq_list))
2875b84c5b50SChristoph Hellwig return;
2876b84c5b50SChristoph Hellwig }
2877b84c5b50SChristoph Hellwig
2878b84c5b50SChristoph Hellwig do {
287926fed4acSJens Axboe blk_mq_dispatch_plug_list(plug, from_schedule);
2880b84c5b50SChristoph Hellwig } while (!rq_list_empty(plug->mq_list));
2881b84c5b50SChristoph Hellwig }
2882b84c5b50SChristoph Hellwig
288394aa228cSChristoph Hellwig static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
28846ce3dd6eSMing Lei struct list_head *list)
28856ce3dd6eSMing Lei {
2886536167d4SKeith Busch int queued = 0;
2887984ce0a7SKemeng Shi blk_status_t ret = BLK_STS_OK;
2888536167d4SKeith Busch
28896ce3dd6eSMing Lei while (!list_empty(list)) {
28906ce3dd6eSMing Lei struct request *rq = list_first_entry(list, struct request,
28916ce3dd6eSMing Lei queuelist);
28926ce3dd6eSMing Lei
28936ce3dd6eSMing Lei list_del_init(&rq->queuelist);
2894fd9c40f6SBart Van Assche ret = blk_mq_request_issue_directly(rq, list_empty(list));
289527e8b2bbSKemeng Shi switch (ret) {
289627e8b2bbSKemeng Shi case BLK_STS_OK:
289727e8b2bbSKemeng Shi queued++;
289827e8b2bbSKemeng Shi break;
289927e8b2bbSKemeng Shi case BLK_STS_RESOURCE:
290027e8b2bbSKemeng Shi case BLK_STS_DEV_RESOURCE:
29012b597613SChristoph Hellwig blk_mq_request_bypass_insert(rq, 0);
29022394395cSChristoph Hellwig if (list_empty(list))
29032394395cSChristoph Hellwig blk_mq_run_hw_queue(hctx, false);
290427e8b2bbSKemeng Shi goto out;
290527e8b2bbSKemeng Shi default:
290627e8b2bbSKemeng Shi blk_mq_end_request(rq, ret);
2907fd9c40f6SBart Van Assche break;
2908fd9c40f6SBart Van Assche }
29096ce3dd6eSMing Lei }
2910d666ba98SJens Axboe
291127e8b2bbSKemeng Shi out:
2912984ce0a7SKemeng Shi if (ret != BLK_STS_OK)
2913984ce0a7SKemeng Shi blk_mq_commit_rqs(hctx, queued, false);
29146ce3dd6eSMing Lei }
29156ce3dd6eSMing Lei
2916b131f201SMing Lei static bool blk_mq_attempt_bio_merge(struct request_queue *q,
29170c5bcc92SChristoph Hellwig struct bio *bio, unsigned int nr_segs)
2918900e0807SJens Axboe {
2919900e0807SJens Axboe if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
29200c5bcc92SChristoph Hellwig if (blk_attempt_plug_merge(q, bio, nr_segs))
2921900e0807SJens Axboe return true;
2922900e0807SJens Axboe if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2923900e0807SJens Axboe return true;
2924900e0807SJens Axboe }
2925900e0807SJens Axboe return false;
2926900e0807SJens Axboe }
2927900e0807SJens Axboe
292871539717SJens Axboe static struct request *blk_mq_get_new_requests(struct request_queue *q,
292971539717SJens Axboe struct blk_plug *plug,
29300a5aa8d1SShin'ichiro Kawasaki struct bio *bio,
29310a5aa8d1SShin'ichiro Kawasaki unsigned int nsegs)
293271539717SJens Axboe {
293371539717SJens Axboe struct blk_mq_alloc_data data = {
293471539717SJens Axboe .q = q,
293571539717SJens Axboe .nr_tags = 1,
29369d497e29SMing Lei .cmd_flags = bio->bi_opf,
293771539717SJens Axboe };
293871539717SJens Axboe struct request *rq;
293971539717SJens Axboe
29400a5aa8d1SShin'ichiro Kawasaki if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2941b80056bdSChristoph Hellwig return NULL;
29420a5aa8d1SShin'ichiro Kawasaki
29430a5aa8d1SShin'ichiro Kawasaki rq_qos_throttle(q, bio);
29440a5aa8d1SShin'ichiro Kawasaki
294571539717SJens Axboe if (plug) {
294671539717SJens Axboe data.nr_tags = plug->nr_ios;
294771539717SJens Axboe plug->nr_ios = 1;
294871539717SJens Axboe data.cached_rq = &plug->cached_rq;
294971539717SJens Axboe }
295071539717SJens Axboe
295171539717SJens Axboe rq = __blk_mq_alloc_requests(&data);
2952373b5416SJens Axboe if (rq)
295371539717SJens Axboe return rq;
295471539717SJens Axboe rq_qos_cleanup(q, bio);
295571539717SJens Axboe if (bio->bi_opf & REQ_NOWAIT)
295671539717SJens Axboe bio_wouldblock_error(bio);
295771539717SJens Axboe return NULL;
295871539717SJens Axboe }
295971539717SJens Axboe
2960b80056bdSChristoph Hellwig /* return true if this @rq can be used for @bio */
2961b80056bdSChristoph Hellwig static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
2962b80056bdSChristoph Hellwig struct bio *bio)
296371539717SJens Axboe {
2964b80056bdSChristoph Hellwig enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
2965b80056bdSChristoph Hellwig enum hctx_type hctx_type = rq->mq_hctx->type;
2966b637108aSMing Lei
2967b80056bdSChristoph Hellwig WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
29685b13bc8aSChristoph Hellwig
296977465647SPavel Begunkov if (type != hctx_type &&
297077465647SPavel Begunkov !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
2971b80056bdSChristoph Hellwig return false;
2972b80056bdSChristoph Hellwig if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2973b80056bdSChristoph Hellwig return false;
29745b13bc8aSChristoph Hellwig
29752645672fSJens Axboe /*
29762645672fSJens Axboe 	 * If any qos ->throttle() ends up blocking, we will have flushed the
29772645672fSJens Axboe * plug and hence killed the cached_rq list as well. Pop this entry
29782645672fSJens Axboe * before we throttle.
29792645672fSJens Axboe */
298071539717SJens Axboe plug->cached_rq = rq_list_next(rq);
2981b80056bdSChristoph Hellwig rq_qos_throttle(rq->q, bio);
29822645672fSJens Axboe
29835c17f45eSChengming Zhou blk_mq_rq_time_init(rq, 0);
2984b80056bdSChristoph Hellwig rq->cmd_flags = bio->bi_opf;
298571539717SJens Axboe INIT_LIST_HEAD(&rq->queuelist);
2986b80056bdSChristoph Hellwig return true;
298771539717SJens Axboe }
298871539717SJens Axboe
2989105663f7SAndré Almeida /**
2990c62b37d9SChristoph Hellwig * blk_mq_submit_bio - Create and send a request to block device.
2991105663f7SAndré Almeida * @bio: Bio pointer.
2992105663f7SAndré Almeida *
2993105663f7SAndré Almeida  * Builds up a request structure from @q and @bio and sends it to the device.
2994105663f7SAndré Almeida  * The request may not be queued directly to hardware if:
2995105663f7SAndré Almeida * * This request can be merged with another one
2996105663f7SAndré Almeida * * We want to place request at plug queue for possible future merging
2997105663f7SAndré Almeida * * There is an IO scheduler active at this queue
2998105663f7SAndré Almeida *
2999105663f7SAndré Almeida  * It will not queue the request if there is an error with the bio or with
3000105663f7SAndré Almeida  * the request creation.
3001105663f7SAndré Almeida */
30023e08773cSChristoph Hellwig void blk_mq_submit_bio(struct bio *bio)
300307068d5bSJens Axboe {
3004ed6cddefSPavel Begunkov struct request_queue *q = bdev_get_queue(bio->bi_bdev);
30056deacb3bSChristoph Hellwig struct blk_plug *plug = blk_mq_plug(bio);
3006ef295ecfSChristoph Hellwig const int is_sync = op_is_sync(bio->bi_opf);
3007f0dbe6e8SChristoph Hellwig struct blk_mq_hw_ctx *hctx;
3008b80056bdSChristoph Hellwig struct request *rq = NULL;
3009abd45c15SJens Axboe unsigned int nr_segs = 1;
3010a892c8d5SSatya Tangirala blk_status_t ret;
301107068d5bSJens Axboe
301251d798cdSChristoph Hellwig bio = blk_queue_bounce(bio, q);
30139c6227e0SJan Kara
3014b80056bdSChristoph Hellwig if (plug) {
3015b80056bdSChristoph Hellwig rq = rq_list_peek(&plug->cached_rq);
3016b80056bdSChristoph Hellwig if (rq && rq->q != q)
3017b80056bdSChristoph Hellwig rq = NULL;
3018b80056bdSChristoph Hellwig }
3019b80056bdSChristoph Hellwig if (rq) {
30208b607504SJens Axboe if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
30218b607504SJens Axboe bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
30228b607504SJens Axboe if (!bio)
30238b607504SJens Axboe return;
30248b607504SJens Axboe }
3025b80056bdSChristoph Hellwig if (!bio_integrity_prep(bio))
30260a5aa8d1SShin'ichiro Kawasaki return;
3027b80056bdSChristoph Hellwig if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
3028b80056bdSChristoph Hellwig return;
3029b80056bdSChristoph Hellwig if (blk_mq_can_use_cached_rq(rq, plug, bio))
3030b80056bdSChristoph Hellwig goto done;
3031b80056bdSChristoph Hellwig percpu_ref_get(&q->q_usage_counter);
3032b80056bdSChristoph Hellwig } else {
3033b80056bdSChristoph Hellwig if (unlikely(bio_queue_enter(bio)))
3034b80056bdSChristoph Hellwig return;
30358b607504SJens Axboe if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
30368b607504SJens Axboe bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
30378b607504SJens Axboe if (!bio)
30388b607504SJens Axboe goto fail;
30398b607504SJens Axboe }
3040b80056bdSChristoph Hellwig if (!bio_integrity_prep(bio))
3041b80056bdSChristoph Hellwig goto fail;
3042b80056bdSChristoph Hellwig }
3043b80056bdSChristoph Hellwig
30440a5aa8d1SShin'ichiro Kawasaki rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3045b80056bdSChristoph Hellwig if (unlikely(!rq)) {
3046b80056bdSChristoph Hellwig fail:
3047b80056bdSChristoph Hellwig blk_queue_exit(q);
3048900e0807SJens Axboe return;
30495b13bc8aSChristoph Hellwig }
305087760e5eSJens Axboe
3051b80056bdSChristoph Hellwig done:
3052e8a676d6SChristoph Hellwig trace_block_getrq(bio);
3053d6f1dda2SXiaoguang Wang
3054c1c80384SJosef Bacik rq_qos_track(q, rq, bio);
305507068d5bSJens Axboe
305614ccb66bSChristoph Hellwig blk_mq_bio_to_request(rq, bio, nr_segs);
3057923218f6SMing Lei
30589cd1e566SEric Biggers ret = blk_crypto_rq_get_keyslot(rq);
3059a892c8d5SSatya Tangirala if (ret != BLK_STS_OK) {
3060a892c8d5SSatya Tangirala bio->bi_status = ret;
3061a892c8d5SSatya Tangirala bio_endio(bio);
3062a892c8d5SSatya Tangirala blk_mq_free_request(rq);
30633e08773cSChristoph Hellwig return;
3064a892c8d5SSatya Tangirala }
3065a892c8d5SSatya Tangirala
3066360f2648SChristoph Hellwig if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3067d92ca9d8SChristoph Hellwig return;
3068d92ca9d8SChristoph Hellwig
3069f0dbe6e8SChristoph Hellwig if (plug) {
3070ce5b009cSJens Axboe blk_add_rq_to_plug(plug, rq);
3071f0dbe6e8SChristoph Hellwig return;
3072f0dbe6e8SChristoph Hellwig }
3073f0dbe6e8SChristoph Hellwig
3074f0dbe6e8SChristoph Hellwig hctx = rq->mq_hctx;
3075dd6216bbSChristoph Hellwig if ((rq->rq_flags & RQF_USE_SCHED) ||
3076f0dbe6e8SChristoph Hellwig (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3077710fa378SChristoph Hellwig blk_mq_insert_request(rq, 0);
3078f0dbe6e8SChristoph Hellwig blk_mq_run_hw_queue(hctx, true);
3079f0dbe6e8SChristoph Hellwig } else {
3080f0dbe6e8SChristoph Hellwig blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3081f0dbe6e8SChristoph Hellwig }
3082ab42f35dSMing Lei }
3083320ae51fSJens Axboe
3084248c7933SChristoph Hellwig #ifdef CONFIG_BLK_MQ_STACKING
308506c8c691SChristoph Hellwig /**
3086a5efda3cSChristoph Hellwig * blk_insert_cloned_request - Helper for stacking drivers to submit a request
3087a5efda3cSChristoph Hellwig * @rq: the request being queued
308806c8c691SChristoph Hellwig */
308928db4711SChristoph Hellwig blk_status_t blk_insert_cloned_request(struct request *rq)
309006c8c691SChristoph Hellwig {
309128db4711SChristoph Hellwig struct request_queue *q = rq->q;
309206c8c691SChristoph Hellwig unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
309349d24398SUday Shankar unsigned int max_segments = blk_rq_get_max_segments(rq);
3094a5efda3cSChristoph Hellwig blk_status_t ret;
309506c8c691SChristoph Hellwig
309606c8c691SChristoph Hellwig if (blk_rq_sectors(rq) > max_sectors) {
309706c8c691SChristoph Hellwig /*
309806c8c691SChristoph Hellwig * SCSI device does not have a good way to return if
309906c8c691SChristoph Hellwig * Write Same/Zero is actually supported. If a device rejects
310006c8c691SChristoph Hellwig 		 * a non-read/write command (discard, write same, etc.), the
310106c8c691SChristoph Hellwig * low-level device driver will set the relevant queue limit to
310206c8c691SChristoph Hellwig * 0 to prevent blk-lib from issuing more of the offending
310306c8c691SChristoph Hellwig * operations. Commands queued prior to the queue limit being
310406c8c691SChristoph Hellwig * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
310506c8c691SChristoph Hellwig * errors being propagated to upper layers.
310606c8c691SChristoph Hellwig */
310706c8c691SChristoph Hellwig if (max_sectors == 0)
310806c8c691SChristoph Hellwig return BLK_STS_NOTSUPP;
310906c8c691SChristoph Hellwig
311006c8c691SChristoph Hellwig printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
311106c8c691SChristoph Hellwig __func__, blk_rq_sectors(rq), max_sectors);
311206c8c691SChristoph Hellwig return BLK_STS_IOERR;
311306c8c691SChristoph Hellwig }
311406c8c691SChristoph Hellwig
311506c8c691SChristoph Hellwig /*
311606c8c691SChristoph Hellwig * The queue settings related to segment counting may differ from the
311706c8c691SChristoph Hellwig * original queue.
311806c8c691SChristoph Hellwig */
311906c8c691SChristoph Hellwig rq->nr_phys_segments = blk_recalc_rq_segments(rq);
312049d24398SUday Shankar if (rq->nr_phys_segments > max_segments) {
312149d24398SUday Shankar printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
312249d24398SUday Shankar __func__, rq->nr_phys_segments, max_segments);
312306c8c691SChristoph Hellwig return BLK_STS_IOERR;
312406c8c691SChristoph Hellwig }
312506c8c691SChristoph Hellwig
312628db4711SChristoph Hellwig if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
312706c8c691SChristoph Hellwig return BLK_STS_IOERR;
312806c8c691SChristoph Hellwig
31295b8562f0SEric Biggers ret = blk_crypto_rq_get_keyslot(rq);
31305b8562f0SEric Biggers if (ret != BLK_STS_OK)
31315b8562f0SEric Biggers return ret;
313206c8c691SChristoph Hellwig
313306c8c691SChristoph Hellwig blk_account_io_start(rq);
313406c8c691SChristoph Hellwig
313506c8c691SChristoph Hellwig /*
313606c8c691SChristoph Hellwig * Since we have a scheduler attached on the top device,
313706c8c691SChristoph Hellwig * bypass a potential scheduler on the bottom device for
313806c8c691SChristoph Hellwig * insert.
313906c8c691SChristoph Hellwig */
314028db4711SChristoph Hellwig blk_mq_run_dispatch_ops(q,
31414cafe86cSMing Lei ret = blk_mq_request_issue_directly(rq, true));
3142592ee119SYu Kuai if (ret)
3143592ee119SYu Kuai blk_account_io_done(rq, ktime_get_ns());
31444cafe86cSMing Lei return ret;
314506c8c691SChristoph Hellwig }
314606c8c691SChristoph Hellwig EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
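/*
 * Illustrative sketch (hypothetical request-based stacking driver in
 * the spirit of dm-rq; the "mydm" names are assumptions): after mapping
 * @rq onto a clone for the lower device, dispatch the clone directly:
 *
 *	clone->end_io = mydm_end_clone_request;
 *	ret = blk_insert_cloned_request(clone);
 *	if (ret != BLK_STS_OK)
 *		mydm_dispatch_failed(rq, ret);
 */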
314706c8c691SChristoph Hellwig
314806c8c691SChristoph Hellwig /**
314906c8c691SChristoph Hellwig * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
315006c8c691SChristoph Hellwig * @rq: the clone request to be cleaned up
315106c8c691SChristoph Hellwig *
315206c8c691SChristoph Hellwig * Description:
315306c8c691SChristoph Hellwig * Free all bios in @rq for a cloned request.
315406c8c691SChristoph Hellwig */
315506c8c691SChristoph Hellwig void blk_rq_unprep_clone(struct request *rq)
315606c8c691SChristoph Hellwig {
315706c8c691SChristoph Hellwig struct bio *bio;
315806c8c691SChristoph Hellwig
315906c8c691SChristoph Hellwig while ((bio = rq->bio) != NULL) {
316006c8c691SChristoph Hellwig rq->bio = bio->bi_next;
316106c8c691SChristoph Hellwig
316206c8c691SChristoph Hellwig bio_put(bio);
316306c8c691SChristoph Hellwig }
316406c8c691SChristoph Hellwig }
316506c8c691SChristoph Hellwig EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
316606c8c691SChristoph Hellwig
316706c8c691SChristoph Hellwig /**
316806c8c691SChristoph Hellwig * blk_rq_prep_clone - Helper function to setup clone request
316906c8c691SChristoph Hellwig * @rq: the request to be setup
317006c8c691SChristoph Hellwig * @rq_src: original request to be cloned
317106c8c691SChristoph Hellwig * @bs: bio_set that bios for clone are allocated from
317206c8c691SChristoph Hellwig * @gfp_mask: memory allocation mask for bio
317306c8c691SChristoph Hellwig * @bio_ctr: setup function to be called for each clone bio.
317406c8c691SChristoph Hellwig * Returns %0 for success, non %0 for failure.
317506c8c691SChristoph Hellwig * @data: private data to be passed to @bio_ctr
317606c8c691SChristoph Hellwig *
317706c8c691SChristoph Hellwig * Description:
317806c8c691SChristoph Hellwig * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
317906c8c691SChristoph Hellwig * Also, pages which the original bios are pointing to are not copied
318006c8c691SChristoph Hellwig * and the cloned bios just point to the same pages.
318106c8c691SChristoph Hellwig * So the cloned bios must be completed before the original bios, which means
318206c8c691SChristoph Hellwig * the caller must complete @rq before @rq_src.
318306c8c691SChristoph Hellwig */
318406c8c691SChristoph Hellwig int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
318506c8c691SChristoph Hellwig struct bio_set *bs, gfp_t gfp_mask,
318606c8c691SChristoph Hellwig int (*bio_ctr)(struct bio *, struct bio *, void *),
318706c8c691SChristoph Hellwig void *data)
318806c8c691SChristoph Hellwig {
318906c8c691SChristoph Hellwig struct bio *bio, *bio_src;
319006c8c691SChristoph Hellwig
319106c8c691SChristoph Hellwig if (!bs)
319206c8c691SChristoph Hellwig bs = &fs_bio_set;
319306c8c691SChristoph Hellwig
319406c8c691SChristoph Hellwig __rq_for_each_bio(bio_src, rq_src) {
3195abfc426dSChristoph Hellwig bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3196abfc426dSChristoph Hellwig bs);
319706c8c691SChristoph Hellwig if (!bio)
319806c8c691SChristoph Hellwig goto free_and_out;
319906c8c691SChristoph Hellwig
320006c8c691SChristoph Hellwig if (bio_ctr && bio_ctr(bio, bio_src, data))
320106c8c691SChristoph Hellwig goto free_and_out;
320206c8c691SChristoph Hellwig
320306c8c691SChristoph Hellwig if (rq->bio) {
320406c8c691SChristoph Hellwig rq->biotail->bi_next = bio;
320506c8c691SChristoph Hellwig rq->biotail = bio;
320606c8c691SChristoph Hellwig } else {
320706c8c691SChristoph Hellwig rq->bio = rq->biotail = bio;
320806c8c691SChristoph Hellwig }
320906c8c691SChristoph Hellwig bio = NULL;
321006c8c691SChristoph Hellwig }
321106c8c691SChristoph Hellwig
321206c8c691SChristoph Hellwig /* Copy attributes of the original request to the clone request. */
321306c8c691SChristoph Hellwig rq->__sector = blk_rq_pos(rq_src);
321406c8c691SChristoph Hellwig rq->__data_len = blk_rq_bytes(rq_src);
321506c8c691SChristoph Hellwig if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
321606c8c691SChristoph Hellwig rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
321706c8c691SChristoph Hellwig rq->special_vec = rq_src->special_vec;
321806c8c691SChristoph Hellwig }
321906c8c691SChristoph Hellwig rq->nr_phys_segments = rq_src->nr_phys_segments;
322006c8c691SChristoph Hellwig rq->ioprio = rq_src->ioprio;
322106c8c691SChristoph Hellwig
322206c8c691SChristoph Hellwig if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
322306c8c691SChristoph Hellwig goto free_and_out;
322406c8c691SChristoph Hellwig
322506c8c691SChristoph Hellwig return 0;
322606c8c691SChristoph Hellwig
322706c8c691SChristoph Hellwig free_and_out:
322806c8c691SChristoph Hellwig if (bio)
322906c8c691SChristoph Hellwig bio_put(bio);
323006c8c691SChristoph Hellwig blk_rq_unprep_clone(rq);
323106c8c691SChristoph Hellwig
323206c8c691SChristoph Hellwig return -ENOMEM;
323306c8c691SChristoph Hellwig }
323406c8c691SChristoph Hellwig EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
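/*
 * Illustrative sketch (not part of this file): a request-based stacking
 * driver such as dm-multipath pairs the clone helpers roughly like this,
 * where clone_rq, my_bio_ctr, ctx and md are hypothetical names:
 *
 *	if (blk_rq_prep_clone(clone_rq, rq, &md->bs, GFP_ATOMIC,
 *			      my_bio_ctr, ctx))
 *		return DM_MAPIO_REQUEUE;
 *	ret = blk_insert_cloned_request(clone_rq);
 *	if (ret != BLK_STS_OK)
 *		blk_rq_unprep_clone(clone_rq);
 */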
3235248c7933SChristoph Hellwig #endif /* CONFIG_BLK_MQ_STACKING */
323606c8c691SChristoph Hellwig
3237f2b8f3ceSChristoph Hellwig /*
3238f2b8f3ceSChristoph Hellwig * Steal bios from a request and add them to a bio list.
3239f2b8f3ceSChristoph Hellwig * The request must not have been partially completed before.
3240f2b8f3ceSChristoph Hellwig */
3241f2b8f3ceSChristoph Hellwig void blk_steal_bios(struct bio_list *list, struct request *rq)
3242f2b8f3ceSChristoph Hellwig {
3243f2b8f3ceSChristoph Hellwig if (rq->bio) {
3244f2b8f3ceSChristoph Hellwig if (list->tail)
3245f2b8f3ceSChristoph Hellwig list->tail->bi_next = rq->bio;
3246f2b8f3ceSChristoph Hellwig else
3247f2b8f3ceSChristoph Hellwig list->head = rq->bio;
3248f2b8f3ceSChristoph Hellwig list->tail = rq->biotail;
3249f2b8f3ceSChristoph Hellwig
3250f2b8f3ceSChristoph Hellwig rq->bio = NULL;
3251f2b8f3ceSChristoph Hellwig rq->biotail = NULL;
3252f2b8f3ceSChristoph Hellwig }
3253f2b8f3ceSChristoph Hellwig
3254f2b8f3ceSChristoph Hellwig rq->__data_len = 0;
3255f2b8f3ceSChristoph Hellwig }
3256f2b8f3ceSChristoph Hellwig EXPORT_SYMBOL_GPL(blk_steal_bios);
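/*
 * Illustrative use: NVMe multipath failover does roughly the following
 * to requeue the bios of a failed request on another path (names are
 * abbreviated from drivers/nvme/host/multipath.c):
 *
 *	spin_lock_irqsave(&head->requeue_lock, flags);
 *	blk_steal_bios(&head->requeue_list, rq);
 *	spin_unlock_irqrestore(&head->requeue_lock, flags);
 *	blk_mq_end_request(rq, 0);
 *
 * Zeroing rq->__data_len above is what lets the emptied request complete
 * without any partial-completion handling.
 */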
3257f2b8f3ceSChristoph Hellwig
3258bd63141dSMing Lei static size_t order_to_size(unsigned int order)
3259bd63141dSMing Lei {
3260bd63141dSMing Lei return (size_t)PAGE_SIZE << order;
3261bd63141dSMing Lei }
3262bd63141dSMing Lei
3263bd63141dSMing Lei /* called before freeing request pool in @tags */
3264f32e4eafSJohn Garry static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3265f32e4eafSJohn Garry struct blk_mq_tags *tags)
3266bd63141dSMing Lei {
3267bd63141dSMing Lei struct page *page;
3268bd63141dSMing Lei unsigned long flags;
3269bd63141dSMing Lei
327076dd2980SYu Kuai /*
327176dd2980SYu Kuai * There is no need to clear the mapping if the driver tags are not
327276dd2980SYu Kuai * initialized or the mapping belongs to the driver tags.
327376dd2980SYu Kuai */
327476dd2980SYu Kuai if (!drv_tags || drv_tags == tags)
32754f245d5bSJohn Garry return;
32764f245d5bSJohn Garry
3277bd63141dSMing Lei list_for_each_entry(page, &tags->page_list, lru) {
3278bd63141dSMing Lei unsigned long start = (unsigned long)page_address(page);
3279bd63141dSMing Lei unsigned long end = start + order_to_size(page->private);
3280bd63141dSMing Lei int i;
3281bd63141dSMing Lei
3282f32e4eafSJohn Garry for (i = 0; i < drv_tags->nr_tags; i++) {
3283bd63141dSMing Lei struct request *rq = drv_tags->rqs[i];
3284bd63141dSMing Lei unsigned long rq_addr = (unsigned long)rq;
3285bd63141dSMing Lei
3286bd63141dSMing Lei if (rq_addr >= start && rq_addr < end) {
32870a467d0fSJens Axboe WARN_ON_ONCE(req_ref_read(rq) != 0);
3288bd63141dSMing Lei cmpxchg(&drv_tags->rqs[i], rq, NULL);
3289bd63141dSMing Lei }
3290bd63141dSMing Lei }
3291bd63141dSMing Lei }
3292bd63141dSMing Lei
3293bd63141dSMing Lei /*
3294bd63141dSMing Lei * Wait until all pending iteration is done.
3295bd63141dSMing Lei *
3296bd63141dSMing Lei * The request references have been cleared and are guaranteed to be
3297bd63141dSMing Lei * observed after the ->lock is released.
3298bd63141dSMing Lei */
3299bd63141dSMing Lei spin_lock_irqsave(&drv_tags->lock, flags);
3300bd63141dSMing Lei spin_unlock_irqrestore(&drv_tags->lock, flags);
3301bd63141dSMing Lei }
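/*
 * The empty lock/unlock pair at the tail of blk_mq_clear_rq_mapping()
 * is a synchronization point, not an oversight: concurrent tag
 * iterators read tags->rqs[] under the same lock (see
 * blk_mq_find_and_get_req() in blk-mq-tag.c), so cycling the lock
 * waits out any iterator that might still dereference a request
 * cleared by the cmpxchg() above.
 */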
3302bd63141dSMing Lei
3303cc71a6f4SJens Axboe void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
33042c3ad667SJens Axboe unsigned int hctx_idx)
3305320ae51fSJens Axboe {
3306f32e4eafSJohn Garry struct blk_mq_tags *drv_tags;
3307320ae51fSJens Axboe struct page *page;
3308320ae51fSJens Axboe
3309e02657eaSMing Lei if (list_empty(&tags->page_list))
3310e02657eaSMing Lei return;
3311e02657eaSMing Lei
3312079a2e3eSJohn Garry if (blk_mq_is_shared_tags(set->flags))
3313079a2e3eSJohn Garry drv_tags = set->shared_tags;
3314e155b0c2SJohn Garry else
3315f32e4eafSJohn Garry drv_tags = set->tags[hctx_idx];
3316f32e4eafSJohn Garry
331765de57bbSJohn Garry if (tags->static_rqs && set->ops->exit_request) {
3318e9b267d9SChristoph Hellwig int i;
3319e9b267d9SChristoph Hellwig
332024d2f903SChristoph Hellwig for (i = 0; i < tags->nr_tags; i++) {
33212af8cbe3SJens Axboe struct request *rq = tags->static_rqs[i];
33222af8cbe3SJens Axboe
33232af8cbe3SJens Axboe if (!rq)
3324e9b267d9SChristoph Hellwig continue;
3325d6296d39SChristoph Hellwig set->ops->exit_request(set, rq, hctx_idx);
33262af8cbe3SJens Axboe tags->static_rqs[i] = NULL;
3327e9b267d9SChristoph Hellwig }
3328e9b267d9SChristoph Hellwig }
3329e9b267d9SChristoph Hellwig
3330f32e4eafSJohn Garry blk_mq_clear_rq_mapping(drv_tags, tags);
3331bd63141dSMing Lei
333224d2f903SChristoph Hellwig while (!list_empty(&tags->page_list)) {
333324d2f903SChristoph Hellwig page = list_first_entry(&tags->page_list, struct page, lru);
33346753471cSDave Hansen list_del_init(&page->lru);
3335f75782e4SCatalin Marinas /*
3336f75782e4SCatalin Marinas * Remove kmemleak object previously allocated in
3337273938bfSRaul E Rangel * blk_mq_alloc_rqs().
3338f75782e4SCatalin Marinas */
3339f75782e4SCatalin Marinas kmemleak_free(page_address(page));
3340320ae51fSJens Axboe __free_pages(page, page->private);
3341320ae51fSJens Axboe }
3342cc71a6f4SJens Axboe }
3343320ae51fSJens Axboe
3344e155b0c2SJohn Garry void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3345cc71a6f4SJens Axboe {
334624d2f903SChristoph Hellwig kfree(tags->rqs);
3347cc71a6f4SJens Axboe tags->rqs = NULL;
33482af8cbe3SJens Axboe kfree(tags->static_rqs);
33492af8cbe3SJens Axboe tags->static_rqs = NULL;
3350320ae51fSJens Axboe
3351e155b0c2SJohn Garry blk_mq_free_tags(tags);
3352320ae51fSJens Axboe }
3353320ae51fSJens Axboe
33544d805131SMing Lei static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
33554d805131SMing Lei unsigned int hctx_idx)
33564d805131SMing Lei {
33574d805131SMing Lei int i;
33584d805131SMing Lei
33594d805131SMing Lei for (i = 0; i < set->nr_maps; i++) {
33604d805131SMing Lei unsigned int start = set->map[i].queue_offset;
33614d805131SMing Lei unsigned int end = start + set->map[i].nr_queues;
33624d805131SMing Lei
33634d805131SMing Lei if (hctx_idx >= start && hctx_idx < end)
33644d805131SMing Lei break;
33654d805131SMing Lei }
33664d805131SMing Lei
33674d805131SMing Lei if (i >= set->nr_maps)
33684d805131SMing Lei i = HCTX_TYPE_DEFAULT;
33694d805131SMing Lei
33704d805131SMing Lei return i;
33714d805131SMing Lei }
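/*
 * Worked example with made-up numbers: for nr_maps == 3 with
 * map[HCTX_TYPE_DEFAULT] = { .queue_offset = 0,  .nr_queues = 8 },
 * map[HCTX_TYPE_READ]    = { .queue_offset = 8,  .nr_queues = 8 },
 * map[HCTX_TYPE_POLL]    = { .queue_offset = 16, .nr_queues = 4 },
 * hctx_idx 18 falls in [16, 20) and resolves to HCTX_TYPE_POLL.
 */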
33724d805131SMing Lei
33734d805131SMing Lei static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
33744d805131SMing Lei unsigned int hctx_idx)
33754d805131SMing Lei {
33764d805131SMing Lei enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
33774d805131SMing Lei
33784d805131SMing Lei return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
33794d805131SMing Lei }
33804d805131SMing Lei
338163064be1SJohn Garry static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3382cc71a6f4SJens Axboe unsigned int hctx_idx,
3383cc71a6f4SJens Axboe unsigned int nr_tags,
3384e155b0c2SJohn Garry unsigned int reserved_tags)
3385320ae51fSJens Axboe {
33864d805131SMing Lei int node = blk_mq_get_hctx_node(set, hctx_idx);
338724d2f903SChristoph Hellwig struct blk_mq_tags *tags;
3388320ae51fSJens Axboe
338959f082e4SShaohua Li if (node == NUMA_NO_NODE)
339059f082e4SShaohua Li node = set->numa_node;
339159f082e4SShaohua Li
3392e155b0c2SJohn Garry tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3393e155b0c2SJohn Garry BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
339424d2f903SChristoph Hellwig if (!tags)
339524d2f903SChristoph Hellwig return NULL;
3396320ae51fSJens Axboe
3397590b5b7dSKees Cook tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
339836e1f3d1SGabriel Krisman Bertazi GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
339959f082e4SShaohua Li node);
34007edfd681SJinlong Chen if (!tags->rqs)
34017edfd681SJinlong Chen goto err_free_tags;
3402320ae51fSJens Axboe
3403590b5b7dSKees Cook tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
34042af8cbe3SJens Axboe GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
340559f082e4SShaohua Li node);
34067edfd681SJinlong Chen if (!tags->static_rqs)
34077edfd681SJinlong Chen goto err_free_rqs;
34082af8cbe3SJens Axboe
3409cc71a6f4SJens Axboe return tags;
34107edfd681SJinlong Chen
34117edfd681SJinlong Chen err_free_rqs:
34127edfd681SJinlong Chen kfree(tags->rqs);
34137edfd681SJinlong Chen err_free_tags:
34147edfd681SJinlong Chen blk_mq_free_tags(tags);
34157edfd681SJinlong Chen return NULL;
3416cc71a6f4SJens Axboe }
3417cc71a6f4SJens Axboe
34181d9bd516STejun Heo static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
34191d9bd516STejun Heo unsigned int hctx_idx, int node)
34201d9bd516STejun Heo {
34211d9bd516STejun Heo int ret;
34221d9bd516STejun Heo
34231d9bd516STejun Heo if (set->ops->init_request) {
34241d9bd516STejun Heo ret = set->ops->init_request(set, rq, hctx_idx, node);
34251d9bd516STejun Heo if (ret)
34261d9bd516STejun Heo return ret;
34271d9bd516STejun Heo }
34281d9bd516STejun Heo
342912f5b931SKeith Busch WRITE_ONCE(rq->state, MQ_RQ_IDLE);
34301d9bd516STejun Heo return 0;
34311d9bd516STejun Heo }
34321d9bd516STejun Heo
343363064be1SJohn Garry static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
343463064be1SJohn Garry struct blk_mq_tags *tags,
3435cc71a6f4SJens Axboe unsigned int hctx_idx, unsigned int depth)
3436cc71a6f4SJens Axboe {
3437cc71a6f4SJens Axboe unsigned int i, j, entries_per_page, max_order = 4;
34384d805131SMing Lei int node = blk_mq_get_hctx_node(set, hctx_idx);
3439cc71a6f4SJens Axboe size_t rq_size, left;
344059f082e4SShaohua Li
344159f082e4SShaohua Li if (node == NUMA_NO_NODE)
344259f082e4SShaohua Li node = set->numa_node;
3443cc71a6f4SJens Axboe
3444cc71a6f4SJens Axboe INIT_LIST_HEAD(&tags->page_list);
3445cc71a6f4SJens Axboe
3446320ae51fSJens Axboe /*
3447320ae51fSJens Axboe * rq_size is the size of the request plus driver payload, rounded
3448320ae51fSJens Axboe * to the cacheline size
3449320ae51fSJens Axboe */
345024d2f903SChristoph Hellwig rq_size = round_up(sizeof(struct request) + set->cmd_size,
3451320ae51fSJens Axboe cache_line_size());
3452cc71a6f4SJens Axboe left = rq_size * depth;
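/*
 * Worked example (assumed sizes): if sizeof(struct request) +
 * set->cmd_size adds up to 600 bytes and cache_line_size() is 64,
 * rq_size is round_up(600, 64) = 640.  A depth of 256 then makes
 * left = 160KiB, so the loop below starts with an order-4 (64KiB)
 * allocation that holds 65536 / 640 = 102 requests.
 */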
3453320ae51fSJens Axboe
3454cc71a6f4SJens Axboe for (i = 0; i < depth; ) {
3455320ae51fSJens Axboe int this_order = max_order;
3456320ae51fSJens Axboe struct page *page;
3457320ae51fSJens Axboe int to_do;
3458320ae51fSJens Axboe void *p;
3459320ae51fSJens Axboe
3460b3a834b1SBartlomiej Zolnierkiewicz while (this_order && left < order_to_size(this_order - 1))
3461320ae51fSJens Axboe this_order--;
3462320ae51fSJens Axboe
3463320ae51fSJens Axboe do {
346459f082e4SShaohua Li page = alloc_pages_node(node,
346536e1f3d1SGabriel Krisman Bertazi GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
346624d2f903SChristoph Hellwig this_order);
3467320ae51fSJens Axboe if (page)
3468320ae51fSJens Axboe break;
3469320ae51fSJens Axboe if (!this_order--)
3470320ae51fSJens Axboe break;
3471320ae51fSJens Axboe if (order_to_size(this_order) < rq_size)
3472320ae51fSJens Axboe break;
3473320ae51fSJens Axboe } while (1);
3474320ae51fSJens Axboe
3475320ae51fSJens Axboe if (!page)
347624d2f903SChristoph Hellwig goto fail;
3477320ae51fSJens Axboe
3478320ae51fSJens Axboe page->private = this_order;
347924d2f903SChristoph Hellwig list_add_tail(&page->lru, &tags->page_list);
3480320ae51fSJens Axboe
3481320ae51fSJens Axboe p = page_address(page);
3482f75782e4SCatalin Marinas /*
3483f75782e4SCatalin Marinas * Allow kmemleak to scan these pages as they contain pointers
3484f75782e4SCatalin Marinas * to additional allocations made via ops->init_request().
3485f75782e4SCatalin Marinas */
348636e1f3d1SGabriel Krisman Bertazi kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3487320ae51fSJens Axboe entries_per_page = order_to_size(this_order) / rq_size;
3488cc71a6f4SJens Axboe to_do = min(entries_per_page, depth - i);
3489320ae51fSJens Axboe left -= to_do * rq_size;
3490320ae51fSJens Axboe for (j = 0; j < to_do; j++) {
34912af8cbe3SJens Axboe struct request *rq = p;
34922af8cbe3SJens Axboe
34932af8cbe3SJens Axboe tags->static_rqs[i] = rq;
34941d9bd516STejun Heo if (blk_mq_init_request(set, rq, hctx_idx, node)) {
34952af8cbe3SJens Axboe tags->static_rqs[i] = NULL;
349624d2f903SChristoph Hellwig goto fail;
3497e9b267d9SChristoph Hellwig }
3498e9b267d9SChristoph Hellwig
3499320ae51fSJens Axboe p += rq_size;
3500320ae51fSJens Axboe i++;
3501320ae51fSJens Axboe }
3502320ae51fSJens Axboe }
3503cc71a6f4SJens Axboe return 0;
3504320ae51fSJens Axboe
350524d2f903SChristoph Hellwig fail:
3506cc71a6f4SJens Axboe blk_mq_free_rqs(set, tags, hctx_idx);
3507cc71a6f4SJens Axboe return -ENOMEM;
3508320ae51fSJens Axboe }
3509320ae51fSJens Axboe
3510bf0beec0SMing Lei struct rq_iter_data {
3511bf0beec0SMing Lei struct blk_mq_hw_ctx *hctx;
3512bf0beec0SMing Lei bool has_rq;
3513bf0beec0SMing Lei };
3514bf0beec0SMing Lei
35152dd6532eSJohn Garry static bool blk_mq_has_request(struct request *rq, void *data)
3516bf0beec0SMing Lei {
3517bf0beec0SMing Lei struct rq_iter_data *iter_data = data;
3518bf0beec0SMing Lei
3519bf0beec0SMing Lei if (rq->mq_hctx != iter_data->hctx)
3520bf0beec0SMing Lei return true;
3521bf0beec0SMing Lei iter_data->has_rq = true;
3522bf0beec0SMing Lei return false;
3523bf0beec0SMing Lei }
3524bf0beec0SMing Lei
3525bf0beec0SMing Lei static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3526bf0beec0SMing Lei {
3527bf0beec0SMing Lei struct blk_mq_tags *tags = hctx->sched_tags ?
3528bf0beec0SMing Lei hctx->sched_tags : hctx->tags;
3529bf0beec0SMing Lei struct rq_iter_data data = {
3530bf0beec0SMing Lei .hctx = hctx,
3531bf0beec0SMing Lei };
3532bf0beec0SMing Lei
3533bf0beec0SMing Lei blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3534bf0beec0SMing Lei return data.has_rq;
3535bf0beec0SMing Lei }
3536bf0beec0SMing Lei
3537bf0beec0SMing Lei static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3538bf0beec0SMing Lei struct blk_mq_hw_ctx *hctx)
3539bf0beec0SMing Lei {
35409b51d9d8SYury Norov if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3541bf0beec0SMing Lei return false;
3542bf0beec0SMing Lei if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3543bf0beec0SMing Lei return false;
3544bf0beec0SMing Lei return true;
3545bf0beec0SMing Lei }
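/*
 * Example: with hctx->cpumask = { 2, 3 } and CPU 3 already offline,
 * offlining CPU 2 makes cpumask_first_and() return 2 and
 * cpumask_next_and() return >= nr_cpu_ids, so CPU 2 is the last
 * online CPU serving this hctx and the function returns true.
 */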
3546bf0beec0SMing Lei
3547bf0beec0SMing Lei static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3548bf0beec0SMing Lei {
3549bf0beec0SMing Lei struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3550bf0beec0SMing Lei struct blk_mq_hw_ctx, cpuhp_online);
3551bf0beec0SMing Lei
3552bf0beec0SMing Lei if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3553bf0beec0SMing Lei !blk_mq_last_cpu_in_hctx(cpu, hctx))
3554bf0beec0SMing Lei return 0;
3555bf0beec0SMing Lei
3556bf0beec0SMing Lei /*
3557bf0beec0SMing Lei * Prevent new requests from being allocated on the current hctx.
3558bf0beec0SMing Lei *
3559bf0beec0SMing Lei * The smp_mb__after_atomic() pairs with the implied barrier in
3560bf0beec0SMing Lei * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag
3561bf0beec0SMing Lei * is seen once we return from the tag allocator.
3562bf0beec0SMing Lei */
3563bf0beec0SMing Lei set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3564bf0beec0SMing Lei smp_mb__after_atomic();
3565bf0beec0SMing Lei
3566bf0beec0SMing Lei /*
3567bf0beec0SMing Lei * Try to grab a reference to the queue and wait for any outstanding
3568bf0beec0SMing Lei * requests. If we could not grab a reference the queue has been
3569bf0beec0SMing Lei * frozen and there are no requests.
3570bf0beec0SMing Lei */
3571bf0beec0SMing Lei if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3572bf0beec0SMing Lei while (blk_mq_hctx_has_requests(hctx))
3573bf0beec0SMing Lei msleep(5);
3574bf0beec0SMing Lei percpu_ref_put(&hctx->queue->q_usage_counter);
3575bf0beec0SMing Lei }
3576bf0beec0SMing Lei
3577bf0beec0SMing Lei return 0;
3578bf0beec0SMing Lei }
3579bf0beec0SMing Lei
3580bf0beec0SMing Lei static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3581bf0beec0SMing Lei {
3582bf0beec0SMing Lei struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3583bf0beec0SMing Lei struct blk_mq_hw_ctx, cpuhp_online);
3584bf0beec0SMing Lei
3585bf0beec0SMing Lei if (cpumask_test_cpu(cpu, hctx->cpumask))
3586bf0beec0SMing Lei clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3587bf0beec0SMing Lei return 0;
3588bf0beec0SMing Lei }
3589bf0beec0SMing Lei
3590e57690feSJens Axboe /*
3591e57690feSJens Axboe * 'cpu' is going away. Splice any existing rq_list entries from this
3592e57690feSJens Axboe * software queue to the hw queue dispatch list, and ensure that it
3593e57690feSJens Axboe * gets run.
3594e57690feSJens Axboe */
35959467f859SThomas Gleixner static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3596484b4061SJens Axboe {
35979467f859SThomas Gleixner struct blk_mq_hw_ctx *hctx;
3598484b4061SJens Axboe struct blk_mq_ctx *ctx;
3599484b4061SJens Axboe LIST_HEAD(tmp);
3600c16d6b5aSMing Lei enum hctx_type type;
3601484b4061SJens Axboe
36029467f859SThomas Gleixner hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3603bf0beec0SMing Lei if (!cpumask_test_cpu(cpu, hctx->cpumask))
3604bf0beec0SMing Lei return 0;
3605bf0beec0SMing Lei
3606e57690feSJens Axboe ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3607c16d6b5aSMing Lei type = hctx->type;
3608484b4061SJens Axboe
3609484b4061SJens Axboe spin_lock(&ctx->lock);
3610c16d6b5aSMing Lei if (!list_empty(&ctx->rq_lists[type])) {
3611c16d6b5aSMing Lei list_splice_init(&ctx->rq_lists[type], &tmp);
3612484b4061SJens Axboe blk_mq_hctx_clear_pending(hctx, ctx);
3613484b4061SJens Axboe }
3614484b4061SJens Axboe spin_unlock(&ctx->lock);
3615484b4061SJens Axboe
3616484b4061SJens Axboe if (list_empty(&tmp))
36179467f859SThomas Gleixner return 0;
3618484b4061SJens Axboe
3619e57690feSJens Axboe spin_lock(&hctx->lock);
3620e57690feSJens Axboe list_splice_tail_init(&tmp, &hctx->dispatch);
3621e57690feSJens Axboe spin_unlock(&hctx->lock);
3622484b4061SJens Axboe
3623484b4061SJens Axboe blk_mq_run_hw_queue(hctx, true);
36249467f859SThomas Gleixner return 0;
3625484b4061SJens Axboe }
3626484b4061SJens Axboe
362758bf9358SMing Lei static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3628484b4061SJens Axboe {
362958bf9358SMing Lei lockdep_assert_held(&blk_mq_cpuhp_lock);
363058bf9358SMing Lei
363158bf9358SMing Lei if (!(hctx->flags & BLK_MQ_F_STACKING) &&
363258bf9358SMing Lei !hlist_unhashed(&hctx->cpuhp_online)) {
3633bf0beec0SMing Lei cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3634bf0beec0SMing Lei &hctx->cpuhp_online);
363558bf9358SMing Lei INIT_HLIST_NODE(&hctx->cpuhp_online);
363658bf9358SMing Lei }
363758bf9358SMing Lei
363858bf9358SMing Lei if (!hlist_unhashed(&hctx->cpuhp_dead)) {
36399467f859SThomas Gleixner cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
36409467f859SThomas Gleixner &hctx->cpuhp_dead);
364158bf9358SMing Lei INIT_HLIST_NODE(&hctx->cpuhp_dead);
364258bf9358SMing Lei }
364358bf9358SMing Lei }
364458bf9358SMing Lei
364558bf9358SMing Lei static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
364658bf9358SMing Lei {
364758bf9358SMing Lei mutex_lock(&blk_mq_cpuhp_lock);
364858bf9358SMing Lei __blk_mq_remove_cpuhp(hctx);
364958bf9358SMing Lei mutex_unlock(&blk_mq_cpuhp_lock);
365058bf9358SMing Lei }
365158bf9358SMing Lei
365258bf9358SMing Lei static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
365358bf9358SMing Lei {
365458bf9358SMing Lei lockdep_assert_held(&blk_mq_cpuhp_lock);
365558bf9358SMing Lei
365658bf9358SMing Lei if (!(hctx->flags & BLK_MQ_F_STACKING) &&
365758bf9358SMing Lei hlist_unhashed(&hctx->cpuhp_online))
365858bf9358SMing Lei cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
365958bf9358SMing Lei &hctx->cpuhp_online);
366058bf9358SMing Lei
366158bf9358SMing Lei if (hlist_unhashed(&hctx->cpuhp_dead))
366258bf9358SMing Lei cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
366358bf9358SMing Lei &hctx->cpuhp_dead);
366458bf9358SMing Lei }
366558bf9358SMing Lei
366658bf9358SMing Lei static void __blk_mq_remove_cpuhp_list(struct list_head *head)
366758bf9358SMing Lei {
366858bf9358SMing Lei struct blk_mq_hw_ctx *hctx;
366958bf9358SMing Lei
367058bf9358SMing Lei lockdep_assert_held(&blk_mq_cpuhp_lock);
367158bf9358SMing Lei
367258bf9358SMing Lei list_for_each_entry(hctx, head, hctx_list)
367358bf9358SMing Lei __blk_mq_remove_cpuhp(hctx);
367458bf9358SMing Lei }
367558bf9358SMing Lei
367658bf9358SMing Lei /*
367758bf9358SMing Lei * Unregister cpuhp callbacks from exited hw queues
367858bf9358SMing Lei *
367958bf9358SMing Lei * Safe to call if this `request_queue` is live
368058bf9358SMing Lei */
368158bf9358SMing Lei static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
368258bf9358SMing Lei {
368358bf9358SMing Lei LIST_HEAD(hctx_list);
368458bf9358SMing Lei
368558bf9358SMing Lei spin_lock(&q->unused_hctx_lock);
368658bf9358SMing Lei list_splice_init(&q->unused_hctx_list, &hctx_list);
368758bf9358SMing Lei spin_unlock(&q->unused_hctx_lock);
368858bf9358SMing Lei
368958bf9358SMing Lei mutex_lock(&blk_mq_cpuhp_lock);
369058bf9358SMing Lei __blk_mq_remove_cpuhp_list(&hctx_list);
369158bf9358SMing Lei mutex_unlock(&blk_mq_cpuhp_lock);
369258bf9358SMing Lei
369358bf9358SMing Lei spin_lock(&q->unused_hctx_lock);
369458bf9358SMing Lei list_splice(&hctx_list, &q->unused_hctx_list);
369558bf9358SMing Lei spin_unlock(&q->unused_hctx_lock);
369658bf9358SMing Lei }
369758bf9358SMing Lei
369858bf9358SMing Lei /*
369958bf9358SMing Lei * Register cpuhp callbacks from all hw queues
370058bf9358SMing Lei *
370158bf9358SMing Lei * Safe to call if this `request_queue` is live
370258bf9358SMing Lei */
370358bf9358SMing Lei static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
370458bf9358SMing Lei {
370558bf9358SMing Lei struct blk_mq_hw_ctx *hctx;
370658bf9358SMing Lei unsigned long i;
370758bf9358SMing Lei
370858bf9358SMing Lei mutex_lock(&blk_mq_cpuhp_lock);
370958bf9358SMing Lei queue_for_each_hw_ctx(q, hctx, i)
371058bf9358SMing Lei __blk_mq_add_cpuhp(hctx);
371158bf9358SMing Lei mutex_unlock(&blk_mq_cpuhp_lock);
3712484b4061SJens Axboe }
3713484b4061SJens Axboe
3714364b6181SMing Lei /*
3715364b6181SMing Lei * Before freeing the hw queue, clear the flush request reference in
3716364b6181SMing Lei * tags->rqs[] to avoid a potential use-after-free.
3717364b6181SMing Lei */
3718364b6181SMing Lei static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3719364b6181SMing Lei unsigned int queue_depth, struct request *flush_rq)
3720364b6181SMing Lei {
3721364b6181SMing Lei int i;
3722364b6181SMing Lei unsigned long flags;
3723364b6181SMing Lei
3724364b6181SMing Lei /* The hw queue may not be mapped yet */
3725364b6181SMing Lei if (!tags)
3726364b6181SMing Lei return;
3727364b6181SMing Lei
37280a467d0fSJens Axboe WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3729364b6181SMing Lei
3730364b6181SMing Lei for (i = 0; i < queue_depth; i++)
3731364b6181SMing Lei cmpxchg(&tags->rqs[i], flush_rq, NULL);
3732364b6181SMing Lei
3733364b6181SMing Lei /*
3734364b6181SMing Lei * Wait until all pending iteration is done.
3735364b6181SMing Lei *
3736364b6181SMing Lei * The request reference has been cleared and is guaranteed to be
3737364b6181SMing Lei * observed after the ->lock is released.
3738364b6181SMing Lei */
3739364b6181SMing Lei spin_lock_irqsave(&tags->lock, flags);
3740364b6181SMing Lei spin_unlock_irqrestore(&tags->lock, flags);
3741364b6181SMing Lei }
3742364b6181SMing Lei
3743c3b4afcaSMing Lei /* hctx->ctxs will be freed in queue's release handler */
374408e98fc6SMing Lei static void blk_mq_exit_hctx(struct request_queue *q,
374508e98fc6SMing Lei struct blk_mq_tag_set *set,
374608e98fc6SMing Lei struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
374708e98fc6SMing Lei {
3748364b6181SMing Lei struct request *flush_rq = hctx->fq->flush_rq;
3749364b6181SMing Lei
37508ab0b7dcSMing Lei if (blk_mq_hw_queue_mapped(hctx))
375108e98fc6SMing Lei blk_mq_tag_idle(hctx);
375208e98fc6SMing Lei
37536cfeadbfSMing Lei if (blk_queue_init_done(q))
3754364b6181SMing Lei blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3755364b6181SMing Lei set->queue_depth, flush_rq);
3756f70ced09SMing Lei if (set->ops->exit_request)
3757364b6181SMing Lei set->ops->exit_request(set, flush_rq, hctx_idx);
3758f70ced09SMing Lei
375908e98fc6SMing Lei if (set->ops->exit_hctx)
376008e98fc6SMing Lei set->ops->exit_hctx(hctx, hctx_idx);
376108e98fc6SMing Lei
37624e5cc99eSMing Lei xa_erase(&q->hctx_table, hctx_idx);
37634e5cc99eSMing Lei
37642f8f1336SMing Lei spin_lock(&q->unused_hctx_lock);
37652f8f1336SMing Lei list_add(&hctx->hctx_list, &q->unused_hctx_list);
37662f8f1336SMing Lei spin_unlock(&q->unused_hctx_lock);
376708e98fc6SMing Lei }
376808e98fc6SMing Lei
3769624dbe47SMing Lei static void blk_mq_exit_hw_queues(struct request_queue *q,
3770624dbe47SMing Lei struct blk_mq_tag_set *set, int nr_queue)
3771624dbe47SMing Lei {
3772624dbe47SMing Lei struct blk_mq_hw_ctx *hctx;
37734f481208SMing Lei unsigned long i;
3774624dbe47SMing Lei
3775624dbe47SMing Lei queue_for_each_hw_ctx(q, hctx, i) {
3776624dbe47SMing Lei if (i == nr_queue)
3777624dbe47SMing Lei break;
377858bf9358SMing Lei blk_mq_remove_cpuhp(hctx);
377908e98fc6SMing Lei blk_mq_exit_hctx(q, set, hctx, i);
3780624dbe47SMing Lei }
3781624dbe47SMing Lei }
3782624dbe47SMing Lei
378308e98fc6SMing Lei static int blk_mq_init_hctx(struct request_queue *q,
378408e98fc6SMing Lei struct blk_mq_tag_set *set,
378508e98fc6SMing Lei struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3786320ae51fSJens Axboe {
37877c6c5b7cSMing Lei hctx->queue_num = hctx_idx;
3788320ae51fSJens Axboe
37897c6c5b7cSMing Lei hctx->tags = set->tags[hctx_idx];
37907c6c5b7cSMing Lei
37917c6c5b7cSMing Lei if (set->ops->init_hctx &&
37927c6c5b7cSMing Lei set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3793079fcc92SMing Lei goto fail;
37947c6c5b7cSMing Lei
37957c6c5b7cSMing Lei if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
37967c6c5b7cSMing Lei hctx->numa_node))
37977c6c5b7cSMing Lei goto exit_hctx;
37984e5cc99eSMing Lei
37994e5cc99eSMing Lei if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
38004e5cc99eSMing Lei goto exit_flush_rq;
38014e5cc99eSMing Lei
38027c6c5b7cSMing Lei return 0;
38037c6c5b7cSMing Lei
38044e5cc99eSMing Lei exit_flush_rq:
38054e5cc99eSMing Lei if (set->ops->exit_request)
38064e5cc99eSMing Lei set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
38077c6c5b7cSMing Lei exit_hctx:
38087c6c5b7cSMing Lei if (set->ops->exit_hctx)
38097c6c5b7cSMing Lei set->ops->exit_hctx(hctx, hctx_idx);
3810079fcc92SMing Lei fail:
38117c6c5b7cSMing Lei return -1;
38127c6c5b7cSMing Lei }
38137c6c5b7cSMing Lei
38147c6c5b7cSMing Lei static struct blk_mq_hw_ctx *
38157c6c5b7cSMing Lei blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
38167c6c5b7cSMing Lei int node)
38177c6c5b7cSMing Lei {
38187c6c5b7cSMing Lei struct blk_mq_hw_ctx *hctx;
38197c6c5b7cSMing Lei gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
38207c6c5b7cSMing Lei
3821704b914fSMing Lei hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
38227c6c5b7cSMing Lei if (!hctx)
38237c6c5b7cSMing Lei goto fail_alloc_hctx;
38247c6c5b7cSMing Lei
38257c6c5b7cSMing Lei if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
38267c6c5b7cSMing Lei goto free_hctx;
38277c6c5b7cSMing Lei
38287c6c5b7cSMing Lei atomic_set(&hctx->nr_active, 0);
3829320ae51fSJens Axboe if (node == NUMA_NO_NODE)
38307c6c5b7cSMing Lei node = set->numa_node;
38317c6c5b7cSMing Lei hctx->numa_node = node;
3832320ae51fSJens Axboe
38339f993737SJens Axboe INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3834320ae51fSJens Axboe spin_lock_init(&hctx->lock);
3835320ae51fSJens Axboe INIT_LIST_HEAD(&hctx->dispatch);
383658bf9358SMing Lei INIT_HLIST_NODE(&hctx->cpuhp_dead);
383758bf9358SMing Lei INIT_HLIST_NODE(&hctx->cpuhp_online);
3838320ae51fSJens Axboe hctx->queue = q;
383951db1c37SMing Lei hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3840320ae51fSJens Axboe
38412f8f1336SMing Lei INIT_LIST_HEAD(&hctx->hctx_list);
38422f8f1336SMing Lei
3843320ae51fSJens Axboe /*
3844a68aafa5SJens Axboe * Allocate space for all possible cpus to avoid allocation at
3845320ae51fSJens Axboe * runtime
3846320ae51fSJens Axboe */
3847d904bfa7SJohannes Thumshirn hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
38487c6c5b7cSMing Lei gfp, node);
3849320ae51fSJens Axboe if (!hctx->ctxs)
38507c6c5b7cSMing Lei goto free_cpumask;
3851320ae51fSJens Axboe
38525b202853SJianchao Wang if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3853c548e62bSMing Lei gfp, node, false, false))
385408e98fc6SMing Lei goto free_ctxs;
3855320ae51fSJens Axboe hctx->nr_ctx = 0;
3856320ae51fSJens Axboe
38575815839bSMing Lei spin_lock_init(&hctx->dispatch_wait_lock);
3858eb619fdbSJens Axboe init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3859eb619fdbSJens Axboe INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3860eb619fdbSJens Axboe
3861754a1572SGuoqing Jiang hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3862f70ced09SMing Lei if (!hctx->fq)
38637c6c5b7cSMing Lei goto free_bitmap;
3864f70ced09SMing Lei
38657c6c5b7cSMing Lei blk_mq_hctx_kobj_init(hctx);
38666a83e74dSBart Van Assche
38677c6c5b7cSMing Lei return hctx;
386808e98fc6SMing Lei
386908e98fc6SMing Lei free_bitmap:
387088459642SOmar Sandoval sbitmap_free(&hctx->ctx_map);
387108e98fc6SMing Lei free_ctxs:
387208e98fc6SMing Lei kfree(hctx->ctxs);
38737c6c5b7cSMing Lei free_cpumask:
38747c6c5b7cSMing Lei free_cpumask_var(hctx->cpumask);
38757c6c5b7cSMing Lei free_hctx:
38767c6c5b7cSMing Lei kfree(hctx);
38777c6c5b7cSMing Lei fail_alloc_hctx:
38787c6c5b7cSMing Lei return NULL;
387908e98fc6SMing Lei }
388008e98fc6SMing Lei
3881320ae51fSJens Axboe static void blk_mq_init_cpu_queues(struct request_queue *q,
3882320ae51fSJens Axboe unsigned int nr_hw_queues)
3883320ae51fSJens Axboe {
3884b3c661b1SJens Axboe struct blk_mq_tag_set *set = q->tag_set;
3885b3c661b1SJens Axboe unsigned int i, j;
3886320ae51fSJens Axboe
3887320ae51fSJens Axboe for_each_possible_cpu(i) {
3888320ae51fSJens Axboe struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3889320ae51fSJens Axboe struct blk_mq_hw_ctx *hctx;
3890c16d6b5aSMing Lei int k;
3891320ae51fSJens Axboe
3892320ae51fSJens Axboe __ctx->cpu = i;
3893320ae51fSJens Axboe spin_lock_init(&__ctx->lock);
3894c16d6b5aSMing Lei for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3895c16d6b5aSMing Lei INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3896c16d6b5aSMing Lei
3897320ae51fSJens Axboe __ctx->queue = q;
3898320ae51fSJens Axboe
3899320ae51fSJens Axboe /*
3900320ae51fSJens Axboe * Set the local node, IFF we have more than one hw queue. If
3901320ae51fSJens Axboe * not, we remain on the home node of the device.
3902320ae51fSJens Axboe */
3903b3c661b1SJens Axboe for (j = 0; j < set->nr_maps; j++) {
3904b3c661b1SJens Axboe hctx = blk_mq_map_queue_type(q, j, i);
3905320ae51fSJens Axboe if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3906576e85c5SXianting Tian hctx->numa_node = cpu_to_node(i);
3907320ae51fSJens Axboe }
3908320ae51fSJens Axboe }
3909b3c661b1SJens Axboe }
3910320ae51fSJens Axboe
391163064be1SJohn Garry struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
391263064be1SJohn Garry unsigned int hctx_idx,
391363064be1SJohn Garry unsigned int depth)
391463064be1SJohn Garry {
391563064be1SJohn Garry struct blk_mq_tags *tags;
391663064be1SJohn Garry int ret;
391763064be1SJohn Garry
3918e155b0c2SJohn Garry tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
391963064be1SJohn Garry if (!tags)
392063064be1SJohn Garry return NULL;
392163064be1SJohn Garry
392263064be1SJohn Garry ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
392363064be1SJohn Garry if (ret) {
3924e155b0c2SJohn Garry blk_mq_free_rq_map(tags);
392563064be1SJohn Garry return NULL;
392663064be1SJohn Garry }
392763064be1SJohn Garry
392863064be1SJohn Garry return tags;
392963064be1SJohn Garry }
393063064be1SJohn Garry
393163064be1SJohn Garry static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
393203b63b02SWeiping Zhang int hctx_idx)
3933cc71a6f4SJens Axboe {
3934079a2e3eSJohn Garry if (blk_mq_is_shared_tags(set->flags)) {
3935079a2e3eSJohn Garry set->tags[hctx_idx] = set->shared_tags;
3936cc71a6f4SJens Axboe
3937cc71a6f4SJens Axboe return true;
3938cc71a6f4SJens Axboe }
3939cc71a6f4SJens Axboe
394063064be1SJohn Garry set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3941cc71a6f4SJens Axboe set->queue_depth);
3942cc71a6f4SJens Axboe
394363064be1SJohn Garry return set->tags[hctx_idx];
3944cc71a6f4SJens Axboe }
3945cc71a6f4SJens Axboe
3946645db34eSJohn Garry void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3947645db34eSJohn Garry struct blk_mq_tags *tags,
3948cc71a6f4SJens Axboe unsigned int hctx_idx)
3949cc71a6f4SJens Axboe {
3950645db34eSJohn Garry if (tags) {
3951645db34eSJohn Garry blk_mq_free_rqs(set, tags, hctx_idx);
3952e155b0c2SJohn Garry blk_mq_free_rq_map(tags);
3953cc71a6f4SJens Axboe }
3954bd166ef1SJens Axboe }
3955cc71a6f4SJens Axboe
3956e155b0c2SJohn Garry static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3957e155b0c2SJohn Garry unsigned int hctx_idx)
3958e155b0c2SJohn Garry {
3959079a2e3eSJohn Garry if (!blk_mq_is_shared_tags(set->flags))
3960e155b0c2SJohn Garry blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3961e155b0c2SJohn Garry
3962e155b0c2SJohn Garry set->tags[hctx_idx] = NULL;
3963e155b0c2SJohn Garry }
3964e155b0c2SJohn Garry
39654b855ad3SChristoph Hellwig static void blk_mq_map_swqueue(struct request_queue *q)
3966320ae51fSJens Axboe {
39674f481208SMing Lei unsigned int j, hctx_idx;
39684f481208SMing Lei unsigned long i;
3969320ae51fSJens Axboe struct blk_mq_hw_ctx *hctx;
3970320ae51fSJens Axboe struct blk_mq_ctx *ctx;
39712a34c087SMing Lei struct blk_mq_tag_set *set = q->tag_set;
3972320ae51fSJens Axboe
3973320ae51fSJens Axboe queue_for_each_hw_ctx(q, hctx, i) {
3974e4043dcfSJens Axboe cpumask_clear(hctx->cpumask);
3975320ae51fSJens Axboe hctx->nr_ctx = 0;
3976d416c92cShuhai hctx->dispatch_from = NULL;
3977320ae51fSJens Axboe }
3978320ae51fSJens Axboe
3979320ae51fSJens Axboe /*
39804b855ad3SChristoph Hellwig * Map software to hardware queues.
39814412efecSMing Lei *
39824412efecSMing Lei * If the cpu isn't present, the cpu is mapped to the first hctx.
3983320ae51fSJens Axboe */
398420e4d813SChristoph Hellwig for_each_possible_cpu(i) {
3985fd689871SMing Lei
3986fd689871SMing Lei ctx = per_cpu_ptr(q->queue_ctx, i);
3987fd689871SMing Lei for (j = 0; j < set->nr_maps; j++) {
3988fd689871SMing Lei if (!set->map[j].nr_queues) {
3989fd689871SMing Lei ctx->hctxs[j] = blk_mq_map_queue_type(q,
3990fd689871SMing Lei HCTX_TYPE_DEFAULT, i);
3991fd689871SMing Lei continue;
3992fd689871SMing Lei }
3993fd689871SMing Lei hctx_idx = set->map[j].mq_map[i];
39944412efecSMing Lei /* unmapped hw queue can be remapped after CPU topo changed */
39954412efecSMing Lei if (!set->tags[hctx_idx] &&
399663064be1SJohn Garry !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
39974412efecSMing Lei /*
39984412efecSMing Lei * If tags initialization fails for some hctx,
39994412efecSMing Lei * that hctx won't be brought online. In this
40004412efecSMing Lei * case, remap the current ctx to hctx[0] which
40014412efecSMing Lei * is guaranteed to always have tags allocated.
40024412efecSMing Lei */
4003fd689871SMing Lei set->map[j].mq_map[i] = 0;
4004bb94aea1SJianchao Wang }
4005e5edd5f2SMing Lei
4006b3c661b1SJens Axboe hctx = blk_mq_map_queue_type(q, j, i);
40078ccdf4a3SJianchao Wang ctx->hctxs[j] = hctx;
4008b3c661b1SJens Axboe /*
4009b3c661b1SJens Axboe * If the CPU is already set in the mask, then we've
4010b3c661b1SJens Axboe * mapped this one already. This can happen if
4011b3c661b1SJens Axboe * devices share queues across queue maps.
4012b3c661b1SJens Axboe */
4013b3c661b1SJens Axboe if (cpumask_test_cpu(i, hctx->cpumask))
4014b3c661b1SJens Axboe continue;
4015b3c661b1SJens Axboe
4016e4043dcfSJens Axboe cpumask_set_cpu(i, hctx->cpumask);
4017b3c661b1SJens Axboe hctx->type = j;
4018f31967f0SJens Axboe ctx->index_hw[hctx->type] = hctx->nr_ctx;
4019320ae51fSJens Axboe hctx->ctxs[hctx->nr_ctx++] = ctx;
4020f31967f0SJens Axboe
4021f31967f0SJens Axboe /*
4022f31967f0SJens Axboe * If the nr_ctx type overflows, we have exceeded the
4023f31967f0SJens Axboe * amount of sw queues we can support.
4024f31967f0SJens Axboe */
4025f31967f0SJens Axboe BUG_ON(!hctx->nr_ctx);
4026320ae51fSJens Axboe }
4027bb94aea1SJianchao Wang
4028bb94aea1SJianchao Wang for (; j < HCTX_MAX_TYPES; j++)
4029bb94aea1SJianchao Wang ctx->hctxs[j] = blk_mq_map_queue_type(q,
4030bb94aea1SJianchao Wang HCTX_TYPE_DEFAULT, i);
4031b3c661b1SJens Axboe }
4032506e931fSJens Axboe
4033506e931fSJens Axboe queue_for_each_hw_ctx(q, hctx, i) {
40344412efecSMing Lei /*
40354412efecSMing Lei * If no software queues are mapped to this hardware queue,
40364412efecSMing Lei * disable it and free the request entries.
40374412efecSMing Lei */
40384412efecSMing Lei if (!hctx->nr_ctx) {
40394412efecSMing Lei /* Never unmap queue 0. We need it as a
40404412efecSMing Lei * fallback in case a new remap fails
40414412efecSMing Lei * allocation.
40424412efecSMing Lei */
4043e155b0c2SJohn Garry if (i)
4044e155b0c2SJohn Garry __blk_mq_free_map_and_rqs(set, i);
40454412efecSMing Lei
40464412efecSMing Lei hctx->tags = NULL;
40474412efecSMing Lei continue;
40484412efecSMing Lei }
4049484b4061SJens Axboe
40502a34c087SMing Lei hctx->tags = set->tags[i];
40512a34c087SMing Lei WARN_ON(!hctx->tags);
40522a34c087SMing Lei
4053484b4061SJens Axboe /*
4054889fa31fSChong Yuan * Set the map size to the number of mapped software queues.
4055889fa31fSChong Yuan * This is more accurate and more efficient than looping
4056889fa31fSChong Yuan * over all possibly mapped software queues.
4057889fa31fSChong Yuan */
405888459642SOmar Sandoval sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
4059889fa31fSChong Yuan
4060889fa31fSChong Yuan /*
4061484b4061SJens Axboe * Initialize batch roundrobin counts
4062484b4061SJens Axboe */
4063f82ddf19SMing Lei hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
4064506e931fSJens Axboe hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
4065506e931fSJens Axboe }
4066320ae51fSJens Axboe }
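/*
 * Illustration with made-up numbers: 8 possible CPUs and a single
 * HCTX_TYPE_DEFAULT map spreading them over two hw queues (CPUs 0-3 ->
 * hctx0, CPUs 4-7 -> hctx1) leaves each hctx with nr_ctx == 4, each
 * ctx->index_hw[HCTX_TYPE_DEFAULT] holding that ctx's slot (0-3) within
 * its hctx, and each hctx->ctx_map resized to 4 bits.
 */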
4067320ae51fSJens Axboe
40688e8320c9SJens Axboe /*
40698e8320c9SJens Axboe * Caller needs to ensure that we're either frozen/quiesced, or that
40708e8320c9SJens Axboe * the queue isn't live yet.
40718e8320c9SJens Axboe */
40722404e607SJeff Moyer static void queue_set_hctx_shared(struct request_queue *q, bool shared)
40730d2602caSJens Axboe {
40740d2602caSJens Axboe struct blk_mq_hw_ctx *hctx;
40754f481208SMing Lei unsigned long i;
40760d2602caSJens Axboe
40770d2602caSJens Axboe queue_for_each_hw_ctx(q, hctx, i) {
4078454bb677SYu Kuai if (shared) {
407951db1c37SMing Lei hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4080454bb677SYu Kuai } else {
4081454bb677SYu Kuai blk_mq_tag_idle(hctx);
408251db1c37SMing Lei hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
40830d2602caSJens Axboe }
40842404e607SJeff Moyer }
4085454bb677SYu Kuai }
40862404e607SJeff Moyer
4087655ac300SHannes Reinecke static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
40888e8320c9SJens Axboe bool shared)
40892404e607SJeff Moyer {
40902404e607SJeff Moyer struct request_queue *q;
40912404e607SJeff Moyer
4092705cda97SBart Van Assche lockdep_assert_held(&set->tag_list_lock);
4093705cda97SBart Van Assche
40942404e607SJeff Moyer list_for_each_entry(q, &set->tag_list, tag_set_list) {
40952404e607SJeff Moyer blk_mq_freeze_queue(q);
40962404e607SJeff Moyer queue_set_hctx_shared(q, shared);
40970d2602caSJens Axboe blk_mq_unfreeze_queue(q);
40980d2602caSJens Axboe }
40990d2602caSJens Axboe }
41000d2602caSJens Axboe
41010d2602caSJens Axboe static void blk_mq_del_queue_tag_set(struct request_queue *q)
41020d2602caSJens Axboe {
41030d2602caSJens Axboe struct blk_mq_tag_set *set = q->tag_set;
41040d2602caSJens Axboe
41050d2602caSJens Axboe mutex_lock(&set->tag_list_lock);
410608c875cbSDaniel Wagner list_del(&q->tag_set_list);
41072404e607SJeff Moyer if (list_is_singular(&set->tag_list)) {
41082404e607SJeff Moyer /* just transitioned to unshared */
410951db1c37SMing Lei set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
41102404e607SJeff Moyer /* update existing queue */
4111655ac300SHannes Reinecke blk_mq_update_tag_set_shared(set, false);
41122404e607SJeff Moyer }
41130d2602caSJens Axboe mutex_unlock(&set->tag_list_lock);
4114a347c7adSRoman Pen INIT_LIST_HEAD(&q->tag_set_list);
41150d2602caSJens Axboe }
41160d2602caSJens Axboe
41170d2602caSJens Axboe static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
41180d2602caSJens Axboe struct request_queue *q)
41190d2602caSJens Axboe {
41200d2602caSJens Axboe mutex_lock(&set->tag_list_lock);
41212404e607SJeff Moyer
4122ff821d27SJens Axboe /*
4123ff821d27SJens Axboe * Check to see if we're transitioning to shared (from 1 to 2 queues).
4124ff821d27SJens Axboe */
4125ff821d27SJens Axboe if (!list_empty(&set->tag_list) &&
412651db1c37SMing Lei !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
412751db1c37SMing Lei set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
41282404e607SJeff Moyer /* update existing queue */
4129655ac300SHannes Reinecke blk_mq_update_tag_set_shared(set, true);
41302404e607SJeff Moyer }
413151db1c37SMing Lei if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
41322404e607SJeff Moyer queue_set_hctx_shared(q, true);
413308c875cbSDaniel Wagner list_add_tail(&q->tag_set_list, &set->tag_list);
41342404e607SJeff Moyer
41350d2602caSJens Axboe mutex_unlock(&set->tag_list_lock);
41360d2602caSJens Axboe }
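/*
 * Example of the transitions above: with two request queues sharing one
 * tag_set (two namespaces behind one NVMe controller, for instance),
 * adding the second queue flips BLK_MQ_F_TAG_QUEUE_SHARED on for the
 * set and every queue in it, and blk_mq_del_queue_tag_set() flips it
 * back off once only a single queue remains, freezing each queue around
 * the switch.
 */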
41370d2602caSJens Axboe
41381db4909eSMing Lei /* All allocations will be freed in release handler of q->mq_kobj */
41391db4909eSMing Lei static int blk_mq_alloc_ctxs(struct request_queue *q)
41401db4909eSMing Lei {
41411db4909eSMing Lei struct blk_mq_ctxs *ctxs;
41421db4909eSMing Lei int cpu;
41431db4909eSMing Lei
41441db4909eSMing Lei ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
41451db4909eSMing Lei if (!ctxs)
41461db4909eSMing Lei return -ENOMEM;
41471db4909eSMing Lei
41481db4909eSMing Lei ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
41491db4909eSMing Lei if (!ctxs->queue_ctx)
41501db4909eSMing Lei goto fail;
41511db4909eSMing Lei
41521db4909eSMing Lei for_each_possible_cpu(cpu) {
41531db4909eSMing Lei struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
41541db4909eSMing Lei ctx->ctxs = ctxs;
41551db4909eSMing Lei }
41561db4909eSMing Lei
41571db4909eSMing Lei q->mq_kobj = &ctxs->kobj;
41581db4909eSMing Lei q->queue_ctx = ctxs->queue_ctx;
41591db4909eSMing Lei
41601db4909eSMing Lei return 0;
41611db4909eSMing Lei fail:
41621db4909eSMing Lei kfree(ctxs);
41631db4909eSMing Lei return -ENOMEM;
41641db4909eSMing Lei }
41651db4909eSMing Lei
4166e09aae7eSMing Lei /*
4167e09aae7eSMing Lei * This is the actual release handler for mq, but we do it from the
4168e09aae7eSMing Lei * request queue's release handler to avoid use-after-free headaches:
4169e09aae7eSMing Lei * q->mq_kobj shouldn't have been introduced, but we can't group the
4170e09aae7eSMing Lei * ctx/hctx kobjects without it.
4171e09aae7eSMing Lei */
4172e09aae7eSMing Lei void blk_mq_release(struct request_queue *q)
4173e09aae7eSMing Lei {
41742f8f1336SMing Lei struct blk_mq_hw_ctx *hctx, *next;
41754f481208SMing Lei unsigned long i;
4176e09aae7eSMing Lei
41772f8f1336SMing Lei queue_for_each_hw_ctx(q, hctx, i)
41782f8f1336SMing Lei WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
41792f8f1336SMing Lei
41802f8f1336SMing Lei /* all hctx are in .unused_hctx_list now */
41812f8f1336SMing Lei list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
41822f8f1336SMing Lei list_del_init(&hctx->hctx_list);
41836c8b232eSMing Lei kobject_put(&hctx->kobj);
4184c3b4afcaSMing Lei }
4185e09aae7eSMing Lei
41864e5cc99eSMing Lei xa_destroy(&q->hctx_table);
4187e09aae7eSMing Lei
41887ea5fe31SMing Lei /*
41897ea5fe31SMing Lei * release .mq_kobj and sw queue's kobject now because
41907ea5fe31SMing Lei * both share lifetime with request queue.
41917ea5fe31SMing Lei */
41927ea5fe31SMing Lei blk_mq_sysfs_deinit(q);
4193e09aae7eSMing Lei }
4194e09aae7eSMing Lei
41955ec780a6SChristoph Hellwig static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
41962f227bb9SChristoph Hellwig void *queuedata)
4197320ae51fSJens Axboe {
419826a9750aSChristoph Hellwig struct request_queue *q;
419926a9750aSChristoph Hellwig int ret;
4200b62c21b7SMike Snitzer
420180bd4a7aSChristoph Hellwig q = blk_alloc_queue(set->numa_node);
420226a9750aSChristoph Hellwig if (!q)
4203b62c21b7SMike Snitzer return ERR_PTR(-ENOMEM);
420426a9750aSChristoph Hellwig q->queuedata = queuedata;
420526a9750aSChristoph Hellwig ret = blk_mq_init_allocated_queue(set, q);
420626a9750aSChristoph Hellwig if (ret) {
42076f8191fdSChristoph Hellwig blk_put_queue(q);
420826a9750aSChristoph Hellwig return ERR_PTR(ret);
420926a9750aSChristoph Hellwig }
4210b62c21b7SMike Snitzer return q;
4211b62c21b7SMike Snitzer }
42122f227bb9SChristoph Hellwig
42132f227bb9SChristoph Hellwig struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
42142f227bb9SChristoph Hellwig {
42152f227bb9SChristoph Hellwig return blk_mq_init_queue_data(set, NULL);
42162f227bb9SChristoph Hellwig }
4217b62c21b7SMike Snitzer EXPORT_SYMBOL(blk_mq_init_queue);
4218b62c21b7SMike Snitzer
42196f8191fdSChristoph Hellwig /**
42206f8191fdSChristoph Hellwig * blk_mq_destroy_queue - shutdown a request queue
42216f8191fdSChristoph Hellwig * @q: request queue to shutdown
42226f8191fdSChristoph Hellwig *
422381ea42b9SBart Van Assche * This shuts down a request queue allocated by blk_mq_init_queue(). All future
422481ea42b9SBart Van Assche * requests will be failed with -ENODEV. The caller is responsible for dropping
422581ea42b9SBart Van Assche * the reference from blk_mq_init_queue() by calling blk_put_queue().
42266f8191fdSChristoph Hellwig *
42276f8191fdSChristoph Hellwig * Context: can sleep
42286f8191fdSChristoph Hellwig */
42296f8191fdSChristoph Hellwig void blk_mq_destroy_queue(struct request_queue *q)
42306f8191fdSChristoph Hellwig {
42316f8191fdSChristoph Hellwig WARN_ON_ONCE(!queue_is_mq(q));
42326f8191fdSChristoph Hellwig WARN_ON_ONCE(blk_queue_registered(q));
42336f8191fdSChristoph Hellwig
42346f8191fdSChristoph Hellwig might_sleep();
42356f8191fdSChristoph Hellwig
42366f8191fdSChristoph Hellwig blk_queue_flag_set(QUEUE_FLAG_DYING, q);
42376f8191fdSChristoph Hellwig blk_queue_start_drain(q);
423856c1ee92SJinlong Chen blk_mq_freeze_queue_wait(q);
42396f8191fdSChristoph Hellwig
42406f8191fdSChristoph Hellwig blk_sync_queue(q);
42416f8191fdSChristoph Hellwig blk_mq_cancel_work_sync(q);
42426f8191fdSChristoph Hellwig blk_mq_exit_queue(q);
42436f8191fdSChristoph Hellwig }
42446f8191fdSChristoph Hellwig EXPORT_SYMBOL(blk_mq_destroy_queue);
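/*
 * Sketch of the expected pairing (hypothetical driver code), matching
 * the kernel-doc above:
 *
 *	q = blk_mq_init_queue(set);
 *	...
 *	blk_mq_destroy_queue(q);
 *	blk_put_queue(q);
 */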
42456f8191fdSChristoph Hellwig
42464dcc4874SChristoph Hellwig struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
42474dcc4874SChristoph Hellwig struct lock_class_key *lkclass)
42489316a9edSJens Axboe {
42499316a9edSJens Axboe struct request_queue *q;
4250b461dfc4SChristoph Hellwig struct gendisk *disk;
42519316a9edSJens Axboe
4252b461dfc4SChristoph Hellwig q = blk_mq_init_queue_data(set, queuedata);
4253b461dfc4SChristoph Hellwig if (IS_ERR(q))
4254b461dfc4SChristoph Hellwig return ERR_CAST(q);
42559316a9edSJens Axboe
42564a1fa41dSChristoph Hellwig disk = __alloc_disk_node(q, set->numa_node, lkclass);
4257b461dfc4SChristoph Hellwig if (!disk) {
42580a3e5cc7SChristoph Hellwig blk_mq_destroy_queue(q);
42592b3f056fSChristoph Hellwig blk_put_queue(q);
4260b461dfc4SChristoph Hellwig return ERR_PTR(-ENOMEM);
42619316a9edSJens Axboe }
42626f8191fdSChristoph Hellwig set_bit(GD_OWNS_QUEUE, &disk->state);
4263b461dfc4SChristoph Hellwig return disk;
42649316a9edSJens Axboe }
4265b461dfc4SChristoph Hellwig EXPORT_SYMBOL(__blk_mq_alloc_disk);
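/*
 * Illustrative driver-side sketch (my_ops, my_cmd and my_data are
 * hypothetical; blk_mq_alloc_disk() is the usual wrapper around
 * __blk_mq_alloc_disk() in this kernel's headers):
 *
 *	set->ops = &my_ops;
 *	set->nr_hw_queues = 4;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *	disk = blk_mq_alloc_disk(set, my_data);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(disk);
 *	}
 */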
42669316a9edSJens Axboe
42676f8191fdSChristoph Hellwig struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
42686f8191fdSChristoph Hellwig struct lock_class_key *lkclass)
42696f8191fdSChristoph Hellwig {
427022c17e27SChristoph Hellwig struct gendisk *disk;
427122c17e27SChristoph Hellwig
42726f8191fdSChristoph Hellwig if (!blk_get_queue(q))
42736f8191fdSChristoph Hellwig return NULL;
427422c17e27SChristoph Hellwig disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
427522c17e27SChristoph Hellwig if (!disk)
427622c17e27SChristoph Hellwig blk_put_queue(q);
427722c17e27SChristoph Hellwig return disk;
42786f8191fdSChristoph Hellwig }
42796f8191fdSChristoph Hellwig EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
42806f8191fdSChristoph Hellwig
4281*ee18012cSMing Lei /*
4282*ee18012cSMing Lei * Only an hctx removed from the cpuhp lists can be reused
4283*ee18012cSMing Lei */
4284*ee18012cSMing Lei static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx)
4285*ee18012cSMing Lei {
4286*ee18012cSMing Lei return hlist_unhashed(&hctx->cpuhp_online) &&
4287*ee18012cSMing Lei hlist_unhashed(&hctx->cpuhp_dead);
4288*ee18012cSMing Lei }
4289*ee18012cSMing Lei
429034d11ffaSJianchao Wang static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
429134d11ffaSJianchao Wang struct blk_mq_tag_set *set, struct request_queue *q,
429234d11ffaSJianchao Wang int hctx_idx, int node)
429334d11ffaSJianchao Wang {
42942f8f1336SMing Lei struct blk_mq_hw_ctx *hctx = NULL, *tmp;
429534d11ffaSJianchao Wang
42962f8f1336SMing Lei /* reuse dead hctx first */
42972f8f1336SMing Lei spin_lock(&q->unused_hctx_lock);
42982f8f1336SMing Lei list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4299*ee18012cSMing Lei if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) {
43002f8f1336SMing Lei hctx = tmp;
43012f8f1336SMing Lei break;
43022f8f1336SMing Lei }
43032f8f1336SMing Lei }
43042f8f1336SMing Lei if (hctx)
43052f8f1336SMing Lei list_del_init(&hctx->hctx_list);
43062f8f1336SMing Lei spin_unlock(&q->unused_hctx_lock);
43072f8f1336SMing Lei
43082f8f1336SMing Lei if (!hctx)
43097c6c5b7cSMing Lei hctx = blk_mq_alloc_hctx(q, set, node);
431034d11ffaSJianchao Wang if (!hctx)
43117c6c5b7cSMing Lei goto fail;
431234d11ffaSJianchao Wang
43137c6c5b7cSMing Lei if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
43147c6c5b7cSMing Lei goto free_hctx;
431534d11ffaSJianchao Wang
431634d11ffaSJianchao Wang return hctx;
43177c6c5b7cSMing Lei
43187c6c5b7cSMing Lei free_hctx:
43197c6c5b7cSMing Lei kobject_put(&hctx->kobj);
43207c6c5b7cSMing Lei fail:
43217c6c5b7cSMing Lei return NULL;
432234d11ffaSJianchao Wang }
432334d11ffaSJianchao Wang
4324868f2f0bSKeith Busch static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4325b62c21b7SMike Snitzer struct request_queue *q)
4326b62c21b7SMike Snitzer {
43274e5cc99eSMing Lei struct blk_mq_hw_ctx *hctx;
43284e5cc99eSMing Lei unsigned long i, j;
4329ac0d6b92SBart Van Assche
4330fb350e0aSMing Lei /* protect against switching io scheduler */
4331fb350e0aSMing Lei mutex_lock(&q->sysfs_lock);
433224d2f903SChristoph Hellwig for (i = 0; i < set->nr_hw_queues; i++) {
4333306f13eeSMing Lei int old_node;
43344d805131SMing Lei int node = blk_mq_get_hctx_node(set, i);
43354e5cc99eSMing Lei struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4336868f2f0bSKeith Busch
4337306f13eeSMing Lei if (old_hctx) {
4338306f13eeSMing Lei old_node = old_hctx->numa_node;
4339306f13eeSMing Lei blk_mq_exit_hctx(q, set, old_hctx, i);
4340306f13eeSMing Lei }
4341320ae51fSJens Axboe
43424e5cc99eSMing Lei if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4343306f13eeSMing Lei if (!old_hctx)
4344868f2f0bSKeith Busch break;
4345306f13eeSMing Lei pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
4346306f13eeSMing Lei node, old_node);
43474e5cc99eSMing Lei hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
43484e5cc99eSMing Lei WARN_ON_ONCE(!hctx);
4349868f2f0bSKeith Busch }
4350320ae51fSJens Axboe }
4351e01ad46dSJianchao Wang /*
4352e01ad46dSJianchao Wang * If increasing nr_hw_queues failed, free the newly allocated
4353e01ad46dSJianchao Wang * hctxs and keep the previous q->nr_hw_queues.
4354e01ad46dSJianchao Wang */
4355e01ad46dSJianchao Wang if (i != set->nr_hw_queues) {
4356e01ad46dSJianchao Wang j = q->nr_hw_queues;
4357e01ad46dSJianchao Wang } else {
4358e01ad46dSJianchao Wang j = i;
4359e01ad46dSJianchao Wang q->nr_hw_queues = set->nr_hw_queues;
4360e01ad46dSJianchao Wang }
436134d11ffaSJianchao Wang
43624e5cc99eSMing Lei xa_for_each_start(&q->hctx_table, j, hctx, j)
4363868f2f0bSKeith Busch blk_mq_exit_hctx(q, set, hctx, j);
4364fb350e0aSMing Lei mutex_unlock(&q->sysfs_lock);
436558bf9358SMing Lei
436658bf9358SMing Lei /* unregister cpuhp callbacks for exited hctxs */
436758bf9358SMing Lei blk_mq_remove_hw_queues_cpuhp(q);
436858bf9358SMing Lei
436958bf9358SMing Lei /* register cpuhp for newly initialized hctxs */
437058bf9358SMing Lei blk_mq_add_hw_queues_cpuhp(q);
4371868f2f0bSKeith Busch }
4372868f2f0bSKeith Busch
437342ee3061SMing Lei static void blk_mq_update_poll_flag(struct request_queue *q)
437442ee3061SMing Lei {
437542ee3061SMing Lei struct blk_mq_tag_set *set = q->tag_set;
437642ee3061SMing Lei
437742ee3061SMing Lei if (set->nr_maps > HCTX_TYPE_POLL &&
437842ee3061SMing Lei set->map[HCTX_TYPE_POLL].nr_queues)
437942ee3061SMing Lei blk_queue_flag_set(QUEUE_FLAG_POLL, q);
438042ee3061SMing Lei else
438142ee3061SMing Lei blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
438242ee3061SMing Lei }
438342ee3061SMing Lei
438426a9750aSChristoph Hellwig int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
438526a9750aSChristoph Hellwig struct request_queue *q)
4386868f2f0bSKeith Busch {
438766841672SMing Lei /* mark the queue as mq asap */
438866841672SMing Lei q->mq_ops = set->ops;
438966841672SMing Lei
43901db4909eSMing Lei if (blk_mq_alloc_ctxs(q))
439154bdd67dSKeith Busch goto err_exit;
4392868f2f0bSKeith Busch
4393737f98cfSMing Lei /* init q->mq_kobj and sw queues' kobjects */
4394737f98cfSMing Lei blk_mq_sysfs_init(q);
4395737f98cfSMing Lei
43962f8f1336SMing Lei INIT_LIST_HEAD(&q->unused_hctx_list);
43972f8f1336SMing Lei spin_lock_init(&q->unused_hctx_lock);
43982f8f1336SMing Lei
43994e5cc99eSMing Lei xa_init(&q->hctx_table);
44004e5cc99eSMing Lei
4401868f2f0bSKeith Busch blk_mq_realloc_hw_ctxs(set, q);
4402868f2f0bSKeith Busch if (!q->nr_hw_queues)
4403868f2f0bSKeith Busch goto err_hctxs;
4404320ae51fSJens Axboe
4405287922ebSChristoph Hellwig INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4406e56f698bSMing Lei blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4407320ae51fSJens Axboe
4408a8908939SJens Axboe q->tag_set = set;
4409320ae51fSJens Axboe
441094eddfbeSJens Axboe q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
441142ee3061SMing Lei blk_mq_update_poll_flag(q);
4412320ae51fSJens Axboe
44132849450aSMike Snitzer INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
44149a67aa52SChristoph Hellwig INIT_LIST_HEAD(&q->flush_list);
44156fca6a61SChristoph Hellwig INIT_LIST_HEAD(&q->requeue_list);
44166fca6a61SChristoph Hellwig spin_lock_init(&q->requeue_lock);
44176fca6a61SChristoph Hellwig
4418eba71768SJens Axboe q->nr_requests = set->queue_depth;
4419eba71768SJens Axboe
442024d2f903SChristoph Hellwig blk_mq_init_cpu_queues(q, set->nr_hw_queues);
44210d2602caSJens Axboe blk_mq_add_queue_tag_set(set, q);
44224b855ad3SChristoph Hellwig blk_mq_map_swqueue(q);
442326a9750aSChristoph Hellwig return 0;
442418741986SChristoph Hellwig
4425320ae51fSJens Axboe err_hctxs:
4426943f45b9SChen Jun blk_mq_release(q);
4427c7de5726SMing Lin err_exit:
4428c7de5726SMing Lin q->mq_ops = NULL;
442926a9750aSChristoph Hellwig return -ENOMEM;
4430320ae51fSJens Axboe }
4431b62c21b7SMike Snitzer EXPORT_SYMBOL(blk_mq_init_allocated_queue);
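/*
 * Most drivers never call blk_mq_init_allocated_queue() directly; it runs
 * as part of blk_mq_init_queue() and blk_mq_alloc_disk(). A minimal
 * sketch of the usual driver-visible sequence (hypothetical driver "foo"):
 *
 *	err = blk_mq_alloc_tag_set(&foo->tag_set);
 *	if (err)
 *		return err;
 *	disk = blk_mq_alloc_disk(&foo->tag_set, foo);
 *	if (IS_ERR(disk))
 *		goto out_free_tag_set;
 */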
4432320ae51fSJens Axboe
4433c7e2d94bSMing Lei /* tags can _not_ be used after returning from blk_mq_exit_queue */
4434c7e2d94bSMing Lei void blk_mq_exit_queue(struct request_queue *q)
4435320ae51fSJens Axboe {
4436624dbe47SMing Lei struct blk_mq_tag_set *set = q->tag_set;
4437320ae51fSJens Axboe
4438630ef623SBart Van Assche /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4439624dbe47SMing Lei blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4440630ef623SBart Van Assche /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4441630ef623SBart Van Assche blk_mq_del_queue_tag_set(q);
4442320ae51fSJens Axboe }
4443320ae51fSJens Axboe
4444a5164405SJens Axboe static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4445a5164405SJens Axboe {
4446a5164405SJens Axboe int i;
4447a5164405SJens Axboe
4448079a2e3eSJohn Garry if (blk_mq_is_shared_tags(set->flags)) {
4449079a2e3eSJohn Garry set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4450e155b0c2SJohn Garry BLK_MQ_NO_HCTX_IDX,
4451e155b0c2SJohn Garry set->queue_depth);
4452079a2e3eSJohn Garry if (!set->shared_tags)
4453e155b0c2SJohn Garry return -ENOMEM;
4454e155b0c2SJohn Garry }
4455e155b0c2SJohn Garry
44568229cca8SXianting Tian for (i = 0; i < set->nr_hw_queues; i++) {
445763064be1SJohn Garry if (!__blk_mq_alloc_map_and_rqs(set, i))
4458a5164405SJens Axboe goto out_unwind;
44598229cca8SXianting Tian cond_resched();
44608229cca8SXianting Tian }
4461a5164405SJens Axboe
4462a5164405SJens Axboe return 0;
4463a5164405SJens Axboe
4464a5164405SJens Axboe out_unwind:
4465a5164405SJens Axboe while (--i >= 0)
4466e155b0c2SJohn Garry __blk_mq_free_map_and_rqs(set, i);
4467e155b0c2SJohn Garry
4468079a2e3eSJohn Garry if (blk_mq_is_shared_tags(set->flags)) {
4469079a2e3eSJohn Garry blk_mq_free_map_and_rqs(set, set->shared_tags,
4470e155b0c2SJohn Garry BLK_MQ_NO_HCTX_IDX);
4471645db34eSJohn Garry }
4472a5164405SJens Axboe
4473a5164405SJens Axboe return -ENOMEM;
4474a5164405SJens Axboe }
4475a5164405SJens Axboe
4476a5164405SJens Axboe /*
4477a5164405SJens Axboe * Allocate the request maps associated with this tag_set. Note that this
4478a5164405SJens Axboe * may reduce the depth asked for, if memory is tight. set->queue_depth
4479a5164405SJens Axboe * will be updated to reflect the allocated depth.
4480a5164405SJens Axboe */
448163064be1SJohn Garry static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4482a5164405SJens Axboe {
4483a5164405SJens Axboe unsigned int depth;
4484a5164405SJens Axboe int err;
4485a5164405SJens Axboe
4486a5164405SJens Axboe depth = set->queue_depth;
4487a5164405SJens Axboe do {
4488a5164405SJens Axboe err = __blk_mq_alloc_rq_maps(set);
4489a5164405SJens Axboe if (!err)
4490a5164405SJens Axboe break;
4491a5164405SJens Axboe
4492a5164405SJens Axboe set->queue_depth >>= 1;
4493a5164405SJens Axboe if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4494a5164405SJens Axboe err = -ENOMEM;
4495a5164405SJens Axboe break;
4496a5164405SJens Axboe }
4497a5164405SJens Axboe } while (set->queue_depth);
4498a5164405SJens Axboe
4499a5164405SJens Axboe if (!set->queue_depth || err) {
4500a5164405SJens Axboe pr_err("blk-mq: failed to allocate request map\n");
4501a5164405SJens Axboe return -ENOMEM;
4502a5164405SJens Axboe }
4503a5164405SJens Axboe
4504a5164405SJens Axboe if (depth != set->queue_depth)
4505a5164405SJens Axboe pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4506a5164405SJens Axboe depth, set->queue_depth);
4507a5164405SJens Axboe
4508a5164405SJens Axboe return 0;
4509a5164405SJens Axboe }
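/*
 * Example: a request for queue_depth = 1024 that cannot be satisfied is
 * retried at 512, 256, ... and only gives up once the depth would drop
 * below set->reserved_tags + BLK_MQ_TAG_MIN.
 */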
4510a5164405SJens Axboe
4511a4e1d0b7SBart Van Assche static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4512ebe8bddbSOmar Sandoval {
45136e66b493SBart Van Assche /*
45146e66b493SBart Van Assche * blk_mq_map_queues() and multiple .map_queues() implementations
45156e66b493SBart Van Assche * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
45166e66b493SBart Van Assche * number of hardware queues.
45176e66b493SBart Van Assche */
45186e66b493SBart Van Assche if (set->nr_maps == 1)
45196e66b493SBart Van Assche set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
45206e66b493SBart Van Assche
452159388702SMing Lei if (set->ops->map_queues && !is_kdump_kernel()) {
4522b3c661b1SJens Axboe int i;
4523b3c661b1SJens Axboe
45247d4901a9SMing Lei /*
45257d4901a9SMing Lei * transport .map_queues is usually done in the following
45267d4901a9SMing Lei * way:
45277d4901a9SMing Lei *
45287d4901a9SMing Lei * for (queue = 0; queue < set->nr_hw_queues; queue++) {
45297d4901a9SMing Lei * mask = get_cpu_mask(queue)
45307d4901a9SMing Lei * for_each_cpu(cpu, mask)
4531b3c661b1SJens Axboe * set->map[x].mq_map[cpu] = queue;
45327d4901a9SMing Lei * }
45337d4901a9SMing Lei *
45347d4901a9SMing Lei * When we need to remap, the table has to be cleared first
45357d4901a9SMing Lei * to kill stale mappings, since a CPU that was previously
45367d4901a9SMing Lei * mapped may no longer map to any hw queue.
45377d4901a9SMing Lei */
4538b3c661b1SJens Axboe for (i = 0; i < set->nr_maps; i++)
4539b3c661b1SJens Axboe blk_mq_clear_mq_map(&set->map[i]);
45407d4901a9SMing Lei
4541a4e1d0b7SBart Van Assche set->ops->map_queues(set);
4542b3c661b1SJens Axboe } else {
4543b3c661b1SJens Axboe BUG_ON(set->nr_maps > 1);
4544a4e1d0b7SBart Van Assche blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4545ebe8bddbSOmar Sandoval }
4546b3c661b1SJens Axboe }
4547ebe8bddbSOmar Sandoval
4548f7e76dbcSBart Van Assche static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4549ee9d5521SChristoph Hellwig int new_nr_hw_queues)
4550f7e76dbcSBart Van Assche {
4551f7e76dbcSBart Van Assche struct blk_mq_tags **new_tags;
4552e1dd7bc9SChengming Zhou int i;
4553f7e76dbcSBart Van Assche
45546be6d112SChengming Zhou if (set->nr_hw_queues >= new_nr_hw_queues)
4555d4b2e0d4SShin'ichiro Kawasaki goto done;
4556f7e76dbcSBart Van Assche
4557f7e76dbcSBart Van Assche new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4558f7e76dbcSBart Van Assche GFP_KERNEL, set->numa_node);
4559f7e76dbcSBart Van Assche if (!new_tags)
4560f7e76dbcSBart Van Assche return -ENOMEM;
4561f7e76dbcSBart Van Assche
4562f7e76dbcSBart Van Assche if (set->tags)
4563ee9d5521SChristoph Hellwig memcpy(new_tags, set->tags, set->nr_hw_queues *
4564f7e76dbcSBart Van Assche sizeof(*set->tags));
4565f7e76dbcSBart Van Assche kfree(set->tags);
4566f7e76dbcSBart Van Assche set->tags = new_tags;
45677222657eSChengming Zhou
45687222657eSChengming Zhou for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
45697222657eSChengming Zhou if (!__blk_mq_alloc_map_and_rqs(set, i)) {
45707222657eSChengming Zhou while (--i >= set->nr_hw_queues)
45717222657eSChengming Zhou __blk_mq_free_map_and_rqs(set, i);
45727222657eSChengming Zhou return -ENOMEM;
45737222657eSChengming Zhou }
45747222657eSChengming Zhou cond_resched();
45757222657eSChengming Zhou }
45767222657eSChengming Zhou
4577d4b2e0d4SShin'ichiro Kawasaki done:
4578f7e76dbcSBart Van Assche set->nr_hw_queues = new_nr_hw_queues;
4579f7e76dbcSBart Van Assche return 0;
4580f7e76dbcSBart Van Assche }
4581f7e76dbcSBart Van Assche
4582a4391c64SJens Axboe /*
4583a4391c64SJens Axboe * Alloc a tag set to be associated with one or more request queues.
4584a4391c64SJens Axboe * May fail with -EINVAL for various error conditions. May adjust the
4585c018c84fSMinwoo Im * requested depth down, if it's too large. In that case, the depth
4586a4391c64SJens Axboe * actually used will be stored in set->queue_depth.
4587a4391c64SJens Axboe */
458824d2f903SChristoph Hellwig int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
458924d2f903SChristoph Hellwig {
4590b3c661b1SJens Axboe int i, ret;
4591da695ba2SChristoph Hellwig
4592205fb5f5SBart Van Assche BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4593205fb5f5SBart Van Assche
459424d2f903SChristoph Hellwig if (!set->nr_hw_queues)
459524d2f903SChristoph Hellwig return -EINVAL;
4596a4391c64SJens Axboe if (!set->queue_depth)
459724d2f903SChristoph Hellwig return -EINVAL;
459824d2f903SChristoph Hellwig if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
459924d2f903SChristoph Hellwig return -EINVAL;
460024d2f903SChristoph Hellwig
46017d7e0f90SChristoph Hellwig if (!set->ops->queue_rq)
460224d2f903SChristoph Hellwig return -EINVAL;
460324d2f903SChristoph Hellwig
4604de148297SMing Lei if (!set->ops->get_budget ^ !set->ops->put_budget)
4605de148297SMing Lei return -EINVAL;
4606de148297SMing Lei
4607a4391c64SJens Axboe if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4608a4391c64SJens Axboe pr_info("blk-mq: reduced tag depth to %u\n",
4609a4391c64SJens Axboe BLK_MQ_MAX_DEPTH);
4610a4391c64SJens Axboe set->queue_depth = BLK_MQ_MAX_DEPTH;
4611a4391c64SJens Axboe }
461224d2f903SChristoph Hellwig
4613b3c661b1SJens Axboe if (!set->nr_maps)
4614b3c661b1SJens Axboe set->nr_maps = 1;
4615b3c661b1SJens Axboe else if (set->nr_maps > HCTX_MAX_TYPES)
4616b3c661b1SJens Axboe return -EINVAL;
4617b3c661b1SJens Axboe
46186637fadfSShaohua Li /*
46196637fadfSShaohua Li * If a crashdump is active, then we are potentially in a very
46206637fadfSShaohua Li * memory constrained environment. Limit us to 1 queue and
46216637fadfSShaohua Li * 64 tags to prevent using too much memory.
46226637fadfSShaohua Li */
46236637fadfSShaohua Li if (is_kdump_kernel()) {
46246637fadfSShaohua Li set->nr_hw_queues = 1;
462559388702SMing Lei set->nr_maps = 1;
46266637fadfSShaohua Li set->queue_depth = min(64U, set->queue_depth);
46276637fadfSShaohua Li }
4628868f2f0bSKeith Busch /*
4629392546aeSJens Axboe * There is no use for more h/w queues than cpus if we just have
4630392546aeSJens Axboe * a single map
4631868f2f0bSKeith Busch */
4632392546aeSJens Axboe if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4633868f2f0bSKeith Busch set->nr_hw_queues = nr_cpu_ids;
46346637fadfSShaohua Li
463580bd4a7aSChristoph Hellwig if (set->flags & BLK_MQ_F_BLOCKING) {
463680bd4a7aSChristoph Hellwig set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
463780bd4a7aSChristoph Hellwig if (!set->srcu)
4638a5164405SJens Axboe return -ENOMEM;
463980bd4a7aSChristoph Hellwig ret = init_srcu_struct(set->srcu);
464080bd4a7aSChristoph Hellwig if (ret)
464180bd4a7aSChristoph Hellwig goto out_free_srcu;
464280bd4a7aSChristoph Hellwig }
464324d2f903SChristoph Hellwig
4644da695ba2SChristoph Hellwig ret = -ENOMEM;
46455ee20298SChristoph Hellwig set->tags = kcalloc_node(set->nr_hw_queues,
46465ee20298SChristoph Hellwig sizeof(struct blk_mq_tags *), GFP_KERNEL,
46475ee20298SChristoph Hellwig set->numa_node);
46485ee20298SChristoph Hellwig if (!set->tags)
464980bd4a7aSChristoph Hellwig goto out_cleanup_srcu;
465024d2f903SChristoph Hellwig
4651b3c661b1SJens Axboe for (i = 0; i < set->nr_maps; i++) {
4652b3c661b1SJens Axboe set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
465307b35eb5SMing Lei sizeof(set->map[i].mq_map[0]),
4654da695ba2SChristoph Hellwig GFP_KERNEL, set->numa_node);
4655b3c661b1SJens Axboe if (!set->map[i].mq_map)
4656b3c661b1SJens Axboe goto out_free_mq_map;
465759388702SMing Lei set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4658b3c661b1SJens Axboe }
4659bdd17e75SChristoph Hellwig
4660a4e1d0b7SBart Van Assche blk_mq_update_queue_map(set);
4661da695ba2SChristoph Hellwig
466263064be1SJohn Garry ret = blk_mq_alloc_set_map_and_rqs(set);
4663da695ba2SChristoph Hellwig if (ret)
4664bdd17e75SChristoph Hellwig goto out_free_mq_map;
466524d2f903SChristoph Hellwig
46660d2602caSJens Axboe mutex_init(&set->tag_list_lock);
46670d2602caSJens Axboe INIT_LIST_HEAD(&set->tag_list);
46680d2602caSJens Axboe
466924d2f903SChristoph Hellwig return 0;
4670bdd17e75SChristoph Hellwig
4671bdd17e75SChristoph Hellwig out_free_mq_map:
4672b3c661b1SJens Axboe for (i = 0; i < set->nr_maps; i++) {
4673b3c661b1SJens Axboe kfree(set->map[i].mq_map);
4674b3c661b1SJens Axboe set->map[i].mq_map = NULL;
4675b3c661b1SJens Axboe }
46765676e7b6SRobert Elliott kfree(set->tags);
46775676e7b6SRobert Elliott set->tags = NULL;
467880bd4a7aSChristoph Hellwig out_cleanup_srcu:
467980bd4a7aSChristoph Hellwig if (set->flags & BLK_MQ_F_BLOCKING)
468080bd4a7aSChristoph Hellwig cleanup_srcu_struct(set->srcu);
468180bd4a7aSChristoph Hellwig out_free_srcu:
468280bd4a7aSChristoph Hellwig if (set->flags & BLK_MQ_F_BLOCKING)
468380bd4a7aSChristoph Hellwig kfree(set->srcu);
4684da695ba2SChristoph Hellwig return ret;
468524d2f903SChristoph Hellwig }
468624d2f903SChristoph Hellwig EXPORT_SYMBOL(blk_mq_alloc_tag_set);
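/*
 * Illustrative multi-queue tag set setup (hypothetical driver "foo"; the
 * field values are examples, not recommendations):
 *
 *	memset(&foo->tag_set, 0, sizeof(foo->tag_set));
 *	foo->tag_set.ops	  = &foo_mq_ops;
 *	foo->tag_set.nr_hw_queues = num_online_cpus();
 *	foo->tag_set.queue_depth  = 128;
 *	foo->tag_set.numa_node	  = NUMA_NO_NODE;
 *	foo->tag_set.cmd_size	  = sizeof(struct foo_cmd);
 *	err = blk_mq_alloc_tag_set(&foo->tag_set);
 */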
468724d2f903SChristoph Hellwig
4688cdb14e0fSChristoph Hellwig /* allocate and initialize a tagset for a simple single-queue device */
4689cdb14e0fSChristoph Hellwig int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4690cdb14e0fSChristoph Hellwig const struct blk_mq_ops *ops, unsigned int queue_depth,
4691cdb14e0fSChristoph Hellwig unsigned int set_flags)
4692cdb14e0fSChristoph Hellwig {
4693cdb14e0fSChristoph Hellwig memset(set, 0, sizeof(*set));
4694cdb14e0fSChristoph Hellwig set->ops = ops;
4695cdb14e0fSChristoph Hellwig set->nr_hw_queues = 1;
4696cdb14e0fSChristoph Hellwig set->nr_maps = 1;
4697cdb14e0fSChristoph Hellwig set->queue_depth = queue_depth;
4698cdb14e0fSChristoph Hellwig set->numa_node = NUMA_NO_NODE;
4699cdb14e0fSChristoph Hellwig set->flags = set_flags;
4700cdb14e0fSChristoph Hellwig return blk_mq_alloc_tag_set(set);
4701cdb14e0fSChristoph Hellwig }
4702cdb14e0fSChristoph Hellwig EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
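/*
 * The single-queue shorthand collapses the setup sketched above to one
 * call (hypothetical driver "foo"):
 *
 *	err = blk_mq_alloc_sq_tag_set(&foo->tag_set, &foo_mq_ops, 16, 0);
 */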
4703cdb14e0fSChristoph Hellwig
470424d2f903SChristoph Hellwig void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
470524d2f903SChristoph Hellwig {
4706b3c661b1SJens Axboe int i, j;
470724d2f903SChristoph Hellwig
4708f7e76dbcSBart Van Assche for (i = 0; i < set->nr_hw_queues; i++)
4709e155b0c2SJohn Garry __blk_mq_free_map_and_rqs(set, i);
4710484b4061SJens Axboe
4711079a2e3eSJohn Garry if (blk_mq_is_shared_tags(set->flags)) {
4712079a2e3eSJohn Garry blk_mq_free_map_and_rqs(set, set->shared_tags,
4713e155b0c2SJohn Garry BLK_MQ_NO_HCTX_IDX);
4714e155b0c2SJohn Garry }
471532bc15afSJohn Garry
4716b3c661b1SJens Axboe for (j = 0; j < set->nr_maps; j++) {
4717b3c661b1SJens Axboe kfree(set->map[j].mq_map);
4718b3c661b1SJens Axboe set->map[j].mq_map = NULL;
4719b3c661b1SJens Axboe }
4720bdd17e75SChristoph Hellwig
4721981bd189SMing Lei kfree(set->tags);
47225676e7b6SRobert Elliott set->tags = NULL;
472380bd4a7aSChristoph Hellwig if (set->flags & BLK_MQ_F_BLOCKING) {
472480bd4a7aSChristoph Hellwig cleanup_srcu_struct(set->srcu);
472580bd4a7aSChristoph Hellwig kfree(set->srcu);
472680bd4a7aSChristoph Hellwig }
472724d2f903SChristoph Hellwig }
472824d2f903SChristoph Hellwig EXPORT_SYMBOL(blk_mq_free_tag_set);
472924d2f903SChristoph Hellwig
4730e3a2b3f9SJens Axboe int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4731e3a2b3f9SJens Axboe {
4732e3a2b3f9SJens Axboe struct blk_mq_tag_set *set = q->tag_set;
4733e3a2b3f9SJens Axboe struct blk_mq_hw_ctx *hctx;
47344f481208SMing Lei int ret;
47354f481208SMing Lei unsigned long i;
4736e3a2b3f9SJens Axboe
4737bd166ef1SJens Axboe if (!set)
4738e3a2b3f9SJens Axboe return -EINVAL;
4739e3a2b3f9SJens Axboe
4740e5fa8140SAleksei Zakharov if (q->nr_requests == nr)
4741e5fa8140SAleksei Zakharov return 0;
4742e5fa8140SAleksei Zakharov
474370f36b60SJens Axboe blk_mq_freeze_queue(q);
474424f5a90fSMing Lei blk_mq_quiesce_queue(q);
474570f36b60SJens Axboe
4746e3a2b3f9SJens Axboe ret = 0;
4747e3a2b3f9SJens Axboe queue_for_each_hw_ctx(q, hctx, i) {
4748e9137d4bSKeith Busch if (!hctx->tags)
4749e9137d4bSKeith Busch continue;
4750bd166ef1SJens Axboe /*
4751bd166ef1SJens Axboe * If we're using an MQ scheduler, just update the scheduler
4752bd166ef1SJens Axboe * queue depth. This is similar to what the old code would do.
4753bd166ef1SJens Axboe */
4754f6adcef5SJohn Garry if (hctx->sched_tags) {
475570f36b60SJens Axboe ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
475670f36b60SJens Axboe nr, true);
4757f6adcef5SJohn Garry } else {
4758f6adcef5SJohn Garry ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4759f6adcef5SJohn Garry false);
476070f36b60SJens Axboe }
4761e3a2b3f9SJens Axboe if (ret)
4762e3a2b3f9SJens Axboe break;
476377f1e0a5SJens Axboe if (q->elevator && q->elevator->type->ops.depth_updated)
476477f1e0a5SJens Axboe q->elevator->type->ops.depth_updated(hctx);
4765e3a2b3f9SJens Axboe }
4766d97e594cSJohn Garry if (!ret) {
4767e3a2b3f9SJens Axboe q->nr_requests = nr;
4768079a2e3eSJohn Garry if (blk_mq_is_shared_tags(set->flags)) {
47698fa04464SJohn Garry if (q->elevator)
4770079a2e3eSJohn Garry blk_mq_tag_update_sched_shared_tags(q);
47718fa04464SJohn Garry else
4772079a2e3eSJohn Garry blk_mq_tag_resize_shared_tags(set, nr);
47738fa04464SJohn Garry }
4774d97e594cSJohn Garry }
4775e3a2b3f9SJens Axboe
477624f5a90fSMing Lei blk_mq_unquiesce_queue(q);
477770f36b60SJens Axboe blk_mq_unfreeze_queue(q);
477870f36b60SJens Axboe
4779e3a2b3f9SJens Axboe return ret;
4780e3a2b3f9SJens Axboe }
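/*
 * blk_mq_update_nr_requests() is reached from the queue's sysfs
 * "nr_requests" attribute, i.e. writing
 * /sys/block/<dev>/queue/nr_requests resizes the (scheduler) tags while
 * the queue is frozen and quiesced.
 */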
4781e3a2b3f9SJens Axboe
4782d48ece20SJianchao Wang /*
4783d48ece20SJianchao Wang * request_queue and elevator_type pair.
4784d48ece20SJianchao Wang * It is just used by __blk_mq_update_nr_hw_queues to cache
4785d48ece20SJianchao Wang * the elevator_type associated with a request_queue.
4786d48ece20SJianchao Wang */
4787d48ece20SJianchao Wang struct blk_mq_qe_pair {
4788d48ece20SJianchao Wang struct list_head node;
4789d48ece20SJianchao Wang struct request_queue *q;
4790d48ece20SJianchao Wang struct elevator_type *type;
4791d48ece20SJianchao Wang };
4792d48ece20SJianchao Wang
4793d48ece20SJianchao Wang /*
4794d48ece20SJianchao Wang * Cache the elevator_type in the qe pair list and switch the
4795d48ece20SJianchao Wang * io scheduler to 'none'.
4796d48ece20SJianchao Wang */
4797d48ece20SJianchao Wang static bool blk_mq_elv_switch_none(struct list_head *head,
4798d48ece20SJianchao Wang struct request_queue *q)
4799d48ece20SJianchao Wang {
4800d48ece20SJianchao Wang struct blk_mq_qe_pair *qe;
4801d48ece20SJianchao Wang
4802d48ece20SJianchao Wang qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4803d48ece20SJianchao Wang if (!qe)
4804d48ece20SJianchao Wang return false;
4805d48ece20SJianchao Wang
48065fd7a84aSMing Lei /* q->elevator needs protection from ->sysfs_lock */
48075fd7a84aSMing Lei mutex_lock(&q->sysfs_lock);
48085fd7a84aSMing Lei
480924516565SMing Lei /* the check has to be done while holding sysfs_lock */
481024516565SMing Lei if (!q->elevator) {
481124516565SMing Lei kfree(qe);
481224516565SMing Lei goto unlock;
481324516565SMing Lei }
481424516565SMing Lei
4815d48ece20SJianchao Wang INIT_LIST_HEAD(&qe->node);
4816d48ece20SJianchao Wang qe->q = q;
4817d48ece20SJianchao Wang qe->type = q->elevator->type;
4818dd6f7f17SChristoph Hellwig /* keep a reference to the elevator module as we'll switch back */
4819dd6f7f17SChristoph Hellwig __elevator_get(qe->type);
4820d48ece20SJianchao Wang list_add(&qe->node, head);
482164b36075SChristoph Hellwig elevator_disable(q);
482224516565SMing Lei unlock:
4823d48ece20SJianchao Wang mutex_unlock(&q->sysfs_lock);
4824d48ece20SJianchao Wang
4825d48ece20SJianchao Wang return true;
4826d48ece20SJianchao Wang }
4827d48ece20SJianchao Wang
48284a3b666eSJakob Koschel static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
48294a3b666eSJakob Koschel struct request_queue *q)
48304a3b666eSJakob Koschel {
48314a3b666eSJakob Koschel struct blk_mq_qe_pair *qe;
48324a3b666eSJakob Koschel
48334a3b666eSJakob Koschel list_for_each_entry(qe, head, node)
48344a3b666eSJakob Koschel if (qe->q == q)
48354a3b666eSJakob Koschel return qe;
48364a3b666eSJakob Koschel
48374a3b666eSJakob Koschel return NULL;
48384a3b666eSJakob Koschel }
48394a3b666eSJakob Koschel
4840d48ece20SJianchao Wang static void blk_mq_elv_switch_back(struct list_head *head,
4841d48ece20SJianchao Wang struct request_queue *q)
4842d48ece20SJianchao Wang {
4843d48ece20SJianchao Wang struct blk_mq_qe_pair *qe;
48444a3b666eSJakob Koschel struct elevator_type *t;
4845d48ece20SJianchao Wang
48464a3b666eSJakob Koschel qe = blk_lookup_qe_pair(head, q);
48474a3b666eSJakob Koschel if (!qe)
4848d48ece20SJianchao Wang return;
48494a3b666eSJakob Koschel t = qe->type;
4850d48ece20SJianchao Wang list_del(&qe->node);
4851d48ece20SJianchao Wang kfree(qe);
4852d48ece20SJianchao Wang
4853d48ece20SJianchao Wang mutex_lock(&q->sysfs_lock);
48548237c01fSKeith Busch elevator_switch(q, t);
48558ed40ee3SJinlong Chen /* drop the reference acquired in blk_mq_elv_switch_none */
48568ed40ee3SJinlong Chen elevator_put(t);
4857d48ece20SJianchao Wang mutex_unlock(&q->sysfs_lock);
4858d48ece20SJianchao Wang }
4859d48ece20SJianchao Wang
4860e4dc2b32SKeith Busch static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4861e4dc2b32SKeith Busch int nr_hw_queues)
4862868f2f0bSKeith Busch {
4863868f2f0bSKeith Busch struct request_queue *q;
4864d48ece20SJianchao Wang LIST_HEAD(head);
48656be6d112SChengming Zhou int prev_nr_hw_queues = set->nr_hw_queues;
48666be6d112SChengming Zhou int i;
4867868f2f0bSKeith Busch
4868705cda97SBart Van Assche lockdep_assert_held(&set->tag_list_lock);
4869705cda97SBart Van Assche
4870392546aeSJens Axboe if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4871868f2f0bSKeith Busch nr_hw_queues = nr_cpu_ids;
4872fe35ec58SWeiping Zhang if (nr_hw_queues < 1)
4873fe35ec58SWeiping Zhang return;
4874fe35ec58SWeiping Zhang if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4875868f2f0bSKeith Busch return;
4876868f2f0bSKeith Busch
4877868f2f0bSKeith Busch list_for_each_entry(q, &set->tag_list, tag_set_list)
4878868f2f0bSKeith Busch blk_mq_freeze_queue(q);
4879d48ece20SJianchao Wang /*
4880d48ece20SJianchao Wang * Switch IO scheduler to 'none', cleaning up the data associated
4881d48ece20SJianchao Wang * with the previous scheduler. We will switch back once we are done
4882d48ece20SJianchao Wang * updating the new sw to hw queue mappings.
4883d48ece20SJianchao Wang */
4884d48ece20SJianchao Wang list_for_each_entry(q, &set->tag_list, tag_set_list)
4885d48ece20SJianchao Wang if (!blk_mq_elv_switch_none(&head, q))
4886d48ece20SJianchao Wang goto switch_back;
4887868f2f0bSKeith Busch
4888477e19deSJianchao Wang list_for_each_entry(q, &set->tag_list, tag_set_list) {
4889477e19deSJianchao Wang blk_mq_debugfs_unregister_hctxs(q);
4890eaa870f9SChristoph Hellwig blk_mq_sysfs_unregister_hctxs(q);
4891477e19deSJianchao Wang }
4892477e19deSJianchao Wang
4893ee9d5521SChristoph Hellwig if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
4894f7e76dbcSBart Van Assche goto reregister;
4895f7e76dbcSBart Van Assche
4896e01ad46dSJianchao Wang fallback:
4897aa880ad6SWeiping Zhang blk_mq_update_queue_map(set);
4898868f2f0bSKeith Busch list_for_each_entry(q, &set->tag_list, tag_set_list) {
4899868f2f0bSKeith Busch blk_mq_realloc_hw_ctxs(set, q);
490042ee3061SMing Lei blk_mq_update_poll_flag(q);
4901e01ad46dSJianchao Wang if (q->nr_hw_queues != set->nr_hw_queues) {
4902a846a8e6SYe Bin int i = prev_nr_hw_queues;
4903a846a8e6SYe Bin
4904e01ad46dSJianchao Wang pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
4905e01ad46dSJianchao Wang nr_hw_queues, prev_nr_hw_queues);
4906a846a8e6SYe Bin for (; i < set->nr_hw_queues; i++)
4907a846a8e6SYe Bin __blk_mq_free_map_and_rqs(set, i);
4908a846a8e6SYe Bin
4909e01ad46dSJianchao Wang set->nr_hw_queues = prev_nr_hw_queues;
4910e01ad46dSJianchao Wang goto fallback;
4911e01ad46dSJianchao Wang }
4912477e19deSJianchao Wang blk_mq_map_swqueue(q);
4913477e19deSJianchao Wang }
4914477e19deSJianchao Wang
4915f7e76dbcSBart Van Assche reregister:
4916477e19deSJianchao Wang list_for_each_entry(q, &set->tag_list, tag_set_list) {
4917eaa870f9SChristoph Hellwig blk_mq_sysfs_register_hctxs(q);
4918477e19deSJianchao Wang blk_mq_debugfs_register_hctxs(q);
4919868f2f0bSKeith Busch }
4920868f2f0bSKeith Busch
4921d48ece20SJianchao Wang switch_back:
4922d48ece20SJianchao Wang list_for_each_entry(q, &set->tag_list, tag_set_list)
4923d48ece20SJianchao Wang blk_mq_elv_switch_back(&head, q);
4924d48ece20SJianchao Wang
4925868f2f0bSKeith Busch list_for_each_entry(q, &set->tag_list, tag_set_list)
4926868f2f0bSKeith Busch blk_mq_unfreeze_queue(q);
49276be6d112SChengming Zhou
49286be6d112SChengming Zhou /* Free the excess tags when nr_hw_queues shrinks. */
49296be6d112SChengming Zhou for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
49306be6d112SChengming Zhou __blk_mq_free_map_and_rqs(set, i);
4931868f2f0bSKeith Busch }
4932e4dc2b32SKeith Busch
4933e4dc2b32SKeith Busch void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4934e4dc2b32SKeith Busch {
4935e4dc2b32SKeith Busch mutex_lock(&set->tag_list_lock);
4936e4dc2b32SKeith Busch __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4937e4dc2b32SKeith Busch mutex_unlock(&set->tag_list_lock);
4938e4dc2b32SKeith Busch }
4939868f2f0bSKeith Busch EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
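/*
 * Illustrative use: a driver that renegotiated its channel count, for
 * instance after a controller reset, resizes every queue sharing the
 * tag set (a sketch; "ctrl" and "nr_io_queues" are hypothetical):
 *
 *	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
 */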
4940868f2f0bSKeith Busch
4941f6c80cffSKeith Busch static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4942f6c80cffSKeith Busch struct io_comp_batch *iob, unsigned int flags)
4943bbd7bb70SJens Axboe {
4944c6699d6fSChristoph Hellwig long state = get_current_state();
4945bbd7bb70SJens Axboe int ret;
4946bbd7bb70SJens Axboe
4947c6699d6fSChristoph Hellwig do {
49485a72e899SJens Axboe ret = q->mq_ops->poll(hctx, iob);
4949bbd7bb70SJens Axboe if (ret > 0) {
4950849a3700SJens Axboe __set_current_state(TASK_RUNNING);
495185f4d4b6SJens Axboe return ret;
4952bbd7bb70SJens Axboe }
4953bbd7bb70SJens Axboe
4954bbd7bb70SJens Axboe if (signal_pending_state(state, current))
4955849a3700SJens Axboe __set_current_state(TASK_RUNNING);
4956b03fbd4fSPeter Zijlstra if (task_is_running(current))
495785f4d4b6SJens Axboe return 1;
4958c6699d6fSChristoph Hellwig
4959ef99b2d3SChristoph Hellwig if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4960bbd7bb70SJens Axboe break;
4961bbd7bb70SJens Axboe cpu_relax();
4962aa61bec3SJens Axboe } while (!need_resched());
4963bbd7bb70SJens Axboe
496467b4110fSNitesh Shetty __set_current_state(TASK_RUNNING);
496585f4d4b6SJens Axboe return 0;
4966bbd7bb70SJens Axboe }
4967bbd7bb70SJens Axboe
4968f6c80cffSKeith Busch int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4969f6c80cffSKeith Busch struct io_comp_batch *iob, unsigned int flags)
4970f6c80cffSKeith Busch {
4971f6c80cffSKeith Busch struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4972f6c80cffSKeith Busch
4973f6c80cffSKeith Busch return blk_hctx_poll(q, hctx, iob, flags);
4974f6c80cffSKeith Busch }
4975f6c80cffSKeith Busch
4976f6c80cffSKeith Busch int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4977f6c80cffSKeith Busch unsigned int poll_flags)
4978f6c80cffSKeith Busch {
4979f6c80cffSKeith Busch struct request_queue *q = rq->q;
4980f6c80cffSKeith Busch int ret;
4981f6c80cffSKeith Busch
4982f6c80cffSKeith Busch if (!blk_rq_is_poll(rq))
4983f6c80cffSKeith Busch return 0;
4984f6c80cffSKeith Busch if (!percpu_ref_tryget(&q->q_usage_counter))
4985f6c80cffSKeith Busch return 0;
4986f6c80cffSKeith Busch
4987f6c80cffSKeith Busch ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4988f6c80cffSKeith Busch blk_queue_exit(q);
4989f6c80cffSKeith Busch
4990f6c80cffSKeith Busch return ret;
4991f6c80cffSKeith Busch }
4992f6c80cffSKeith Busch EXPORT_SYMBOL_GPL(blk_rq_poll);
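/*
 * Illustrative polled completion, assuming a caller that issued a polled
 * passthrough request and flags completion from its end_io handler (a
 * sketch; "done" is hypothetical):
 *
 *	rq->cmd_flags |= REQ_POLLED;
 *	blk_execute_rq_nowait(rq, false);
 *	while (!smp_load_acquire(&done))
 *		blk_rq_poll(rq, NULL, 0);
 */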
4993f6c80cffSKeith Busch
49949cf2bab6SJens Axboe unsigned int blk_mq_rq_cpu(struct request *rq)
49959cf2bab6SJens Axboe {
49969cf2bab6SJens Axboe return rq->mq_ctx->cpu;
49979cf2bab6SJens Axboe }
49989cf2bab6SJens Axboe EXPORT_SYMBOL(blk_mq_rq_cpu);
49999cf2bab6SJens Axboe
50002a19b28fSMing Lei void blk_mq_cancel_work_sync(struct request_queue *q)
50012a19b28fSMing Lei {
50022a19b28fSMing Lei struct blk_mq_hw_ctx *hctx;
50034f481208SMing Lei unsigned long i;
50042a19b28fSMing Lei
50052a19b28fSMing Lei cancel_delayed_work_sync(&q->requeue_work);
50062a19b28fSMing Lei
50072a19b28fSMing Lei queue_for_each_hw_ctx(q, hctx, i)
50082a19b28fSMing Lei cancel_delayed_work_sync(&hctx->run_work);
50092a19b28fSMing Lei }
50102a19b28fSMing Lei
5011320ae51fSJens Axboe static int __init blk_mq_init(void)
5012320ae51fSJens Axboe {
5013c3077b5dSChristoph Hellwig int i;
5014c3077b5dSChristoph Hellwig
5015c3077b5dSChristoph Hellwig for_each_possible_cpu(i)
5016f9ab4918SSebastian Andrzej Siewior init_llist_head(&per_cpu(blk_cpu_done, i));
5017660e802cSChengming Zhou for_each_possible_cpu(i)
5018660e802cSChengming Zhou INIT_CSD(&per_cpu(blk_cpu_csd, i),
5019660e802cSChengming Zhou __blk_mq_complete_request_remote, NULL);
5020c3077b5dSChristoph Hellwig open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
5021c3077b5dSChristoph Hellwig
5022c3077b5dSChristoph Hellwig cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
5023c3077b5dSChristoph Hellwig "block/softirq:dead", NULL,
5024c3077b5dSChristoph Hellwig blk_softirq_cpu_dead);
50259467f859SThomas Gleixner cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
50269467f859SThomas Gleixner blk_mq_hctx_notify_dead);
5027bf0beec0SMing Lei cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
5028bf0beec0SMing Lei blk_mq_hctx_notify_online,
5029bf0beec0SMing Lei blk_mq_hctx_notify_offline);
5030320ae51fSJens Axboe return 0;
5031320ae51fSJens Axboe }
5032320ae51fSJens Axboe subsys_initcall(blk_mq_init);
5033