1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Block multiqueue core code
4 *
5 * Copyright (C) 2013-2014 Jens Axboe
6 * Copyright (C) 2013-2014 Christoph Hellwig
7 */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/blk-integrity.h>
14 #include <linux/kmemleak.h>
15 #include <linux/mm.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/workqueue.h>
19 #include <linux/smp.h>
20 #include <linux/interrupt.h>
21 #include <linux/llist.h>
22 #include <linux/cpu.h>
23 #include <linux/cache.h>
24 #include <linux/sched/sysctl.h>
25 #include <linux/sched/topology.h>
26 #include <linux/sched/signal.h>
27 #include <linux/delay.h>
28 #include <linux/crash_dump.h>
29 #include <linux/prefetch.h>
30 #include <linux/blk-crypto.h>
31 #include <linux/part_stat.h>
32
33 #include <trace/events/block.h>
34
35 #include <linux/t10-pi.h>
36 #include "blk.h"
37 #include "blk-mq.h"
38 #include "blk-mq-debugfs.h"
39 #include "blk-pm.h"
40 #include "blk-stat.h"
41 #include "blk-mq-sched.h"
42 #include "blk-rq-qos.h"
43
44 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
45 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
46 static DEFINE_MUTEX(blk_mq_cpuhp_lock);
47
48 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
49 static void blk_mq_request_bypass_insert(struct request *rq,
50 blk_insert_t flags);
51 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
52 struct list_head *list);
53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
54 struct io_comp_batch *iob, unsigned int flags);
55
56 /*
57 * Check if any of the ctx, dispatch list or elevator
58 * have pending work in this hardware queue.
59 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
61 {
62 return !list_empty_careful(&hctx->dispatch) ||
63 sbitmap_any_bit_set(&hctx->ctx_map) ||
64 blk_mq_sched_has_work(hctx);
65 }
66
67 /*
68 * Mark this ctx as having pending work in this hardware queue
69 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
71 struct blk_mq_ctx *ctx)
72 {
73 const int bit = ctx->index_hw[hctx->type];
74
75 if (!sbitmap_test_bit(&hctx->ctx_map, bit))
76 sbitmap_set_bit(&hctx->ctx_map, bit);
77 }
78
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
80 struct blk_mq_ctx *ctx)
81 {
82 const int bit = ctx->index_hw[hctx->type];
83
84 sbitmap_clear_bit(&hctx->ctx_map, bit);
85 }
86
87 struct mq_inflight {
88 struct block_device *part;
89 unsigned int inflight[2];
90 };
91
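/*
 * Tag iterator callback: count requests that are in flight for the given
 * partition (or for the whole disk when part0 is passed), split by data
 * direction.
 */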
static bool blk_mq_check_inflight(struct request *rq, void *priv)
93 {
94 struct mq_inflight *mi = priv;
95
96 if (rq->part && blk_do_io_stat(rq) &&
97 (!mi->part->bd_partno || rq->part == mi->part) &&
98 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
99 mi->inflight[rq_data_dir(rq)]++;
100
101 return true;
102 }
103
unsigned int blk_mq_in_flight(struct request_queue *q,
105 struct block_device *part)
106 {
107 struct mq_inflight mi = { .part = part };
108
109 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
110
111 return mi.inflight[0] + mi.inflight[1];
112 }
113
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
115 unsigned int inflight[2])
116 {
117 struct mq_inflight mi = { .part = part };
118
119 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
120 inflight[0] = mi.inflight[0];
121 inflight[1] = mi.inflight[1];
122 }
123
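/*
 * Start freezing the queue: bump the freeze depth and, on the first freeze,
 * kill q_usage_counter so new blk_queue_enter() callers block, then run the
 * hardware queues so already-queued requests can drain.
 */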
void blk_freeze_queue_start(struct request_queue *q)
125 {
126 mutex_lock(&q->mq_freeze_lock);
127 if (++q->mq_freeze_depth == 1) {
128 percpu_ref_kill(&q->q_usage_counter);
129 mutex_unlock(&q->mq_freeze_lock);
130 if (queue_is_mq(q))
131 blk_mq_run_hw_queues(q, false);
132 } else {
133 mutex_unlock(&q->mq_freeze_lock);
134 }
135 }
136 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
137
void blk_mq_freeze_queue_wait(struct request_queue *q)
139 {
140 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
141 }
142 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
143
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
145 unsigned long timeout)
146 {
147 return wait_event_timeout(q->mq_freeze_wq,
148 percpu_ref_is_zero(&q->q_usage_counter),
149 timeout);
150 }
151 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
152
153 /*
154 * Guarantee no request is in use, so we can change any data structure of
155 * the queue afterward.
156 */
void blk_freeze_queue(struct request_queue *q)
158 {
159 /*
160 * In the !blk_mq case we are only calling this to kill the
161 * q_usage_counter, otherwise this increases the freeze depth
162 * and waits for it to return to zero. For this reason there is
163 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
164 * exported to drivers as the only user for unfreeze is blk_mq.
165 */
166 blk_freeze_queue_start(q);
167 blk_mq_freeze_queue_wait(q);
168 }
169
void blk_mq_freeze_queue(struct request_queue *q)
171 {
172 /*
173 * ...just an alias to keep freeze and unfreeze actions balanced
174 * in the blk_mq_* namespace
175 */
176 blk_freeze_queue(q);
177 }
178 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
179
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
181 {
182 mutex_lock(&q->mq_freeze_lock);
183 if (force_atomic)
184 q->q_usage_counter.data->force_atomic = true;
185 q->mq_freeze_depth--;
186 WARN_ON_ONCE(q->mq_freeze_depth < 0);
187 if (!q->mq_freeze_depth) {
188 percpu_ref_resurrect(&q->q_usage_counter);
189 wake_up_all(&q->mq_freeze_wq);
190 }
191 mutex_unlock(&q->mq_freeze_lock);
192 }
193
void blk_mq_unfreeze_queue(struct request_queue *q)
195 {
196 __blk_mq_unfreeze_queue(q, false);
197 }
198 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
199
200 /*
201 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
202 * mpt3sas driver such that this function can be removed.
203 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
205 {
206 unsigned long flags;
207
208 spin_lock_irqsave(&q->queue_lock, flags);
209 if (!q->quiesce_depth++)
210 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
211 spin_unlock_irqrestore(&q->queue_lock, flags);
212 }
213 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
214
215 /**
216 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
217 * @set: tag_set to wait on
218 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set. This
221 * function only waits for the quiesce on those request_queues that had
222 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
223 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
225 {
226 if (set->flags & BLK_MQ_F_BLOCKING)
227 synchronize_srcu(set->srcu);
228 else
229 synchronize_rcu();
230 }
231 EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
232
233 /**
234 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
235 * @q: request queue.
236 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
240 * blk_mq_unquiesce_queue().
241 */
void blk_mq_quiesce_queue(struct request_queue *q)
243 {
244 blk_mq_quiesce_queue_nowait(q);
245 /* nothing to wait for non-mq queues */
246 if (queue_is_mq(q))
247 blk_mq_wait_quiesce_done(q->tag_set);
248 }
249 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
250
251 /*
252 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
253 * @q: request queue.
254 *
 * This function restores the queue to the state it had before
 * blk_mq_quiesce_queue() was called.
257 */
void blk_mq_unquiesce_queue(struct request_queue *q)
259 {
260 unsigned long flags;
261 bool run_queue = false;
262
263 spin_lock_irqsave(&q->queue_lock, flags);
264 if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
265 ;
266 } else if (!--q->quiesce_depth) {
267 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
268 run_queue = true;
269 }
270 spin_unlock_irqrestore(&q->queue_lock, flags);
271
272 /* dispatch requests which are inserted during quiescing */
273 if (run_queue)
274 blk_mq_run_hw_queues(q, true);
275 }
276 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
277
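/*
 * Quiesce all request queues that share this tag set (except those marked to
 * skip tagset quiesce) and wait for any in-progress dispatches to finish.
 */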
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
279 {
280 struct request_queue *q;
281
282 mutex_lock(&set->tag_list_lock);
283 list_for_each_entry(q, &set->tag_list, tag_set_list) {
284 if (!blk_queue_skip_tagset_quiesce(q))
285 blk_mq_quiesce_queue_nowait(q);
286 }
287 mutex_unlock(&set->tag_list_lock);
288
289 blk_mq_wait_quiesce_done(set);
290 }
291 EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
292
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
294 {
295 struct request_queue *q;
296
297 mutex_lock(&set->tag_list_lock);
298 list_for_each_entry(q, &set->tag_list, tag_set_list) {
299 if (!blk_queue_skip_tagset_quiesce(q))
300 blk_mq_unquiesce_queue(q);
301 }
302 mutex_unlock(&set->tag_list_lock);
303 }
304 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
305
void blk_mq_wake_waiters(struct request_queue *q)
307 {
308 struct blk_mq_hw_ctx *hctx;
309 unsigned long i;
310
311 queue_for_each_hw_ctx(q, hctx, i)
312 if (blk_mq_hw_queue_mapped(hctx))
313 blk_mq_tag_wakeup_all(hctx->tags, true);
314 }
315
void blk_rq_init(struct request_queue *q, struct request *rq)
317 {
318 memset(rq, 0, sizeof(*rq));
319
320 INIT_LIST_HEAD(&rq->queuelist);
321 rq->q = q;
322 rq->__sector = (sector_t) -1;
323 INIT_HLIST_NODE(&rq->hash);
324 RB_CLEAR_NODE(&rq->rb_node);
325 rq->tag = BLK_MQ_NO_TAG;
326 rq->internal_tag = BLK_MQ_NO_TAG;
327 rq->start_time_ns = ktime_get_ns();
328 rq->part = NULL;
329 blk_crypto_rq_set_defaults(rq);
330 }
331 EXPORT_SYMBOL(blk_rq_init);
332
333 /* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
335 {
336 if (blk_mq_need_time_stamp(rq))
337 rq->start_time_ns = ktime_get_ns();
338 else
339 rq->start_time_ns = 0;
340
341 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
342 if (blk_queue_rq_alloc_time(rq->q))
343 rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
344 else
345 rq->alloc_time_ns = 0;
346 #endif
347 }
348
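/*
 * Initialize the pre-allocated request for the given tag: wire it up to the
 * queue, sw/hw context and (scheduler) tag, reset its fields and, for
 * scheduler-managed requests, call the elevator's prepare_request hook.
 */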
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
350 struct blk_mq_tags *tags, unsigned int tag)
351 {
352 struct blk_mq_ctx *ctx = data->ctx;
353 struct blk_mq_hw_ctx *hctx = data->hctx;
354 struct request_queue *q = data->q;
355 struct request *rq = tags->static_rqs[tag];
356
357 rq->q = q;
358 rq->mq_ctx = ctx;
359 rq->mq_hctx = hctx;
360 rq->cmd_flags = data->cmd_flags;
361
362 if (data->flags & BLK_MQ_REQ_PM)
363 data->rq_flags |= RQF_PM;
364 if (blk_queue_io_stat(q))
365 data->rq_flags |= RQF_IO_STAT;
366 rq->rq_flags = data->rq_flags;
367
368 if (data->rq_flags & RQF_SCHED_TAGS) {
369 rq->tag = BLK_MQ_NO_TAG;
370 rq->internal_tag = tag;
371 } else {
372 rq->tag = tag;
373 rq->internal_tag = BLK_MQ_NO_TAG;
374 }
375 rq->timeout = 0;
376
377 rq->part = NULL;
378 rq->io_start_time_ns = 0;
379 rq->stats_sectors = 0;
380 rq->nr_phys_segments = 0;
381 #if defined(CONFIG_BLK_DEV_INTEGRITY)
382 rq->nr_integrity_segments = 0;
383 #endif
384 rq->end_io = NULL;
385 rq->end_io_data = NULL;
386
387 blk_crypto_rq_set_defaults(rq);
388 INIT_LIST_HEAD(&rq->queuelist);
389 /* tag was already set */
390 WRITE_ONCE(rq->deadline, 0);
391 req_ref_set(rq, 1);
392
393 if (rq->rq_flags & RQF_USE_SCHED) {
394 struct elevator_queue *e = data->q->elevator;
395
396 INIT_HLIST_NODE(&rq->hash);
397 RB_CLEAR_NODE(&rq->rb_node);
398
399 if (e->type->ops.prepare_request)
400 e->type->ops.prepare_request(rq);
401 }
402
403 return rq;
404 }
405
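/*
 * Batched allocation: grab a whole mask of tags in one go, initialize a
 * request for each tag and stash them on data->cached_rq; return one of them
 * and leave the rest cached for later use by the plug.
 */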
406 static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
408 {
409 unsigned int tag, tag_offset;
410 struct blk_mq_tags *tags;
411 struct request *rq;
412 unsigned long tag_mask;
413 int i, nr = 0;
414
415 tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
416 if (unlikely(!tag_mask))
417 return NULL;
418
419 tags = blk_mq_tags_from_data(data);
420 for (i = 0; tag_mask; i++) {
421 if (!(tag_mask & (1UL << i)))
422 continue;
423 tag = tag_offset + i;
424 prefetch(tags->static_rqs[tag]);
425 tag_mask &= ~(1UL << i);
426 rq = blk_mq_rq_ctx_init(data, tags, tag);
427 rq_list_add(data->cached_rq, rq);
428 nr++;
429 }
430 /* caller already holds a reference, add for remainder */
431 percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
432 data->nr_tags -= nr;
433
434 return rq_list_pop(data->cached_rq);
435 }
436
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
438 {
439 struct request_queue *q = data->q;
440 u64 alloc_time_ns = 0;
441 struct request *rq;
442 unsigned int tag;
443
444 /* alloc_time includes depth and tag waits */
445 if (blk_queue_rq_alloc_time(q))
446 alloc_time_ns = ktime_get_ns();
447
448 if (data->cmd_flags & REQ_NOWAIT)
449 data->flags |= BLK_MQ_REQ_NOWAIT;
450
451 retry:
452 data->ctx = blk_mq_get_ctx(q);
453 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
454
455 if (q->elevator) {
456 /*
457 * All requests use scheduler tags when an I/O scheduler is
458 * enabled for the queue.
459 */
460 data->rq_flags |= RQF_SCHED_TAGS;
461
462 /*
463 * Flush/passthrough requests are special and go directly to the
464 * dispatch list.
465 */
466 if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
467 !blk_op_is_passthrough(data->cmd_flags)) {
468 struct elevator_mq_ops *ops = &q->elevator->type->ops;
469
470 WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
471
472 data->rq_flags |= RQF_USE_SCHED;
473 if (ops->limit_depth)
474 ops->limit_depth(data->cmd_flags, data);
475 }
476 } else {
477 blk_mq_tag_busy(data->hctx);
478 }
479
480 if (data->flags & BLK_MQ_REQ_RESERVED)
481 data->rq_flags |= RQF_RESV;
482
483 /*
484 * Try batched alloc if we want more than 1 tag.
485 */
486 if (data->nr_tags > 1) {
487 rq = __blk_mq_alloc_requests_batch(data);
488 if (rq) {
489 blk_mq_rq_time_init(rq, alloc_time_ns);
490 return rq;
491 }
492 data->nr_tags = 1;
493 }
494
495 /*
496 * Waiting allocations only fail because of an inactive hctx. In that
497 * case just retry the hctx assignment and tag allocation as CPU hotplug
498 * should have migrated us to an online CPU by now.
499 */
500 tag = blk_mq_get_tag(data);
501 if (tag == BLK_MQ_NO_TAG) {
502 if (data->flags & BLK_MQ_REQ_NOWAIT)
503 return NULL;
504 /*
505 * Give up the CPU and sleep for a random short time to
 * ensure that threads using a realtime scheduling class
507 * are migrated off the CPU, and thus off the hctx that
508 * is going away.
509 */
510 msleep(3);
511 goto retry;
512 }
513
514 rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
515 blk_mq_rq_time_init(rq, alloc_time_ns);
516 return rq;
517 }
518
static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
520 struct blk_plug *plug,
521 blk_opf_t opf,
522 blk_mq_req_flags_t flags)
523 {
524 struct blk_mq_alloc_data data = {
525 .q = q,
526 .flags = flags,
527 .cmd_flags = opf,
528 .nr_tags = plug->nr_ios,
529 .cached_rq = &plug->cached_rq,
530 };
531 struct request *rq;
532
533 if (blk_queue_enter(q, flags))
534 return NULL;
535
536 plug->nr_ios = 1;
537
538 rq = __blk_mq_alloc_requests(&data);
539 if (unlikely(!rq))
540 blk_queue_exit(q);
541 return rq;
542 }
543
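/*
 * Try to satisfy an allocation from the requests cached in the current plug.
 * Refill the cache when it is empty and more than one request was asked for;
 * return NULL if the cached request does not match the queue, hctx type or
 * flush requirement of this allocation.
 */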
static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
545 blk_opf_t opf,
546 blk_mq_req_flags_t flags)
547 {
548 struct blk_plug *plug = current->plug;
549 struct request *rq;
550
551 if (!plug)
552 return NULL;
553
554 if (rq_list_empty(plug->cached_rq)) {
555 if (plug->nr_ios == 1)
556 return NULL;
557 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
558 if (!rq)
559 return NULL;
560 } else {
561 rq = rq_list_peek(&plug->cached_rq);
562 if (!rq || rq->q != q)
563 return NULL;
564
565 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
566 return NULL;
567 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
568 return NULL;
569
570 plug->cached_rq = rq_list_next(rq);
571 blk_mq_rq_time_init(rq, 0);
572 }
573
574 rq->cmd_flags = opf;
575 INIT_LIST_HEAD(&rq->queuelist);
576 return rq;
577 }
578
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
580 blk_mq_req_flags_t flags)
581 {
582 struct request *rq;
583
584 rq = blk_mq_alloc_cached_request(q, opf, flags);
585 if (!rq) {
586 struct blk_mq_alloc_data data = {
587 .q = q,
588 .flags = flags,
589 .cmd_flags = opf,
590 .nr_tags = 1,
591 };
592 int ret;
593
594 ret = blk_queue_enter(q, flags);
595 if (ret)
596 return ERR_PTR(ret);
597
598 rq = __blk_mq_alloc_requests(&data);
599 if (!rq)
600 goto out_queue_exit;
601 }
602 rq->__data_len = 0;
603 rq->__sector = (sector_t) -1;
604 rq->bio = rq->biotail = NULL;
605 return rq;
606 out_queue_exit:
607 blk_queue_exit(q);
608 return ERR_PTR(-EWOULDBLOCK);
609 }
610 EXPORT_SYMBOL(blk_mq_alloc_request);
611
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
613 blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
614 {
615 struct blk_mq_alloc_data data = {
616 .q = q,
617 .flags = flags,
618 .cmd_flags = opf,
619 .nr_tags = 1,
620 };
621 u64 alloc_time_ns = 0;
622 struct request *rq;
623 unsigned int cpu;
624 unsigned int tag;
625 int ret;
626
627 /* alloc_time includes depth and tag waits */
628 if (blk_queue_rq_alloc_time(q))
629 alloc_time_ns = ktime_get_ns();
630
631 /*
632 * If the tag allocator sleeps we could get an allocation for a
633 * different hardware context. No need to complicate the low level
634 * allocator for this for the rare use case of a command tied to
635 * a specific queue.
636 */
637 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
638 WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
639 return ERR_PTR(-EINVAL);
640
641 if (hctx_idx >= q->nr_hw_queues)
642 return ERR_PTR(-EIO);
643
644 ret = blk_queue_enter(q, flags);
645 if (ret)
646 return ERR_PTR(ret);
647
648 /*
649 * Check if the hardware context is actually mapped to anything.
650 * If not tell the caller that it should skip this queue.
651 */
652 ret = -EXDEV;
653 data.hctx = xa_load(&q->hctx_table, hctx_idx);
654 if (!blk_mq_hw_queue_mapped(data.hctx))
655 goto out_queue_exit;
656 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
657 if (cpu >= nr_cpu_ids)
658 goto out_queue_exit;
659 data.ctx = __blk_mq_get_ctx(q, cpu);
660
661 if (q->elevator)
662 data.rq_flags |= RQF_SCHED_TAGS;
663 else
664 blk_mq_tag_busy(data.hctx);
665
666 if (flags & BLK_MQ_REQ_RESERVED)
667 data.rq_flags |= RQF_RESV;
668
669 ret = -EWOULDBLOCK;
670 tag = blk_mq_get_tag(&data);
671 if (tag == BLK_MQ_NO_TAG)
672 goto out_queue_exit;
673 rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
674 blk_mq_rq_time_init(rq, alloc_time_ns);
675 rq->__data_len = 0;
676 rq->__sector = (sector_t) -1;
677 rq->bio = rq->biotail = NULL;
678 return rq;
679
680 out_queue_exit:
681 blk_queue_exit(q);
682 return ERR_PTR(ret);
683 }
684 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
685
static void blk_mq_finish_request(struct request *rq)
687 {
688 struct request_queue *q = rq->q;
689
690 if (rq->rq_flags & RQF_USE_SCHED) {
691 q->elevator->type->ops.finish_request(rq);
692 /*
693 * For postflush request that may need to be
694 * completed twice, we should clear this flag
695 * to avoid double finish_request() on the rq.
696 */
697 rq->rq_flags &= ~RQF_USE_SCHED;
698 }
699 }
700
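/*
 * Final teardown of a request once its last reference is dropped: release
 * crypto state, driver and scheduler tags, restart the hctx if needed and
 * drop the queue usage reference taken at allocation time.
 */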
static void __blk_mq_free_request(struct request *rq)
702 {
703 struct request_queue *q = rq->q;
704 struct blk_mq_ctx *ctx = rq->mq_ctx;
705 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
706 const int sched_tag = rq->internal_tag;
707
708 blk_crypto_free_request(rq);
709 blk_pm_mark_last_busy(rq);
710 rq->mq_hctx = NULL;
711
712 if (rq->rq_flags & RQF_MQ_INFLIGHT)
713 __blk_mq_dec_active_requests(hctx);
714
715 if (rq->tag != BLK_MQ_NO_TAG)
716 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
717 if (sched_tag != BLK_MQ_NO_TAG)
718 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
719 blk_mq_sched_restart(hctx);
720 blk_queue_exit(q);
721 }
722
void blk_mq_free_request(struct request *rq)
724 {
725 struct request_queue *q = rq->q;
726
727 blk_mq_finish_request(rq);
728
729 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
730 laptop_io_completion(q->disk->bdi);
731
732 rq_qos_done(q, rq);
733
734 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
735 if (req_ref_put_and_test(rq))
736 __blk_mq_free_request(rq);
737 }
738 EXPORT_SYMBOL_GPL(blk_mq_free_request);
739
void blk_mq_free_plug_rqs(struct blk_plug *plug)
741 {
742 struct request *rq;
743
744 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
745 blk_mq_free_request(rq);
746 }
747
void blk_dump_rq_flags(struct request *rq, char *msg)
749 {
750 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
751 rq->q->disk ? rq->q->disk->disk_name : "?",
752 (__force unsigned long long) rq->cmd_flags);
753
754 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
755 (unsigned long long)blk_rq_pos(rq),
756 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
757 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
758 rq->bio, rq->biotail, blk_rq_bytes(rq));
759 }
760 EXPORT_SYMBOL(blk_dump_rq_flags);
761
static void req_bio_endio(struct request *rq, struct bio *bio,
763 unsigned int nbytes, blk_status_t error)
764 {
765 if (unlikely(error)) {
766 bio->bi_status = error;
767 } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
768 /*
769 * Partial zone append completions cannot be supported as the
770 * BIO fragments may end up not being written sequentially.
771 */
772 if (bio->bi_iter.bi_size != nbytes)
773 bio->bi_status = BLK_STS_IOERR;
774 else
775 bio->bi_iter.bi_sector = rq->__sector;
776 }
777
778 bio_advance(bio, nbytes);
779
780 if (unlikely(rq->rq_flags & RQF_QUIET))
781 bio_set_flag(bio, BIO_QUIET);
782 /* don't actually finish bio if it's part of flush sequence */
783 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
784 bio_endio(bio);
785 }
786
static void blk_account_io_completion(struct request *req, unsigned int bytes)
788 {
789 if (req->part && blk_do_io_stat(req)) {
790 const int sgrp = op_stat_group(req_op(req));
791
792 part_stat_lock();
793 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
794 part_stat_unlock();
795 }
796 }
797
static void blk_print_req_error(struct request *req, blk_status_t status)
799 {
800 printk_ratelimited(KERN_ERR
801 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
802 "phys_seg %u prio class %u\n",
803 blk_status_to_str(status),
804 req->q->disk ? req->q->disk->disk_name : "?",
805 blk_rq_pos(req), (__force u32)req_op(req),
806 blk_op_str(req_op(req)),
807 (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
808 req->nr_phys_segments,
809 IOPRIO_PRIO_CLASS(req->ioprio));
810 }
811
812 /*
813 * Fully end IO on a request. Does not support partial completions, or
814 * errors.
815 */
static void blk_complete_request(struct request *req)
817 {
818 const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
819 int total_bytes = blk_rq_bytes(req);
820 struct bio *bio = req->bio;
821
822 trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
823
824 if (!bio)
825 return;
826
827 #ifdef CONFIG_BLK_DEV_INTEGRITY
828 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
829 req->q->integrity.profile->complete_fn(req, total_bytes);
830 #endif
831
832 /*
833 * Upper layers may call blk_crypto_evict_key() anytime after the last
834 * bio_endio(). Therefore, the keyslot must be released before that.
835 */
836 blk_crypto_rq_put_keyslot(req);
837
838 blk_account_io_completion(req, total_bytes);
839
840 do {
841 struct bio *next = bio->bi_next;
842
843 /* Completion has already been traced */
844 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
845
846 if (req_op(req) == REQ_OP_ZONE_APPEND)
847 bio->bi_iter.bi_sector = req->__sector;
848
849 if (!is_flush)
850 bio_endio(bio);
851 bio = next;
852 } while (bio);
853
854 /*
855 * Reset counters so that the request stacking driver
856 * can find how many bytes remain in the request
857 * later.
858 */
859 if (!req->end_io) {
860 req->bio = NULL;
861 req->__data_len = 0;
862 }
863 }
864
865 /**
866 * blk_update_request - Complete multiple bytes without completing the request
867 * @req: the request being processed
868 * @error: block status code
869 * @nr_bytes: number of bytes to complete for @req
870 *
871 * Description:
872 * Ends I/O on a number of bytes attached to @req, but doesn't complete
873 * the request structure even if @req doesn't have leftover.
874 * If @req has leftover, sets it up for the next range of segments.
875 *
876 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
877 * %false return from this function.
878 *
879 * Note:
880 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
881 * except in the consistency check at the end of this function.
882 *
883 * Return:
884 * %false - this request doesn't have any more data
885 * %true - this request has more data
886 **/
bool blk_update_request(struct request *req, blk_status_t error,
888 unsigned int nr_bytes)
889 {
890 int total_bytes;
891
892 trace_block_rq_complete(req, error, nr_bytes);
893
894 if (!req->bio)
895 return false;
896
897 #ifdef CONFIG_BLK_DEV_INTEGRITY
898 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
899 error == BLK_STS_OK)
900 req->q->integrity.profile->complete_fn(req, nr_bytes);
901 #endif
902
903 /*
904 * Upper layers may call blk_crypto_evict_key() anytime after the last
905 * bio_endio(). Therefore, the keyslot must be released before that.
906 */
907 if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
908 __blk_crypto_rq_put_keyslot(req);
909
910 if (unlikely(error && !blk_rq_is_passthrough(req) &&
911 !(req->rq_flags & RQF_QUIET)) &&
912 !test_bit(GD_DEAD, &req->q->disk->state)) {
913 blk_print_req_error(req, error);
914 trace_block_rq_error(req, error, nr_bytes);
915 }
916
917 blk_account_io_completion(req, nr_bytes);
918
919 total_bytes = 0;
920 while (req->bio) {
921 struct bio *bio = req->bio;
922 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
923
924 if (bio_bytes == bio->bi_iter.bi_size)
925 req->bio = bio->bi_next;
926
927 /* Completion has already been traced */
928 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
929 req_bio_endio(req, bio, bio_bytes, error);
930
931 total_bytes += bio_bytes;
932 nr_bytes -= bio_bytes;
933
934 if (!nr_bytes)
935 break;
936 }
937
938 /*
939 * completely done
940 */
941 if (!req->bio) {
942 /*
943 * Reset counters so that the request stacking driver
944 * can find how many bytes remain in the request
945 * later.
946 */
947 req->__data_len = 0;
948 return false;
949 }
950
951 req->__data_len -= total_bytes;
952
953 /* update sector only for requests with clear definition of sector */
954 if (!blk_rq_is_passthrough(req))
955 req->__sector += total_bytes >> 9;
956
957 /* mixed attributes always follow the first bio */
958 if (req->rq_flags & RQF_MIXED_MERGE) {
959 req->cmd_flags &= ~REQ_FAILFAST_MASK;
960 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
961 }
962
963 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
964 /*
965 * If total number of sectors is less than the first segment
966 * size, something has gone terribly wrong.
967 */
968 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
969 blk_dump_rq_flags(req, "request botched");
970 req->__data_len = blk_rq_cur_bytes(req);
971 }
972
973 /* recalculate the number of segments */
974 req->nr_phys_segments = blk_recalc_rq_segments(req);
975 }
976
977 return true;
978 }
979 EXPORT_SYMBOL_GPL(blk_update_request);
980
static inline void blk_account_io_done(struct request *req, u64 now)
982 {
983 trace_block_io_done(req);
984
985 /*
986 * Account IO completion. flush_rq isn't accounted as a
987 * normal IO on queueing nor completion. Accounting the
988 * containing request is enough.
989 */
990 if (blk_do_io_stat(req) && req->part &&
991 !(req->rq_flags & RQF_FLUSH_SEQ)) {
992 const int sgrp = op_stat_group(req_op(req));
993
994 part_stat_lock();
995 update_io_ticks(req->part, jiffies, true);
996 part_stat_inc(req->part, ios[sgrp]);
997 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
998 part_stat_local_dec(req->part,
999 in_flight[op_is_write(req_op(req))]);
1000 part_stat_unlock();
1001 }
1002 }
1003
static inline void blk_account_io_start(struct request *req)
1005 {
1006 trace_block_io_start(req);
1007
1008 if (blk_do_io_stat(req)) {
1009 /*
1010 * All non-passthrough requests are created from a bio with one
1011 * exception: when a flush command that is part of a flush sequence
1012 * generated by the state machine in blk-flush.c is cloned onto the
1013 * lower device by dm-multipath we can get here without a bio.
1014 */
1015 if (req->bio)
1016 req->part = req->bio->bi_bdev;
1017 else
1018 req->part = req->q->disk->part0;
1019
1020 part_stat_lock();
1021 update_io_ticks(req->part, jiffies, false);
1022 part_stat_local_inc(req->part,
1023 in_flight[op_is_write(req_op(req))]);
1024 part_stat_unlock();
1025 }
1026 }
1027
static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
1029 {
1030 if (rq->rq_flags & RQF_STATS)
1031 blk_stat_add(rq, now);
1032
1033 blk_mq_sched_completed_request(rq, now);
1034 blk_account_io_done(rq, now);
1035 }
1036
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1038 {
1039 if (blk_mq_need_time_stamp(rq))
1040 __blk_mq_end_request_acct(rq, ktime_get_ns());
1041
1042 blk_mq_finish_request(rq);
1043
1044 if (rq->end_io) {
1045 rq_qos_done(rq->q, rq);
1046 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1047 blk_mq_free_request(rq);
1048 } else {
1049 blk_mq_free_request(rq);
1050 }
1051 }
1052 EXPORT_SYMBOL(__blk_mq_end_request);
1053
void blk_mq_end_request(struct request *rq, blk_status_t error)
1055 {
1056 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1057 BUG();
1058 __blk_mq_end_request(rq, error);
1059 }
1060 EXPORT_SYMBOL(blk_mq_end_request);
1061
1062 #define TAG_COMP_BATCH 32
1063
static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1065 int *tag_array, int nr_tags)
1066 {
1067 struct request_queue *q = hctx->queue;
1068
1069 /*
1070 * All requests should have been marked as RQF_MQ_INFLIGHT, so
1071 * update hctx->nr_active in batch
1072 */
1073 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
1074 __blk_mq_sub_active_requests(hctx, nr_tags);
1075
1076 blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1077 percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1078 }
1079
void blk_mq_end_request_batch(struct io_comp_batch *iob)
1081 {
1082 int tags[TAG_COMP_BATCH], nr_tags = 0;
1083 struct blk_mq_hw_ctx *cur_hctx = NULL;
1084 struct request *rq;
1085 u64 now = 0;
1086
1087 if (iob->need_ts)
1088 now = ktime_get_ns();
1089
1090 while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1091 prefetch(rq->bio);
1092 prefetch(rq->rq_next);
1093
1094 blk_complete_request(rq);
1095 if (iob->need_ts)
1096 __blk_mq_end_request_acct(rq, now);
1097
1098 blk_mq_finish_request(rq);
1099
1100 rq_qos_done(rq->q, rq);
1101
1102 /*
1103 * If end_io handler returns NONE, then it still has
1104 * ownership of the request.
1105 */
1106 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1107 continue;
1108
1109 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1110 if (!req_ref_put_and_test(rq))
1111 continue;
1112
1113 blk_crypto_free_request(rq);
1114 blk_pm_mark_last_busy(rq);
1115
1116 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1117 if (cur_hctx)
1118 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1119 nr_tags = 0;
1120 cur_hctx = rq->mq_hctx;
1121 }
1122 tags[nr_tags++] = rq->tag;
1123 }
1124
1125 if (nr_tags)
1126 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1127 }
1128 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
1129
static void blk_complete_reqs(struct llist_head *list)
1131 {
1132 struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1133 struct request *rq, *next;
1134
1135 llist_for_each_entry_safe(rq, next, entry, ipi_list)
1136 rq->q->mq_ops->complete(rq);
1137 }
1138
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
1140 {
1141 blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1142 }
1143
static int blk_softirq_cpu_dead(unsigned int cpu)
1145 {
1146 blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1147 return 0;
1148 }
1149
static void __blk_mq_complete_request_remote(void *data)
1151 {
1152 __raise_softirq_irqoff(BLOCK_SOFTIRQ);
1153 }
1154
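/*
 * Decide whether the completion should be redirected to the submitting CPU
 * (or its cache domain) via IPI instead of being handled on the local CPU.
 */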
static inline bool blk_mq_complete_need_ipi(struct request *rq)
1156 {
1157 int cpu = raw_smp_processor_id();
1158
1159 if (!IS_ENABLED(CONFIG_SMP) ||
1160 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1161 return false;
1162 /*
1163 * With force threaded interrupts enabled, raising softirq from an SMP
1164 * function call will always result in waking the ksoftirqd thread.
1165 * This is probably worse than completing the request on a different
1166 * cache domain.
1167 */
1168 if (force_irqthreads())
1169 return false;
1170
1171 /* same CPU or cache domain? Complete locally */
1172 if (cpu == rq->mq_ctx->cpu ||
1173 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1174 cpus_share_cache(cpu, rq->mq_ctx->cpu)))
1175 return false;
1176
1177 /* don't try to IPI to an offline CPU */
1178 return cpu_online(rq->mq_ctx->cpu);
1179 }
1180
static void blk_mq_complete_send_ipi(struct request *rq)
1182 {
1183 unsigned int cpu;
1184
1185 cpu = rq->mq_ctx->cpu;
1186 if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1187 smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
1188 }
1189
static void blk_mq_raise_softirq(struct request *rq)
1191 {
1192 struct llist_head *list;
1193
1194 preempt_disable();
1195 list = this_cpu_ptr(&blk_cpu_done);
1196 if (llist_add(&rq->ipi_list, list))
1197 raise_softirq(BLOCK_SOFTIRQ);
1198 preempt_enable();
1199 }
1200
bool blk_mq_complete_request_remote(struct request *rq)
1202 {
1203 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1204
1205 /*
 * For a request whose hctx has only one ctx mapping, or a polled
 * request, always complete it locally: it's pointless to redirect
 * the completion.
1209 */
1210 if ((rq->mq_hctx->nr_ctx == 1 &&
1211 rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1212 rq->cmd_flags & REQ_POLLED)
1213 return false;
1214
1215 if (blk_mq_complete_need_ipi(rq)) {
1216 blk_mq_complete_send_ipi(rq);
1217 return true;
1218 }
1219
1220 if (rq->q->nr_hw_queues == 1) {
1221 blk_mq_raise_softirq(rq);
1222 return true;
1223 }
1224 return false;
1225 }
1226 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
1227
1228 /**
1229 * blk_mq_complete_request - end I/O on a request
1230 * @rq: the request being processed
1231 *
1232 * Description:
1233 * Complete a request by scheduling the ->complete_rq operation.
1234 **/
void blk_mq_complete_request(struct request *rq)
1236 {
1237 if (!blk_mq_complete_request_remote(rq))
1238 rq->q->mq_ops->complete(rq);
1239 }
1240 EXPORT_SYMBOL(blk_mq_complete_request);
1241
1242 /**
1243 * blk_mq_start_request - Start processing a request
1244 * @rq: Pointer to request to be started
1245 *
1246 * Function used by device drivers to notify the block layer that a request
1247 * is going to be processed now, so blk layer can do proper initializations
1248 * such as starting the timeout timer.
1249 */
void blk_mq_start_request(struct request *rq)
1251 {
1252 struct request_queue *q = rq->q;
1253
1254 trace_block_rq_issue(rq);
1255
1256 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
1257 rq->io_start_time_ns = ktime_get_ns();
1258 rq->stats_sectors = blk_rq_sectors(rq);
1259 rq->rq_flags |= RQF_STATS;
1260 rq_qos_issue(q, rq);
1261 }
1262
1263 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1264
1265 blk_add_timer(rq);
1266 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1267
1268 #ifdef CONFIG_BLK_DEV_INTEGRITY
1269 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1270 q->integrity.profile->prepare_fn(rq);
1271 #endif
1272 if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1273 WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
1274 }
1275 EXPORT_SYMBOL(blk_mq_start_request);
1276
1277 /*
1278 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1279 * queues. This is important for md arrays to benefit from merging
1280 * requests.
1281 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1283 {
1284 if (plug->multiple_queues)
1285 return BLK_MAX_REQUEST_COUNT * 2;
1286 return BLK_MAX_REQUEST_COUNT;
1287 }
1288
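/*
 * Add a request to the current plug list, flushing the plug first if it has
 * grown too large, and track whether the plug spans multiple queues or
 * contains scheduler-tagged requests.
 */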
static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1290 {
1291 struct request *last = rq_list_peek(&plug->mq_list);
1292
1293 if (!plug->rq_count) {
1294 trace_block_plug(rq->q);
1295 } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1296 (!blk_queue_nomerges(rq->q) &&
1297 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1298 blk_mq_flush_plug_list(plug, false);
1299 last = NULL;
1300 trace_block_plug(rq->q);
1301 }
1302
1303 if (!plug->multiple_queues && last && last->q != rq->q)
1304 plug->multiple_queues = true;
1305 /*
1306 * Any request allocated from sched tags can't be issued to
1307 * ->queue_rqs() directly
1308 */
1309 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1310 plug->has_elevator = true;
1311 rq->rq_next = NULL;
1312 rq_list_add(&plug->mq_list, rq);
1313 plug->rq_count++;
1314 }
1315
1316 /**
1317 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
1318 * @rq: request to insert
1319 * @at_head: insert request at head or tail of queue
1320 *
1321 * Description:
1322 * Insert a fully prepared request at the back of the I/O scheduler queue
1323 * for execution. Don't wait for completion.
1324 *
1325 * Note:
1326 * This function will invoke @done directly if the queue is dead.
1327 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
1329 {
1330 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1331
1332 WARN_ON(irqs_disabled());
1333 WARN_ON(!blk_rq_is_passthrough(rq));
1334
1335 blk_account_io_start(rq);
1336
1337 /*
1338 * As plugging can be enabled for passthrough requests on a zoned
1339 * device, directly accessing the plug instead of using blk_mq_plug()
1340 * should not have any consequences.
1341 */
1342 if (current->plug && !at_head) {
1343 blk_add_rq_to_plug(current->plug, rq);
1344 return;
1345 }
1346
1347 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1348 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
1349 }
1350 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
1351
1352 struct blk_rq_wait {
1353 struct completion done;
1354 blk_status_t ret;
1355 };
1356
static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1358 {
1359 struct blk_rq_wait *wait = rq->end_io_data;
1360
1361 wait->ret = ret;
1362 complete(&wait->done);
1363 return RQ_END_IO_NONE;
1364 }
1365
bool blk_rq_is_poll(struct request *rq)
1367 {
1368 if (!rq->mq_hctx)
1369 return false;
1370 if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1371 return false;
1372 return true;
1373 }
1374 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
1375
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1377 {
1378 do {
1379 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1380 cond_resched();
1381 } while (!completion_done(wait));
1382 }
1383
1384 /**
1385 * blk_execute_rq - insert a request into queue for execution
1386 * @rq: request to insert
1387 * @at_head: insert request at head or tail of queue
1388 *
1389 * Description:
1390 * Insert a fully prepared request at the back of the I/O scheduler queue
1391 * for execution and wait for completion.
1392 * Return: The blk_status_t result provided to blk_mq_end_request().
1393 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1395 {
1396 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1397 struct blk_rq_wait wait = {
1398 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
1399 };
1400
1401 WARN_ON(irqs_disabled());
1402 WARN_ON(!blk_rq_is_passthrough(rq));
1403
1404 rq->end_io_data = &wait;
1405 rq->end_io = blk_end_sync_rq;
1406
1407 blk_account_io_start(rq);
1408 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1409 blk_mq_run_hw_queue(hctx, false);
1410
1411 if (blk_rq_is_poll(rq)) {
1412 blk_rq_poll_completion(rq, &wait.done);
1413 } else {
1414 /*
1415 * Prevent hang_check timer from firing at us during very long
1416 * I/O
1417 */
1418 unsigned long hang_check = sysctl_hung_task_timeout_secs;
1419
1420 if (hang_check)
1421 while (!wait_for_completion_io_timeout(&wait.done,
1422 hang_check * (HZ/2)))
1423 ;
1424 else
1425 wait_for_completion_io(&wait.done);
1426 }
1427
1428 return wait.ret;
1429 }
1430 EXPORT_SYMBOL(blk_execute_rq);
1431
static void __blk_mq_requeue_request(struct request *rq)
1433 {
1434 struct request_queue *q = rq->q;
1435
1436 blk_mq_put_driver_tag(rq);
1437
1438 trace_block_rq_requeue(rq);
1439 rq_qos_requeue(q, rq);
1440
1441 if (blk_mq_request_started(rq)) {
1442 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1443 rq->rq_flags &= ~RQF_TIMED_OUT;
1444 }
1445 }
1446
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1448 {
1449 struct request_queue *q = rq->q;
1450 unsigned long flags;
1451
1452 __blk_mq_requeue_request(rq);
1453
1454 /* this request will be re-inserted to io scheduler queue */
1455 blk_mq_sched_requeue_request(rq);
1456
1457 spin_lock_irqsave(&q->requeue_lock, flags);
1458 list_add_tail(&rq->queuelist, &q->requeue_list);
1459 spin_unlock_irqrestore(&q->requeue_lock, flags);
1460
1461 if (kick_requeue_list)
1462 blk_mq_kick_requeue_list(q);
1463 }
1464 EXPORT_SYMBOL(blk_mq_requeue_request);
1465
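/*
 * Work handler that re-inserts requeued requests: requests with
 * driver-specific state (RQF_DONTPREP) bypass the scheduler via the hctx
 * dispatch list, everything else is inserted at the head, pending flush
 * requests are inserted normally, and the hardware queues are then run.
 */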
static void blk_mq_requeue_work(struct work_struct *work)
1467 {
1468 struct request_queue *q =
1469 container_of(work, struct request_queue, requeue_work.work);
1470 LIST_HEAD(rq_list);
1471 LIST_HEAD(flush_list);
1472 struct request *rq;
1473
1474 spin_lock_irq(&q->requeue_lock);
1475 list_splice_init(&q->requeue_list, &rq_list);
1476 list_splice_init(&q->flush_list, &flush_list);
1477 spin_unlock_irq(&q->requeue_lock);
1478
1479 while (!list_empty(&rq_list)) {
1480 rq = list_entry(rq_list.next, struct request, queuelist);
1481 /*
 * If RQF_DONTPREP is set, the request has been started by the
1483 * driver already and might have driver-specific data allocated
1484 * already. Insert it into the hctx dispatch list to avoid
1485 * block layer merges for the request.
1486 */
1487 if (rq->rq_flags & RQF_DONTPREP) {
1488 list_del_init(&rq->queuelist);
1489 blk_mq_request_bypass_insert(rq, 0);
1490 } else {
1491 list_del_init(&rq->queuelist);
1492 blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
1493 }
1494 }
1495
1496 while (!list_empty(&flush_list)) {
1497 rq = list_entry(flush_list.next, struct request, queuelist);
1498 list_del_init(&rq->queuelist);
1499 blk_mq_insert_request(rq, 0);
1500 }
1501
1502 blk_mq_run_hw_queues(q, false);
1503 }
1504
void blk_mq_kick_requeue_list(struct request_queue *q)
1506 {
1507 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1508 }
1509 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
1510
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1512 unsigned long msecs)
1513 {
1514 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1515 msecs_to_jiffies(msecs));
1516 }
1517 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
1518
static bool blk_is_flush_data_rq(struct request *rq)
1520 {
1521 return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1522 }
1523
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1525 {
1526 /*
1527 * If we find a request that isn't idle we know the queue is busy
1528 * as it's checked in the iter.
1529 * Return false to stop the iteration.
1530 *
1531 * In case of queue quiesce, if one flush data request is completed,
1532 * don't count it as inflight given the flush sequence is suspended,
1533 * and the original flush data request is invisible to driver, just
1534 * like other pending requests because of quiesce
1535 */
1536 if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1537 blk_is_flush_data_rq(rq) &&
1538 blk_mq_request_completed(rq))) {
1539 bool *busy = priv;
1540
1541 *busy = true;
1542 return false;
1543 }
1544
1545 return true;
1546 }
1547
bool blk_mq_queue_inflight(struct request_queue *q)
1549 {
1550 bool busy = false;
1551
1552 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1553 return busy;
1554 }
1555 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1556
static void blk_mq_rq_timed_out(struct request *req)
1558 {
1559 req->rq_flags |= RQF_TIMED_OUT;
1560 if (req->q->mq_ops->timeout) {
1561 enum blk_eh_timer_return ret;
1562
1563 ret = req->q->mq_ops->timeout(req);
1564 if (ret == BLK_EH_DONE)
1565 return;
1566 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
1567 }
1568
1569 blk_add_timer(req);
1570 }
1571
1572 struct blk_expired_data {
1573 bool has_timedout_rq;
1574 unsigned long next;
1575 unsigned long timeout_start;
1576 };
1577
static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
1579 {
1580 unsigned long deadline;
1581
1582 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1583 return false;
1584 if (rq->rq_flags & RQF_TIMED_OUT)
1585 return false;
1586
1587 deadline = READ_ONCE(rq->deadline);
1588 if (time_after_eq(expired->timeout_start, deadline))
1589 return true;
1590
1591 if (expired->next == 0)
1592 expired->next = deadline;
1593 else if (time_after(expired->next, deadline))
1594 expired->next = deadline;
1595 return false;
1596 }
1597
void blk_mq_put_rq_ref(struct request *rq)
1599 {
1600 if (is_flush_rq(rq)) {
1601 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1602 blk_mq_free_request(rq);
1603 } else if (req_ref_put_and_test(rq)) {
1604 __blk_mq_free_request(rq);
1605 }
1606 }
1607
static bool blk_mq_check_expired(struct request *rq, void *priv)
1609 {
1610 struct blk_expired_data *expired = priv;
1611
1612 /*
1613 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1614 * be reallocated underneath the timeout handler's processing, then
1615 * the expire check is reliable. If the request is not expired, then
1616 * it was completed and reallocated as a new request after returning
1617 * from blk_mq_check_expired().
1618 */
1619 if (blk_mq_req_expired(rq, expired)) {
1620 expired->has_timedout_rq = true;
1621 return false;
1622 }
1623 return true;
1624 }
1625
static bool blk_mq_handle_expired(struct request *rq, void *priv)
1627 {
1628 struct blk_expired_data *expired = priv;
1629
1630 if (blk_mq_req_expired(rq, expired))
1631 blk_mq_rq_timed_out(rq);
1632 return true;
1633 }
1634
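/*
 * Timeout work: scan all started requests, invoke the driver's ->timeout()
 * handler for any that have expired and re-arm the queue's timeout timer for
 * the next pending deadline, or mark the hctxs idle when nothing is pending.
 */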
static void blk_mq_timeout_work(struct work_struct *work)
1636 {
1637 struct request_queue *q =
1638 container_of(work, struct request_queue, timeout_work);
1639 struct blk_expired_data expired = {
1640 .timeout_start = jiffies,
1641 };
1642 struct blk_mq_hw_ctx *hctx;
1643 unsigned long i;
1644
1645 /* A deadlock might occur if a request is stuck requiring a
1646 * timeout at the same time a queue freeze is waiting
1647 * completion, since the timeout code would not be able to
1648 * acquire the queue reference here.
1649 *
1650 * That's why we don't use blk_queue_enter here; instead, we use
1651 * percpu_ref_tryget directly, because we need to be able to
1652 * obtain a reference even in the short window between the queue
1653 * starting to freeze, by dropping the first reference in
1654 * blk_freeze_queue_start, and the moment the last request is
1655 * consumed, marked by the instant q_usage_counter reaches
1656 * zero.
1657 */
1658 if (!percpu_ref_tryget(&q->q_usage_counter))
1659 return;
1660
1661 /* check if there is any timed-out request */
1662 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
1663 if (expired.has_timedout_rq) {
1664 /*
1665 * Before walking tags, we must ensure any submit started
1666 * before the current time has finished. Since the submit
1667 * uses srcu or rcu, wait for a synchronization point to
1668 * ensure all running submits have finished
1669 */
1670 blk_mq_wait_quiesce_done(q->tag_set);
1671
1672 expired.next = 0;
1673 blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
1674 }
1675
1676 if (expired.next != 0) {
1677 mod_timer(&q->timeout, expired.next);
1678 } else {
1679 /*
1680 * Request timeouts are handled as a forward rolling timer. If
1681 * we end up here it means that no requests are pending and
1682 * also that no request has been pending for a while. Mark
1683 * each hctx as idle.
1684 */
1685 queue_for_each_hw_ctx(q, hctx, i) {
1686 /* the hctx may be unmapped, so check it here */
1687 if (blk_mq_hw_queue_mapped(hctx))
1688 blk_mq_tag_idle(hctx);
1689 }
1690 }
1691 blk_queue_exit(q);
1692 }
1693
1694 struct flush_busy_ctx_data {
1695 struct blk_mq_hw_ctx *hctx;
1696 struct list_head *list;
1697 };
1698
static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1700 {
1701 struct flush_busy_ctx_data *flush_data = data;
1702 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1703 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1704 enum hctx_type type = hctx->type;
1705
1706 spin_lock(&ctx->lock);
1707 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1708 sbitmap_clear_bit(sb, bitnr);
1709 spin_unlock(&ctx->lock);
1710 return true;
1711 }
1712
1713 /*
1714 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
1716 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1718 {
1719 struct flush_busy_ctx_data data = {
1720 .hctx = hctx,
1721 .list = list,
1722 };
1723
1724 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1725 }
1726 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1727
1728 struct dispatch_rq_data {
1729 struct blk_mq_hw_ctx *hctx;
1730 struct request *rq;
1731 };
1732
static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1734 void *data)
1735 {
1736 struct dispatch_rq_data *dispatch_data = data;
1737 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1738 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1739 enum hctx_type type = hctx->type;
1740
1741 spin_lock(&ctx->lock);
1742 if (!list_empty(&ctx->rq_lists[type])) {
1743 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1744 list_del_init(&dispatch_data->rq->queuelist);
1745 if (list_empty(&ctx->rq_lists[type]))
1746 sbitmap_clear_bit(sb, bitnr);
1747 }
1748 spin_unlock(&ctx->lock);
1749
1750 return !dispatch_data->rq;
1751 }
1752
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1754 struct blk_mq_ctx *start)
1755 {
1756 unsigned off = start ? start->index_hw[hctx->type] : 0;
1757 struct dispatch_rq_data data = {
1758 .hctx = hctx,
1759 .rq = NULL,
1760 };
1761
1762 __sbitmap_for_each_set(&hctx->ctx_map, off,
1763 dispatch_rq_from_ctx, &data);
1764
1765 return data.rq;
1766 }
1767
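/*
 * Slow path for allocating a driver tag: pick the reserved or normal bitmap
 * based on the scheduler tag, honour the shared-tag queue depth limit and,
 * on success, store the tag (offset past the reserved range) in rq->tag.
 */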
static bool __blk_mq_alloc_driver_tag(struct request *rq)
1769 {
1770 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1771 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1772 int tag;
1773
1774 blk_mq_tag_busy(rq->mq_hctx);
1775
1776 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1777 bt = &rq->mq_hctx->tags->breserved_tags;
1778 tag_offset = 0;
1779 } else {
1780 if (!hctx_may_queue(rq->mq_hctx, bt))
1781 return false;
1782 }
1783
1784 tag = __sbitmap_queue_get(bt);
1785 if (tag == BLK_MQ_NO_TAG)
1786 return false;
1787
1788 rq->tag = tag + tag_offset;
1789 return true;
1790 }
1791
bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1793 {
1794 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1795 return false;
1796
1797 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1798 !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1799 rq->rq_flags |= RQF_MQ_INFLIGHT;
1800 __blk_mq_inc_active_requests(hctx);
1801 }
1802 hctx->tags->rqs[rq->tag] = rq;
1803 return true;
1804 }
1805
1806 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1807 int flags, void *key)
1808 {
1809 struct blk_mq_hw_ctx *hctx;
1810
1811 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1812
1813 spin_lock(&hctx->dispatch_wait_lock);
1814 if (!list_empty(&wait->entry)) {
1815 struct sbitmap_queue *sbq;
1816
1817 list_del_init(&wait->entry);
1818 sbq = &hctx->tags->bitmap_tags;
1819 atomic_dec(&sbq->ws_active);
1820 }
1821 spin_unlock(&hctx->dispatch_wait_lock);
1822
1823 blk_mq_run_hw_queue(hctx, true);
1824 return 1;
1825 }
1826
1827 /*
1828 * Mark us waiting for a tag. For shared tags, this involves hooking us into
1829 * the tag wakeups. For non-shared tags, we can simply mark the queue as
1830 * needing a restart. In both cases, take care to check the condition
1831 * again after marking us as waiting.
1832 */
1833 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1834 struct request *rq)
1835 {
1836 struct sbitmap_queue *sbq;
1837 struct wait_queue_head *wq;
1838 wait_queue_entry_t *wait;
1839 bool ret;
1840
1841 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1842 !(blk_mq_is_shared_tags(hctx->flags))) {
1843 blk_mq_sched_mark_restart_hctx(hctx);
1844
1845 /*
1846 * It's possible that a tag was freed in the window between the
1847 * allocation failure and adding the hardware queue to the wait
1848 * queue.
1849 *
1850 * Don't clear RESTART here, someone else could have set it.
1851 * At most this will cost an extra queue run.
1852 */
1853 return blk_mq_get_driver_tag(rq);
1854 }
1855
1856 wait = &hctx->dispatch_wait;
1857 if (!list_empty_careful(&wait->entry))
1858 return false;
1859
1860 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1861 sbq = &hctx->tags->breserved_tags;
1862 else
1863 sbq = &hctx->tags->bitmap_tags;
1864 wq = &bt_wait_ptr(sbq, hctx)->wait;
1865
1866 spin_lock_irq(&wq->lock);
1867 spin_lock(&hctx->dispatch_wait_lock);
1868 if (!list_empty(&wait->entry)) {
1869 spin_unlock(&hctx->dispatch_wait_lock);
1870 spin_unlock_irq(&wq->lock);
1871 return false;
1872 }
1873
1874 atomic_inc(&sbq->ws_active);
1875 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1876 __add_wait_queue(wq, wait);
1877
1878 /*
1879 * Add an explicit barrier since blk_mq_get_driver_tag() does not
1880 * imply one in the failure case.
1881 *
1882 * Order adding us to the wait queue against allocating the driver tag.
1883 *
1884 * The pairing barrier is the one implied in sbitmap_queue_wake_up(),
1885 * which orders clearing the sbitmap tag bits against the lockless
1886 * waitqueue_active() check in __sbitmap_queue_wake_up().
1887 *
1888 * Without it, adding to the wait queue and getting the driver tag
1889 * could be reordered and __sbitmap_queue_wake_up() would wake up
1890 * nothing, because waitqueue_active() may not observe us on the queue.
1891 */
1892 smp_mb();
1893
1894 /*
1895 * It's possible that a tag was freed in the window between the
1896 * allocation failure and adding the hardware queue to the wait
1897 * queue.
1898 */
1899 ret = blk_mq_get_driver_tag(rq);
1900 if (!ret) {
1901 spin_unlock(&hctx->dispatch_wait_lock);
1902 spin_unlock_irq(&wq->lock);
1903 return false;
1904 }
1905
1906 /*
1907 * We got a tag, remove ourselves from the wait queue to ensure
1908 * someone else gets the wakeup.
1909 */
1910 list_del_init(&wait->entry);
1911 atomic_dec(&sbq->ws_active);
1912 spin_unlock(&hctx->dispatch_wait_lock);
1913 spin_unlock_irq(&wq->lock);
1914
1915 return true;
1916 }
1917
1918 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
1919 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
1920 /*
1921 * Update dispatch busy with an Exponential Weighted Moving Average (EWMA):
1922 * - EWMA is a simple way to maintain a running average
1923 * - weights of 7/8 and 1/8 are used so that old samples decay exponentially
1924 * - the busy sample is scaled by 2^4 so integer division doesn't round the
1925 * result straight down to 0; the exact factor doesn't matter much anyway
1926 */
1927 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1928 {
1929 unsigned int ewma;
1930
1931 ewma = hctx->dispatch_busy;
1932
1933 if (!ewma && !busy)
1934 return;
1935
1936 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1937 if (busy)
1938 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1939 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1940
1941 hctx->dispatch_busy = ewma;
1942 }
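/*
 * Worked example (illustrative only): with a weight of 8 and a factor of 4,
 * a busy dispatch updates ewma as (ewma * 7 + 16) / 8. Starting from 0, a
 * single busy dispatch yields 2, repeated busy dispatches converge towards
 * 16, and each non-busy dispatch decays the value by about 1/8 until it
 * reaches 0 again.
 */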
1943
1944 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1945
1946 static void blk_mq_handle_dev_resource(struct request *rq,
1947 struct list_head *list)
1948 {
1949 list_add(&rq->queuelist, list);
1950 __blk_mq_requeue_request(rq);
1951 }
1952
1953 static void blk_mq_handle_zone_resource(struct request *rq,
1954 struct list_head *zone_list)
1955 {
1956 /*
1957 * If we end up here it is because we cannot dispatch a request to a
1958 * specific zone due to LLD level zone-write locking or other zone
1959 * related resource not being available. In this case, set the request
1960 * aside in zone_list for retrying it later.
1961 */
1962 list_add(&rq->queuelist, zone_list);
1963 __blk_mq_requeue_request(rq);
1964 }
1965
1966 enum prep_dispatch {
1967 PREP_DISPATCH_OK,
1968 PREP_DISPATCH_NO_TAG,
1969 PREP_DISPATCH_NO_BUDGET,
1970 };
1971
1972 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1973 bool need_budget)
1974 {
1975 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1976 int budget_token = -1;
1977
1978 if (need_budget) {
1979 budget_token = blk_mq_get_dispatch_budget(rq->q);
1980 if (budget_token < 0) {
1981 blk_mq_put_driver_tag(rq);
1982 return PREP_DISPATCH_NO_BUDGET;
1983 }
1984 blk_mq_set_rq_budget_token(rq, budget_token);
1985 }
1986
1987 if (!blk_mq_get_driver_tag(rq)) {
1988 /*
1989 * The initial allocation attempt failed, so we need to
1990 * rerun the hardware queue when a tag is freed. The
1991 * waitqueue takes care of that. If the queue is run
1992 * before we add this entry back on the dispatch list,
1993 * we'll re-run it below.
1994 */
1995 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1996 /*
1997 * All budgets that were not obtained here will be put back
1998 * together when handling the partial dispatch
1999 */
2000 if (need_budget)
2001 blk_mq_put_dispatch_budget(rq->q, budget_token);
2002 return PREP_DISPATCH_NO_TAG;
2003 }
2004 }
2005
2006 return PREP_DISPATCH_OK;
2007 }
2008
2009 /* release all allocated budgets before calling blk_mq_dispatch_rq_list() */
2010 static void blk_mq_release_budgets(struct request_queue *q,
2011 struct list_head *list)
2012 {
2013 struct request *rq;
2014
2015 list_for_each_entry(rq, list, queuelist) {
2016 int budget_token = blk_mq_get_rq_budget_token(rq);
2017
2018 if (budget_token >= 0)
2019 blk_mq_put_dispatch_budget(q, budget_token);
2020 }
2021 }
2022
2023 /*
2024 * blk_mq_commit_rqs() notifies the driver that no more requests are coming,
2025 * serving the same purpose as setting bd->last. (See the comment on
2026 * commit_rqs in struct blk_mq_ops for details.)
2027 * It needs to be called explicitly in the unusual cases where we:
2028 * 1) did not queue everything that was initially scheduled to be queued
2029 * 2) failed the last attempt to queue a request
2030 */
2031 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
2032 bool from_schedule)
2033 {
2034 if (hctx->queue->mq_ops->commit_rqs && queued) {
2035 trace_block_unplug(hctx->queue, queued, !from_schedule);
2036 hctx->queue->mq_ops->commit_rqs(hctx);
2037 }
2038 }
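/*
 * For example, blk_mq_dispatch_rq_list() below calls this after its loop
 * when the list wasn't fully flushed or the last queue_rq() attempt failed,
 * since an earlier request may already have been sent with bd->last false.
 */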
2039
2040 /*
2041 * Returns true if we did some work AND can potentially do more.
2042 */
2043 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
2044 unsigned int nr_budgets)
2045 {
2046 enum prep_dispatch prep;
2047 struct request_queue *q = hctx->queue;
2048 struct request *rq;
2049 int queued;
2050 blk_status_t ret = BLK_STS_OK;
2051 LIST_HEAD(zone_list);
2052 bool needs_resource = false;
2053
2054 if (list_empty(list))
2055 return false;
2056
2057 /*
2058 * Now process all the entries, sending them to the driver.
2059 */
2060 queued = 0;
2061 do {
2062 struct blk_mq_queue_data bd;
2063
2064 rq = list_first_entry(list, struct request, queuelist);
2065
2066 WARN_ON_ONCE(hctx != rq->mq_hctx);
2067 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2068 if (prep != PREP_DISPATCH_OK)
2069 break;
2070
2071 list_del_init(&rq->queuelist);
2072
2073 bd.rq = rq;
2074 bd.last = list_empty(list);
2075
2076 /*
2077 * Once the request has been queued to the LLD, the budget no
2078 * longer needs to be accounted for here.
2079 */
2080 if (nr_budgets)
2081 nr_budgets--;
2082 ret = q->mq_ops->queue_rq(hctx, &bd);
2083 switch (ret) {
2084 case BLK_STS_OK:
2085 queued++;
2086 break;
2087 case BLK_STS_RESOURCE:
2088 needs_resource = true;
2089 fallthrough;
2090 case BLK_STS_DEV_RESOURCE:
2091 blk_mq_handle_dev_resource(rq, list);
2092 goto out;
2093 case BLK_STS_ZONE_RESOURCE:
2094 /*
2095 * Move the request to zone_list and keep going through
2096 * the dispatch list to find more requests the drive can
2097 * accept.
2098 */
2099 blk_mq_handle_zone_resource(rq, &zone_list);
2100 needs_resource = true;
2101 break;
2102 default:
2103 blk_mq_end_request(rq, ret);
2104 }
2105 } while (!list_empty(list));
2106 out:
2107 if (!list_empty(&zone_list))
2108 list_splice_tail_init(&zone_list, list);
2109
2110 /* If we didn't flush the entire list, we could have told the driver
2111 * there was more coming, but that turned out to be a lie.
2112 */
2113 if (!list_empty(list) || ret != BLK_STS_OK)
2114 blk_mq_commit_rqs(hctx, queued, false);
2115
2116 /*
2117 * Any items that need requeuing? Stuff them into hctx->dispatch,
2118 * that is where we will continue on next queue run.
2119 */
2120 if (!list_empty(list)) {
2121 bool needs_restart;
2122 /* For non-shared tags, the RESTART check will suffice */
2123 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2124 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
2125 blk_mq_is_shared_tags(hctx->flags));
2126
2127 if (nr_budgets)
2128 blk_mq_release_budgets(q, list);
2129
2130 spin_lock(&hctx->lock);
2131 list_splice_tail_init(list, &hctx->dispatch);
2132 spin_unlock(&hctx->lock);
2133
2134 /*
2135 * Order adding requests to hctx->dispatch against checking the
2136 * SCHED_RESTART flag. This smp_mb() pairs with the one in
2137 * blk_mq_sched_restart(), so that the restart path cannot miss the
2138 * newly added requests on hctx->dispatch while SCHED_RESTART is
2139 * observed here.
2140 */
2141 smp_mb();
2142
2143 /*
2144 * If SCHED_RESTART was set by the caller of this function and
2145 * it is no longer set that means that it was cleared by another
2146 * thread and hence that a queue rerun is needed.
2147 *
2148 * If 'no_tag' is set, that means that we failed getting
2149 * a driver tag with an I/O scheduler attached. If our dispatch
2150 * waitqueue is no longer active, ensure that we run the queue
2151 * AFTER adding our entries back to the list.
2152 *
2153 * If no I/O scheduler has been configured it is possible that
2154 * the hardware queue got stopped and restarted before requests
2155 * were pushed back onto the dispatch list. Rerun the queue to
2156 * avoid starvation. Notes:
2157 * - blk_mq_run_hw_queue() checks whether or not a queue has
2158 * been stopped before rerunning a queue.
2159 * - Some but not all block drivers stop a queue before
2160 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2161 * and dm-rq.
2162 *
2163 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2164 * bit is set, run queue after a delay to avoid IO stalls
2165 * that could otherwise occur if the queue is idle. We'll do
2166 * similar if we couldn't get budget or couldn't lock a zone
2167 * and SCHED_RESTART is set.
2168 */
2169 needs_restart = blk_mq_sched_needs_restart(hctx);
2170 if (prep == PREP_DISPATCH_NO_BUDGET)
2171 needs_resource = true;
2172 if (!needs_restart ||
2173 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2174 blk_mq_run_hw_queue(hctx, true);
2175 else if (needs_resource)
2176 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2177
2178 blk_mq_update_dispatch_busy(hctx, true);
2179 return false;
2180 }
2181
2182 blk_mq_update_dispatch_busy(hctx, false);
2183 return true;
2184 }
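/*
 * The return value is used by the dispatch loops in blk-mq-sched.c (e.g.
 * blk_mq_do_dispatch_sched()) to decide whether it is worth pulling more
 * requests from the elevator right away.
 */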
2185
2186 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2187 {
2188 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2189
2190 if (cpu >= nr_cpu_ids)
2191 cpu = cpumask_first(hctx->cpumask);
2192 return cpu;
2193 }
2194
2195 /*
2196 * It'd be great if the workqueue API had a way to pass
2197 * in a mask and had some smarts for more clever placement.
2198 * For now we just round-robin here, switching for every
2199 * BLK_MQ_CPU_WORK_BATCH queued items.
2200 */
2201 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2202 {
2203 bool tried = false;
2204 int next_cpu = hctx->next_cpu;
2205
2206 if (hctx->queue->nr_hw_queues == 1)
2207 return WORK_CPU_UNBOUND;
2208
2209 if (--hctx->next_cpu_batch <= 0) {
2210 select_cpu:
2211 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2212 cpu_online_mask);
2213 if (next_cpu >= nr_cpu_ids)
2214 next_cpu = blk_mq_first_mapped_cpu(hctx);
2215 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2216 }
2217
2218 /*
2219 * Schedule unbound if we can't find an online CPU for this hctx;
2220 * this should only happen while handling CPU hotplug (CPU DEAD).
2221 */
2222 if (!cpu_online(next_cpu)) {
2223 if (!tried) {
2224 tried = true;
2225 goto select_cpu;
2226 }
2227
2228 /*
2229 * Make sure we re-select a CPU next time, once CPUs in
2230 * hctx->cpumask have come back online.
2231 */
2232 hctx->next_cpu = next_cpu;
2233 hctx->next_cpu_batch = 1;
2234 return WORK_CPU_UNBOUND;
2235 }
2236
2237 hctx->next_cpu = next_cpu;
2238 return next_cpu;
2239 }
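/*
 * Example (illustrative numbers, more than one hw queue assumed): if
 * hctx->cpumask contains CPUs 2 and 5 and both are online, the run_work is
 * queued on CPU 2 for roughly BLK_MQ_CPU_WORK_BATCH invocations, then on
 * CPU 5 for the next batch, and so on.
 */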
2240
2241 /**
2242 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2243 * @hctx: Pointer to the hardware queue to run.
2244 * @msecs: Milliseconds of delay to wait before running the queue.
2245 *
2246 * Run a hardware queue asynchronously with a delay of @msecs.
2247 */
2248 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2249 {
2250 if (unlikely(blk_mq_hctx_stopped(hctx)))
2251 return;
2252 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2253 msecs_to_jiffies(msecs));
2254 }
2255 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2256
2257 static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)
2258 {
2259 bool need_run;
2260
2261 /*
2262 * While the queue is quiesced we may be switching the io scheduler,
2263 * updating nr_hw_queues, or similar, and the queue must not be run;
2264 * even blk_mq_hctx_has_pending() can't be called safely.
2265 *
2266 * If the queue is quiesced, it will be rerun from
2267 * blk_mq_unquiesce_queue().
2268 */
2269 __blk_mq_run_dispatch_ops(hctx->queue, false,
2270 need_run = !blk_queue_quiesced(hctx->queue) &&
2271 blk_mq_hctx_has_pending(hctx));
2272 return need_run;
2273 }
2274
2275 /**
2276 * blk_mq_run_hw_queue - Start to run a hardware queue.
2277 * @hctx: Pointer to the hardware queue to run.
2278 * @async: If we want to run the queue asynchronously.
2279 *
2280 * Check if the request queue is not in a quiesced state and if there are
2281 * pending requests to be sent. If this is true, run the queue to send requests
2282 * to hardware.
2283 */
2284 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2285 {
2286 bool need_run;
2287
2288 /*
2289 * We can't run the queue inline with interrupts disabled.
2290 */
2291 WARN_ON_ONCE(!async && in_interrupt());
2292
2293 might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2294
2295 need_run = blk_mq_hw_queue_need_run(hctx);
2296 if (!need_run) {
2297 unsigned long flags;
2298
2299 /*
2300 * Synchronize with blk_mq_unquiesce_queue(): the quiesced check
2301 * above was done locklessly, so recheck under ->queue_lock to make
2302 * sure we see the up-to-date state and don't miss rerunning the
2303 * hw queue.
2304 */
2305 spin_lock_irqsave(&hctx->queue->queue_lock, flags);
2306 need_run = blk_mq_hw_queue_need_run(hctx);
2307 spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
2308
2309 if (!need_run)
2310 return;
2311 }
2312
2313 if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2314 blk_mq_delay_run_hw_queue(hctx, 0);
2315 return;
2316 }
2317
2318 blk_mq_run_dispatch_ops(hctx->queue,
2319 blk_mq_sched_dispatch_requests(hctx));
2320 }
2321 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2322
2323 /*
2324 * Return the preferred queue to dispatch from (if any) for a non-mq
2325 * aware IO scheduler.
2326 */
2327 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2328 {
2329 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2330 /*
2331 * If the IO scheduler does not respect hardware queues when
2332 * dispatching, we just don't bother with multiple HW queues and
2333 * dispatch from hctx for the current CPU since running multiple queues
2334 * just causes lock contention inside the scheduler and pointless cache
2335 * bouncing.
2336 */
2337 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2338
2339 if (!blk_mq_hctx_stopped(hctx))
2340 return hctx;
2341 return NULL;
2342 }
2343
2344 /**
2345 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2346 * @q: Pointer to the request queue to run.
2347 * @async: If we want to run the queue asynchronously.
2348 */
2349 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2350 {
2351 struct blk_mq_hw_ctx *hctx, *sq_hctx;
2352 unsigned long i;
2353
2354 sq_hctx = NULL;
2355 if (blk_queue_sq_sched(q))
2356 sq_hctx = blk_mq_get_sq_hctx(q);
2357 queue_for_each_hw_ctx(q, hctx, i) {
2358 if (blk_mq_hctx_stopped(hctx))
2359 continue;
2360 /*
2361 * Dispatch from this hctx either if there's no hctx preferred
2362 * by IO scheduler or if it has requests that bypass the
2363 * scheduler.
2364 */
2365 if (!sq_hctx || sq_hctx == hctx ||
2366 !list_empty_careful(&hctx->dispatch))
2367 blk_mq_run_hw_queue(hctx, async);
2368 }
2369 }
2370 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2371
2372 /**
2373 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2374 * @q: Pointer to the request queue to run.
2375 * @msecs: Milliseconds of delay to wait before running the queues.
2376 */
2377 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2378 {
2379 struct blk_mq_hw_ctx *hctx, *sq_hctx;
2380 unsigned long i;
2381
2382 sq_hctx = NULL;
2383 if (blk_queue_sq_sched(q))
2384 sq_hctx = blk_mq_get_sq_hctx(q);
2385 queue_for_each_hw_ctx(q, hctx, i) {
2386 if (blk_mq_hctx_stopped(hctx))
2387 continue;
2388 /*
2389 * If there is already a run_work pending, leave the
2390 * pending delay untouched. Otherwise, an hctx can stall
2391 * if another hctx keeps re-delaying its work before the
2392 * work gets to execute.
2393 */
2394 if (delayed_work_pending(&hctx->run_work))
2395 continue;
2396 /*
2397 * Dispatch from this hctx either if there's no hctx preferred
2398 * by IO scheduler or if it has requests that bypass the
2399 * scheduler.
2400 */
2401 if (!sq_hctx || sq_hctx == hctx ||
2402 !list_empty_careful(&hctx->dispatch))
2403 blk_mq_delay_run_hw_queue(hctx, msecs);
2404 }
2405 }
2406 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2407
2408 /*
2409 * This function is often used by drivers to pause .queue_rq() when there
2410 * aren't enough resources or some other condition isn't satisfied, in
2411 * which case BLK_STS_RESOURCE is usually returned.
2412 *
2413 * We do not guarantee that dispatch can be drained or blocked
2414 * after blk_mq_stop_hw_queue() returns. Please use
2415 * blk_mq_quiesce_queue() for that requirement.
2416 */
2417 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2418 {
2419 cancel_delayed_work(&hctx->run_work);
2420
2421 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2422 }
2423 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
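/*
 * A rough usage sketch (hypothetical, not taken from any specific driver):
 * a ->queue_rq() implementation that runs out of device resources may do
 *
 *	if (out_of_resources(dev)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_DEV_RESOURCE;
 *	}
 *
 * and later call blk_mq_start_stopped_hw_queues(q, true) from its completion
 * or resource-replenish path once requests can be accepted again.
 */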
2424
2425 /*
2426 * This function is often used by drivers to pause .queue_rq() when there
2427 * aren't enough resources or some other condition isn't satisfied, in
2428 * which case BLK_STS_RESOURCE is usually returned.
2429 *
2430 * We do not guarantee that dispatch can be drained or blocked
2431 * after blk_mq_stop_hw_queues() returns. Please use
2432 * blk_mq_quiesce_queue() for that requirement.
2433 */
2434 void blk_mq_stop_hw_queues(struct request_queue *q)
2435 {
2436 struct blk_mq_hw_ctx *hctx;
2437 unsigned long i;
2438
2439 queue_for_each_hw_ctx(q, hctx, i)
2440 blk_mq_stop_hw_queue(hctx);
2441 }
2442 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2443
2444 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2445 {
2446 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2447
2448 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2449 }
2450 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2451
2452 void blk_mq_start_hw_queues(struct request_queue *q)
2453 {
2454 struct blk_mq_hw_ctx *hctx;
2455 unsigned long i;
2456
2457 queue_for_each_hw_ctx(q, hctx, i)
2458 blk_mq_start_hw_queue(hctx);
2459 }
2460 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2461
2462 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2463 {
2464 if (!blk_mq_hctx_stopped(hctx))
2465 return;
2466
2467 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2468 /*
2469 * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
2470 * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch
2471 * list in the subsequent routine.
2472 */
2473 smp_mb__after_atomic();
2474 blk_mq_run_hw_queue(hctx, async);
2475 }
2476 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2477
2478 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2479 {
2480 struct blk_mq_hw_ctx *hctx;
2481 unsigned long i;
2482
2483 queue_for_each_hw_ctx(q, hctx, i)
2484 blk_mq_start_stopped_hw_queue(hctx, async ||
2485 (hctx->flags & BLK_MQ_F_BLOCKING));
2486 }
2487 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2488
2489 static void blk_mq_run_work_fn(struct work_struct *work)
2490 {
2491 struct blk_mq_hw_ctx *hctx =
2492 container_of(work, struct blk_mq_hw_ctx, run_work.work);
2493
2494 blk_mq_run_dispatch_ops(hctx->queue,
2495 blk_mq_sched_dispatch_requests(hctx));
2496 }
2497
2498 /**
2499 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2500 * @rq: Pointer to request to be inserted.
2501 * @flags: BLK_MQ_INSERT_*
2502 *
2503 * Should only be used carefully, when the caller knows we want to
2504 * bypass a potential IO scheduler on the target device.
2505 */
2506 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2507 {
2508 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2509
2510 spin_lock(&hctx->lock);
2511 if (flags & BLK_MQ_INSERT_AT_HEAD)
2512 list_add(&rq->queuelist, &hctx->dispatch);
2513 else
2514 list_add_tail(&rq->queuelist, &hctx->dispatch);
2515 spin_unlock(&hctx->lock);
2516 }
2517
2518 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
2519 struct blk_mq_ctx *ctx, struct list_head *list,
2520 bool run_queue_async)
2521 {
2522 struct request *rq;
2523 enum hctx_type type = hctx->type;
2524
2525 /*
2526 * Try to issue requests directly if the hw queue isn't busy to save an
2527 * extra enqueue & dequeue to the sw queue.
2528 */
2529 if (!hctx->dispatch_busy && !run_queue_async) {
2530 blk_mq_run_dispatch_ops(hctx->queue,
2531 blk_mq_try_issue_list_directly(hctx, list));
2532 if (list_empty(list))
2533 goto out;
2534 }
2535
2536 /*
2537 * preemption doesn't flush plug list, so it's possible ctx->cpu is
2538 * offline now
2539 */
2540 list_for_each_entry(rq, list, queuelist) {
2541 BUG_ON(rq->mq_ctx != ctx);
2542 trace_block_rq_insert(rq);
2543 if (rq->cmd_flags & REQ_NOWAIT)
2544 run_queue_async = true;
2545 }
2546
2547 spin_lock(&ctx->lock);
2548 list_splice_tail_init(list, &ctx->rq_lists[type]);
2549 blk_mq_hctx_mark_pending(hctx, ctx);
2550 spin_unlock(&ctx->lock);
2551 out:
2552 blk_mq_run_hw_queue(hctx, run_queue_async);
2553 }
2554
2555 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2556 {
2557 struct request_queue *q = rq->q;
2558 struct blk_mq_ctx *ctx = rq->mq_ctx;
2559 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2560
2561 if (blk_rq_is_passthrough(rq)) {
2562 /*
2563 * Passthrough requests have to be added to hctx->dispatch
2564 * directly. The device may be in a situation where it can't
2565 * handle FS requests, and always returns BLK_STS_RESOURCE for
2566 * them, which gets them added to hctx->dispatch.
2567 *
2568 * If a passthrough request is required to unblock the queues,
2569 * and it is added to the scheduler queue, there is no chance to
2570 * dispatch it given we prioritize requests in hctx->dispatch.
2571 */
2572 blk_mq_request_bypass_insert(rq, flags);
2573 } else if (req_op(rq) == REQ_OP_FLUSH) {
2574 /*
2575 * Normal IO requests are inserted into the scheduler queue or the
2576 * sw queue, while flush requests are added to the dispatch queue
2577 * (hctx->dispatch) directly. As there is at most one in-flight
2578 * flush request per hw queue, it doesn't matter whether the flush
2579 * request is added to the head or the tail of the dispatch queue.
2580 *
2581 * Also, with NCQ a flush request is a non-NCQ command, and queueing
2582 * it fails while any normal IO request (NCQ command) is in flight.
2583 * Adding the flush rq to the front of hctx->dispatch tends to add a
2584 * bit of latency to the flush rq (because of S_SCHED_RESTART)
2585 * compared with adding it to the tail, which in turn increases the
2586 * chance of flush merging, so fewer flush requests are issued to
2587 * the controller. It has been observed that ~10% of the time is
2588 * saved in blktests block/004 on a disk attached to an AHCI/NCQ
2589 * controller when the flush rq is added to the front of
2590 * hctx->dispatch.
2591 *
2592 * So simply queue the flush rq to the front of hctx->dispatch so
2593 * that flush-intensive workloads can benefit on NCQ hardware.
2594 */
2595 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2596 } else if (q->elevator) {
2597 LIST_HEAD(list);
2598
2599 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2600
2601 list_add(&rq->queuelist, &list);
2602 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2603 } else {
2604 trace_block_rq_insert(rq);
2605
2606 spin_lock(&ctx->lock);
2607 if (flags & BLK_MQ_INSERT_AT_HEAD)
2608 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2609 else
2610 list_add_tail(&rq->queuelist,
2611 &ctx->rq_lists[hctx->type]);
2612 blk_mq_hctx_mark_pending(hctx, ctx);
2613 spin_unlock(&ctx->lock);
2614 }
2615 }
2616
2617 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2618 unsigned int nr_segs)
2619 {
2620 int err;
2621
2622 if (bio->bi_opf & REQ_RAHEAD)
2623 rq->cmd_flags |= REQ_FAILFAST_MASK;
2624
2625 rq->__sector = bio->bi_iter.bi_sector;
2626 blk_rq_bio_prep(rq, bio, nr_segs);
2627
2628 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2629 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2630 WARN_ON_ONCE(err);
2631
2632 blk_account_io_start(rq);
2633 }
2634
2635 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2636 struct request *rq, bool last)
2637 {
2638 struct request_queue *q = rq->q;
2639 struct blk_mq_queue_data bd = {
2640 .rq = rq,
2641 .last = last,
2642 };
2643 blk_status_t ret;
2644
2645 /*
2646 * For OK queue, we are done. For error, caller may kill it.
2647 * Any other error (busy), just add it to our list as we
2648 * previously would have done.
2649 */
2650 ret = q->mq_ops->queue_rq(hctx, &bd);
2651 switch (ret) {
2652 case BLK_STS_OK:
2653 blk_mq_update_dispatch_busy(hctx, false);
2654 break;
2655 case BLK_STS_RESOURCE:
2656 case BLK_STS_DEV_RESOURCE:
2657 blk_mq_update_dispatch_busy(hctx, true);
2658 __blk_mq_requeue_request(rq);
2659 break;
2660 default:
2661 blk_mq_update_dispatch_busy(hctx, false);
2662 break;
2663 }
2664
2665 return ret;
2666 }
2667
2668 static bool blk_mq_get_budget_and_tag(struct request *rq)
2669 {
2670 int budget_token;
2671
2672 budget_token = blk_mq_get_dispatch_budget(rq->q);
2673 if (budget_token < 0)
2674 return false;
2675 blk_mq_set_rq_budget_token(rq, budget_token);
2676 if (!blk_mq_get_driver_tag(rq)) {
2677 blk_mq_put_dispatch_budget(rq->q, budget_token);
2678 return false;
2679 }
2680 return true;
2681 }
2682
2683 /**
2684 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2685 * @hctx: Pointer to the associated hardware queue.
2686 * @rq: Pointer to request to be sent.
2687 *
2688 * If the device has enough resources to accept a new request now, send the
2689 * request directly to the device driver. Else, insert it into the
2690 * hctx->dispatch queue so we can try to send it again later. Requests
2691 * inserted into this queue have higher priority.
2692 */
2693 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2694 struct request *rq)
2695 {
2696 blk_status_t ret;
2697
2698 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2699 blk_mq_insert_request(rq, 0);
2700 blk_mq_run_hw_queue(hctx, false);
2701 return;
2702 }
2703
2704 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2705 blk_mq_insert_request(rq, 0);
2706 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2707 return;
2708 }
2709
2710 ret = __blk_mq_issue_directly(hctx, rq, true);
2711 switch (ret) {
2712 case BLK_STS_OK:
2713 break;
2714 case BLK_STS_RESOURCE:
2715 case BLK_STS_DEV_RESOURCE:
2716 blk_mq_request_bypass_insert(rq, 0);
2717 blk_mq_run_hw_queue(hctx, false);
2718 break;
2719 default:
2720 blk_mq_end_request(rq, ret);
2721 break;
2722 }
2723 }
2724
2725 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2726 {
2727 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2728
2729 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2730 blk_mq_insert_request(rq, 0);
2731 blk_mq_run_hw_queue(hctx, false);
2732 return BLK_STS_OK;
2733 }
2734
2735 if (!blk_mq_get_budget_and_tag(rq))
2736 return BLK_STS_RESOURCE;
2737 return __blk_mq_issue_directly(hctx, rq, last);
2738 }
2739
2740 static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2741 {
2742 struct blk_mq_hw_ctx *hctx = NULL;
2743 struct request *rq;
2744 int queued = 0;
2745 blk_status_t ret = BLK_STS_OK;
2746
2747 while ((rq = rq_list_pop(&plug->mq_list))) {
2748 bool last = rq_list_empty(plug->mq_list);
2749
2750 if (hctx != rq->mq_hctx) {
2751 if (hctx) {
2752 blk_mq_commit_rqs(hctx, queued, false);
2753 queued = 0;
2754 }
2755 hctx = rq->mq_hctx;
2756 }
2757
2758 ret = blk_mq_request_issue_directly(rq, last);
2759 switch (ret) {
2760 case BLK_STS_OK:
2761 queued++;
2762 break;
2763 case BLK_STS_RESOURCE:
2764 case BLK_STS_DEV_RESOURCE:
2765 blk_mq_request_bypass_insert(rq, 0);
2766 blk_mq_run_hw_queue(hctx, false);
2767 goto out;
2768 default:
2769 blk_mq_end_request(rq, ret);
2770 break;
2771 }
2772 }
2773
2774 out:
2775 if (ret != BLK_STS_OK)
2776 blk_mq_commit_rqs(hctx, queued, false);
2777 }
2778
2779 static void __blk_mq_flush_plug_list(struct request_queue *q,
2780 struct blk_plug *plug)
2781 {
2782 if (blk_queue_quiesced(q))
2783 return;
2784 q->mq_ops->queue_rqs(&plug->mq_list);
2785 }
2786
2787 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2788 {
2789 struct blk_mq_hw_ctx *this_hctx = NULL;
2790 struct blk_mq_ctx *this_ctx = NULL;
2791 struct request *requeue_list = NULL;
2792 struct request **requeue_lastp = &requeue_list;
2793 unsigned int depth = 0;
2794 bool is_passthrough = false;
2795 LIST_HEAD(list);
2796
2797 do {
2798 struct request *rq = rq_list_pop(&plug->mq_list);
2799
2800 if (!this_hctx) {
2801 this_hctx = rq->mq_hctx;
2802 this_ctx = rq->mq_ctx;
2803 is_passthrough = blk_rq_is_passthrough(rq);
2804 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2805 is_passthrough != blk_rq_is_passthrough(rq)) {
2806 rq_list_add_tail(&requeue_lastp, rq);
2807 continue;
2808 }
2809 list_add(&rq->queuelist, &list);
2810 depth++;
2811 } while (!rq_list_empty(plug->mq_list));
2812
2813 plug->mq_list = requeue_list;
2814 trace_block_unplug(this_hctx->queue, depth, !from_sched);
2815
2816 percpu_ref_get(&this_hctx->queue->q_usage_counter);
2817 /* passthrough requests should never be issued to the I/O scheduler */
2818 if (is_passthrough) {
2819 spin_lock(&this_hctx->lock);
2820 list_splice_tail_init(&list, &this_hctx->dispatch);
2821 spin_unlock(&this_hctx->lock);
2822 blk_mq_run_hw_queue(this_hctx, from_sched);
2823 } else if (this_hctx->queue->elevator) {
2824 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
2825 &list, 0);
2826 blk_mq_run_hw_queue(this_hctx, from_sched);
2827 } else {
2828 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
2829 }
2830 percpu_ref_put(&this_hctx->queue->q_usage_counter);
2831 }
2832
2833 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2834 {
2835 struct request *rq;
2836
2837 /*
2838 * We may have been called recursively midway through handling
2839 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
2840 * To avoid mq_list changing under our feet, clear rq_count early and
2841 * bail out specifically if rq_count is 0 rather than checking
2842 * whether the mq_list is empty.
2843 */
2844 if (plug->rq_count == 0)
2845 return;
2846 plug->rq_count = 0;
2847
2848 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2849 struct request_queue *q;
2850
2851 rq = rq_list_peek(&plug->mq_list);
2852 q = rq->q;
2853
2854 /*
2855 * Peek first request and see if we have a ->queue_rqs() hook.
2856 * If we do, we can dispatch the whole plug list in one go. We
2857 * already know at this point that all requests belong to the
2858 * same queue, caller must ensure that's the case.
2859 *
2860 * Since we pass off the full list to the driver at this point,
2861 * we do not increment the active request count for the queue.
2862 * Bypass shared tags for now because of that.
2863 */
2864 if (q->mq_ops->queue_rqs &&
2865 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2866 blk_mq_run_dispatch_ops(q,
2867 __blk_mq_flush_plug_list(q, plug));
2868 if (rq_list_empty(plug->mq_list))
2869 return;
2870 }
2871
2872 blk_mq_run_dispatch_ops(q,
2873 blk_mq_plug_issue_direct(plug));
2874 if (rq_list_empty(plug->mq_list))
2875 return;
2876 }
2877
2878 do {
2879 blk_mq_dispatch_plug_list(plug, from_schedule);
2880 } while (!rq_list_empty(plug->mq_list));
2881 }
2882
2883 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2884 struct list_head *list)
2885 {
2886 int queued = 0;
2887 blk_status_t ret = BLK_STS_OK;
2888
2889 while (!list_empty(list)) {
2890 struct request *rq = list_first_entry(list, struct request,
2891 queuelist);
2892
2893 list_del_init(&rq->queuelist);
2894 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2895 switch (ret) {
2896 case BLK_STS_OK:
2897 queued++;
2898 break;
2899 case BLK_STS_RESOURCE:
2900 case BLK_STS_DEV_RESOURCE:
2901 blk_mq_request_bypass_insert(rq, 0);
2902 if (list_empty(list))
2903 blk_mq_run_hw_queue(hctx, false);
2904 goto out;
2905 default:
2906 blk_mq_end_request(rq, ret);
2907 break;
2908 }
2909 }
2910
2911 out:
2912 if (ret != BLK_STS_OK)
2913 blk_mq_commit_rqs(hctx, queued, false);
2914 }
2915
2916 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2917 struct bio *bio, unsigned int nr_segs)
2918 {
2919 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2920 if (blk_attempt_plug_merge(q, bio, nr_segs))
2921 return true;
2922 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2923 return true;
2924 }
2925 return false;
2926 }
2927
2928 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2929 struct blk_plug *plug,
2930 struct bio *bio,
2931 unsigned int nsegs)
2932 {
2933 struct blk_mq_alloc_data data = {
2934 .q = q,
2935 .nr_tags = 1,
2936 .cmd_flags = bio->bi_opf,
2937 };
2938 struct request *rq;
2939
2940 if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2941 return NULL;
2942
2943 rq_qos_throttle(q, bio);
2944
2945 if (plug) {
2946 data.nr_tags = plug->nr_ios;
2947 plug->nr_ios = 1;
2948 data.cached_rq = &plug->cached_rq;
2949 }
2950
2951 rq = __blk_mq_alloc_requests(&data);
2952 if (rq)
2953 return rq;
2954 rq_qos_cleanup(q, bio);
2955 if (bio->bi_opf & REQ_NOWAIT)
2956 bio_wouldblock_error(bio);
2957 return NULL;
2958 }
2959
2960 /* return true if this @rq can be used for @bio */
2961 static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
2962 struct bio *bio)
2963 {
2964 enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
2965 enum hctx_type hctx_type = rq->mq_hctx->type;
2966
2967 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2968
2969 if (type != hctx_type &&
2970 !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
2971 return false;
2972 if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2973 return false;
2974
2975 /*
2976 * If any qos ->throttle() ends up blocking, we will have flushed the
2977 * plug and hence killed the cached_rq list as well. Pop this entry
2978 * before we throttle.
2979 */
2980 plug->cached_rq = rq_list_next(rq);
2981 rq_qos_throttle(rq->q, bio);
2982
2983 blk_mq_rq_time_init(rq, 0);
2984 rq->cmd_flags = bio->bi_opf;
2985 INIT_LIST_HEAD(&rq->queuelist);
2986 return true;
2987 }
2988
2989 /**
2990 * blk_mq_submit_bio - Create and send a request to block device.
2991 * @bio: Bio pointer.
2992 *
2993 * Builds up a request structure from @q and @bio and sends it to the device.
2994 * The request may not be queued directly to hardware if:
2995 * * This request can be merged with another one
2996 * * We want to place the request in the plug queue for possible future merging
2997 * * There is an IO scheduler active on this queue
2998 *
2999 * It will not queue the request if there is an error with the bio or with the
3000 * request creation.
3001 */
3002 void blk_mq_submit_bio(struct bio *bio)
3003 {
3004 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
3005 struct blk_plug *plug = blk_mq_plug(bio);
3006 const int is_sync = op_is_sync(bio->bi_opf);
3007 struct blk_mq_hw_ctx *hctx;
3008 struct request *rq = NULL;
3009 unsigned int nr_segs = 1;
3010 blk_status_t ret;
3011
3012 bio = blk_queue_bounce(bio, q);
3013
3014 if (plug) {
3015 rq = rq_list_peek(&plug->cached_rq);
3016 if (rq && rq->q != q)
3017 rq = NULL;
3018 }
3019 if (rq) {
3020 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
3021 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
3022 if (!bio)
3023 return;
3024 }
3025 if (!bio_integrity_prep(bio))
3026 return;
3027 if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
3028 return;
3029 if (blk_mq_can_use_cached_rq(rq, plug, bio))
3030 goto done;
3031 percpu_ref_get(&q->q_usage_counter);
3032 } else {
3033 if (unlikely(bio_queue_enter(bio)))
3034 return;
3035 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
3036 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
3037 if (!bio)
3038 goto fail;
3039 }
3040 if (!bio_integrity_prep(bio))
3041 goto fail;
3042 }
3043
3044 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3045 if (unlikely(!rq)) {
3046 fail:
3047 blk_queue_exit(q);
3048 return;
3049 }
3050
3051 done:
3052 trace_block_getrq(bio);
3053
3054 rq_qos_track(q, rq, bio);
3055
3056 blk_mq_bio_to_request(rq, bio, nr_segs);
3057
3058 ret = blk_crypto_rq_get_keyslot(rq);
3059 if (ret != BLK_STS_OK) {
3060 bio->bi_status = ret;
3061 bio_endio(bio);
3062 blk_mq_free_request(rq);
3063 return;
3064 }
3065
3066 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3067 return;
3068
3069 if (plug) {
3070 blk_add_rq_to_plug(plug, rq);
3071 return;
3072 }
3073
3074 hctx = rq->mq_hctx;
3075 if ((rq->rq_flags & RQF_USE_SCHED) ||
3076 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3077 blk_mq_insert_request(rq, 0);
3078 blk_mq_run_hw_queue(hctx, true);
3079 } else {
3080 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3081 }
3082 }
3083
3084 #ifdef CONFIG_BLK_MQ_STACKING
3085 /**
3086 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
3087 * @rq: the request being queued
3088 */
3089 blk_status_t blk_insert_cloned_request(struct request *rq)
3090 {
3091 struct request_queue *q = rq->q;
3092 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
3093 unsigned int max_segments = blk_rq_get_max_segments(rq);
3094 blk_status_t ret;
3095
3096 if (blk_rq_sectors(rq) > max_sectors) {
3097 /*
3098 * SCSI devices do not have a good way to report whether
3099 * Write Same/Zero is actually supported. If a device rejects
3100 * a non-read/write command (discard, write same, etc.) the
3101 * low-level device driver will set the relevant queue limit to
3102 * 0 to prevent blk-lib from issuing more of the offending
3103 * operations. Commands queued prior to the queue limit being
3104 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
3105 * errors being propagated to upper layers.
3106 */
3107 if (max_sectors == 0)
3108 return BLK_STS_NOTSUPP;
3109
3110 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
3111 __func__, blk_rq_sectors(rq), max_sectors);
3112 return BLK_STS_IOERR;
3113 }
3114
3115 /*
3116 * The queue settings related to segment counting may differ from the
3117 * original queue.
3118 */
3119 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3120 if (rq->nr_phys_segments > max_segments) {
3121 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
3122 __func__, rq->nr_phys_segments, max_segments);
3123 return BLK_STS_IOERR;
3124 }
3125
3126 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3127 return BLK_STS_IOERR;
3128
3129 ret = blk_crypto_rq_get_keyslot(rq);
3130 if (ret != BLK_STS_OK)
3131 return ret;
3132
3133 blk_account_io_start(rq);
3134
3135 /*
3136 * Since we have a scheduler attached on the top device,
3137 * bypass a potential scheduler on the bottom device for
3138 * insert.
3139 */
3140 blk_mq_run_dispatch_ops(q,
3141 ret = blk_mq_request_issue_directly(rq, true));
3142 if (ret)
3143 blk_account_io_done(rq, ktime_get_ns());
3144 return ret;
3145 }
3146 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
3147
3148 /**
3149 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3150 * @rq: the clone request to be cleaned up
3151 *
3152 * Description:
3153 * Free all bios in @rq for a cloned request.
3154 */
3155 void blk_rq_unprep_clone(struct request *rq)
3156 {
3157 struct bio *bio;
3158
3159 while ((bio = rq->bio) != NULL) {
3160 rq->bio = bio->bi_next;
3161
3162 bio_put(bio);
3163 }
3164 }
3165 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3166
3167 /**
3168 * blk_rq_prep_clone - Helper function to setup clone request
3169 * @rq: the request to be setup
3170 * @rq_src: original request to be cloned
3171 * @bs: bio_set that bios for clone are allocated from
3172 * @gfp_mask: memory allocation mask for bio
3173 * @bio_ctr: setup function to be called for each clone bio.
3174 * Returns %0 for success, non %0 for failure.
3175 * @data: private data to be passed to @bio_ctr
3176 *
3177 * Description:
3178 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3179 * Also, the pages which the original bios point to are not copied;
3180 * the cloned bios just point to the same pages.
3181 * So the cloned bios must be completed before the original bios,
3182 * which means the caller must complete @rq before @rq_src.
3183 */
3184 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3185 struct bio_set *bs, gfp_t gfp_mask,
3186 int (*bio_ctr)(struct bio *, struct bio *, void *),
3187 void *data)
3188 {
3189 struct bio *bio, *bio_src;
3190
3191 if (!bs)
3192 bs = &fs_bio_set;
3193
3194 __rq_for_each_bio(bio_src, rq_src) {
3195 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3196 bs);
3197 if (!bio)
3198 goto free_and_out;
3199
3200 if (bio_ctr && bio_ctr(bio, bio_src, data))
3201 goto free_and_out;
3202
3203 if (rq->bio) {
3204 rq->biotail->bi_next = bio;
3205 rq->biotail = bio;
3206 } else {
3207 rq->bio = rq->biotail = bio;
3208 }
3209 bio = NULL;
3210 }
3211
3212 /* Copy attributes of the original request to the clone request. */
3213 rq->__sector = blk_rq_pos(rq_src);
3214 rq->__data_len = blk_rq_bytes(rq_src);
3215 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3216 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3217 rq->special_vec = rq_src->special_vec;
3218 }
3219 rq->nr_phys_segments = rq_src->nr_phys_segments;
3220 rq->ioprio = rq_src->ioprio;
3221
3222 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3223 goto free_and_out;
3224
3225 return 0;
3226
3227 free_and_out:
3228 if (bio)
3229 bio_put(bio);
3230 blk_rq_unprep_clone(rq);
3231
3232 return -ENOMEM;
3233 }
3234 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
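/*
 * A rough usage sketch (hypothetical, modeled on request-based stacking
 * drivers such as dm-rq): allocate a clone on the bottom device's queue,
 * prepare it from the original request, then issue it directly:
 *
 *	clone = blk_mq_alloc_request(bottom_q, rq->cmd_flags, BLK_MQ_REQ_NOWAIT);
 *	if (!IS_ERR(clone) &&
 *	    !blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL)) {
 *		if (blk_insert_cloned_request(clone) != BLK_STS_OK)
 *			blk_rq_unprep_clone(clone);
 *	}
 *
 * Such drivers also call blk_rq_unprep_clone() on the completion path to
 * drop the cloned bios before freeing the clone itself.
 */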
3235 #endif /* CONFIG_BLK_MQ_STACKING */
3236
3237 /*
3238 * Steal bios from a request and add them to a bio list.
3239 * The request must not have been partially completed before.
3240 */
3241 void blk_steal_bios(struct bio_list *list, struct request *rq)
3242 {
3243 if (rq->bio) {
3244 if (list->tail)
3245 list->tail->bi_next = rq->bio;
3246 else
3247 list->head = rq->bio;
3248 list->tail = rq->biotail;
3249
3250 rq->bio = NULL;
3251 rq->biotail = NULL;
3252 }
3253
3254 rq->__data_len = 0;
3255 }
3256 EXPORT_SYMBOL_GPL(blk_steal_bios);
3257
3258 static size_t order_to_size(unsigned int order)
3259 {
3260 return (size_t)PAGE_SIZE << order;
3261 }
3262
3263 /* called before freeing request pool in @tags */
3264 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3265 struct blk_mq_tags *tags)
3266 {
3267 struct page *page;
3268 unsigned long flags;
3269
3270 /*
3271 * There is no need to clear the mapping if driver tags are not initialized
3272 * or the mapping belongs to the driver tags.
3273 */
3274 if (!drv_tags || drv_tags == tags)
3275 return;
3276
3277 list_for_each_entry(page, &tags->page_list, lru) {
3278 unsigned long start = (unsigned long)page_address(page);
3279 unsigned long end = start + order_to_size(page->private);
3280 int i;
3281
3282 for (i = 0; i < drv_tags->nr_tags; i++) {
3283 struct request *rq = drv_tags->rqs[i];
3284 unsigned long rq_addr = (unsigned long)rq;
3285
3286 if (rq_addr >= start && rq_addr < end) {
3287 WARN_ON_ONCE(req_ref_read(rq) != 0);
3288 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3289 }
3290 }
3291 }
3292
3293 /*
3294 * Wait until all pending tag iteration is done.
3295 *
3296 * The request references were cleared above, and that is guaranteed
3297 * to be observed after the ->lock is released.
3298 */
3299 spin_lock_irqsave(&drv_tags->lock, flags);
3300 spin_unlock_irqrestore(&drv_tags->lock, flags);
3301 }
3302
3303 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3304 unsigned int hctx_idx)
3305 {
3306 struct blk_mq_tags *drv_tags;
3307 struct page *page;
3308
3309 if (list_empty(&tags->page_list))
3310 return;
3311
3312 if (blk_mq_is_shared_tags(set->flags))
3313 drv_tags = set->shared_tags;
3314 else
3315 drv_tags = set->tags[hctx_idx];
3316
3317 if (tags->static_rqs && set->ops->exit_request) {
3318 int i;
3319
3320 for (i = 0; i < tags->nr_tags; i++) {
3321 struct request *rq = tags->static_rqs[i];
3322
3323 if (!rq)
3324 continue;
3325 set->ops->exit_request(set, rq, hctx_idx);
3326 tags->static_rqs[i] = NULL;
3327 }
3328 }
3329
3330 blk_mq_clear_rq_mapping(drv_tags, tags);
3331
3332 while (!list_empty(&tags->page_list)) {
3333 page = list_first_entry(&tags->page_list, struct page, lru);
3334 list_del_init(&page->lru);
3335 /*
3336 * Remove kmemleak object previously allocated in
3337 * blk_mq_alloc_rqs().
3338 */
3339 kmemleak_free(page_address(page));
3340 __free_pages(page, page->private);
3341 }
3342 }
3343
3344 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3345 {
3346 kfree(tags->rqs);
3347 tags->rqs = NULL;
3348 kfree(tags->static_rqs);
3349 tags->static_rqs = NULL;
3350
3351 blk_mq_free_tags(tags);
3352 }
3353
3354 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3355 unsigned int hctx_idx)
3356 {
3357 int i;
3358
3359 for (i = 0; i < set->nr_maps; i++) {
3360 unsigned int start = set->map[i].queue_offset;
3361 unsigned int end = start + set->map[i].nr_queues;
3362
3363 if (hctx_idx >= start && hctx_idx < end)
3364 break;
3365 }
3366
3367 if (i >= set->nr_maps)
3368 i = HCTX_TYPE_DEFAULT;
3369
3370 return i;
3371 }
3372
3373 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3374 unsigned int hctx_idx)
3375 {
3376 enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3377
3378 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3379 }
3380
3381 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3382 unsigned int hctx_idx,
3383 unsigned int nr_tags,
3384 unsigned int reserved_tags)
3385 {
3386 int node = blk_mq_get_hctx_node(set, hctx_idx);
3387 struct blk_mq_tags *tags;
3388
3389 if (node == NUMA_NO_NODE)
3390 node = set->numa_node;
3391
3392 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3393 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3394 if (!tags)
3395 return NULL;
3396
3397 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3398 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3399 node);
3400 if (!tags->rqs)
3401 goto err_free_tags;
3402
3403 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3404 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3405 node);
3406 if (!tags->static_rqs)
3407 goto err_free_rqs;
3408
3409 return tags;
3410
3411 err_free_rqs:
3412 kfree(tags->rqs);
3413 err_free_tags:
3414 blk_mq_free_tags(tags);
3415 return NULL;
3416 }
3417
3418 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3419 unsigned int hctx_idx, int node)
3420 {
3421 int ret;
3422
3423 if (set->ops->init_request) {
3424 ret = set->ops->init_request(set, rq, hctx_idx, node);
3425 if (ret)
3426 return ret;
3427 }
3428
3429 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3430 return 0;
3431 }
3432
3433 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3434 struct blk_mq_tags *tags,
3435 unsigned int hctx_idx, unsigned int depth)
3436 {
3437 unsigned int i, j, entries_per_page, max_order = 4;
3438 int node = blk_mq_get_hctx_node(set, hctx_idx);
3439 size_t rq_size, left;
3440
3441 if (node == NUMA_NO_NODE)
3442 node = set->numa_node;
3443
3444 INIT_LIST_HEAD(&tags->page_list);
3445
3446 /*
3447 * rq_size is the size of the request plus driver payload, rounded
3448 * to the cacheline size
3449 */
3450 rq_size = round_up(sizeof(struct request) + set->cmd_size,
3451 cache_line_size());
3452 left = rq_size * depth;
3453
3454 for (i = 0; i < depth; ) {
3455 int this_order = max_order;
3456 struct page *page;
3457 int to_do;
3458 void *p;
3459
3460 while (this_order && left < order_to_size(this_order - 1))
3461 this_order--;
3462
3463 do {
3464 page = alloc_pages_node(node,
3465 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3466 this_order);
3467 if (page)
3468 break;
3469 if (!this_order--)
3470 break;
3471 if (order_to_size(this_order) < rq_size)
3472 break;
3473 } while (1);
3474
3475 if (!page)
3476 goto fail;
3477
3478 page->private = this_order;
3479 list_add_tail(&page->lru, &tags->page_list);
3480
3481 p = page_address(page);
3482 /*
3483 * Allow kmemleak to scan these pages as they contain pointers
3484 * to additional allocations, e.g. those made via ops->init_request().
3485 */
3486 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3487 entries_per_page = order_to_size(this_order) / rq_size;
3488 to_do = min(entries_per_page, depth - i);
3489 left -= to_do * rq_size;
3490 for (j = 0; j < to_do; j++) {
3491 struct request *rq = p;
3492
3493 tags->static_rqs[i] = rq;
3494 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3495 tags->static_rqs[i] = NULL;
3496 goto fail;
3497 }
3498
3499 p += rq_size;
3500 i++;
3501 }
3502 }
3503 return 0;
3504
3505 fail:
3506 blk_mq_free_rqs(set, tags, hctx_idx);
3507 return -ENOMEM;
3508 }
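/*
 * Worked example (illustrative numbers only): if sizeof(struct request) plus
 * set->cmd_size rounds up to 512 bytes and depth is 256, 'left' starts at
 * 128KiB, so the first allocation tries an order-4 block (64KiB) holding 128
 * requests and the remaining 128 requests come from the next allocation.
 */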
3509
3510 struct rq_iter_data {
3511 struct blk_mq_hw_ctx *hctx;
3512 bool has_rq;
3513 };
3514
3515 static bool blk_mq_has_request(struct request *rq, void *data)
3516 {
3517 struct rq_iter_data *iter_data = data;
3518
3519 if (rq->mq_hctx != iter_data->hctx)
3520 return true;
3521 iter_data->has_rq = true;
3522 return false;
3523 }
3524
3525 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3526 {
3527 struct blk_mq_tags *tags = hctx->sched_tags ?
3528 hctx->sched_tags : hctx->tags;
3529 struct rq_iter_data data = {
3530 .hctx = hctx,
3531 };
3532
3533 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3534 return data.has_rq;
3535 }
3536
3537 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3538 struct blk_mq_hw_ctx *hctx)
3539 {
3540 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3541 return false;
3542 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3543 return false;
3544 return true;
3545 }
3546
3547 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3548 {
3549 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3550 struct blk_mq_hw_ctx, cpuhp_online);
3551
3552 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3553 !blk_mq_last_cpu_in_hctx(cpu, hctx))
3554 return 0;
3555
3556 /*
3557 * Prevent new requests from being allocated on the current hctx.
3558 *
3559 * The smp_mb__after_atomic() pairs with the implied barrier in
3560 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag
3561 * is seen once we return from the tag allocator.
3562 */
3563 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3564 smp_mb__after_atomic();
3565
3566 /*
3567 * Try to grab a reference to the queue and wait for any outstanding
3568 * requests. If we could not grab a reference the queue has been
3569 * frozen and there are no requests.
3570 */
3571 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3572 while (blk_mq_hctx_has_requests(hctx))
3573 msleep(5);
3574 percpu_ref_put(&hctx->queue->q_usage_counter);
3575 }
3576
3577 return 0;
3578 }
3579
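/*
 * CPU hotplug startup callback for CPUHP_AP_BLK_MQ_ONLINE: a CPU mapped
 * to this hctx came back online, so clear the inactive flag again.
 */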
3580 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3581 {
3582 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3583 struct blk_mq_hw_ctx, cpuhp_online);
3584
3585 if (cpumask_test_cpu(cpu, hctx->cpumask))
3586 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3587 return 0;
3588 }
3589
3590 /*
3591 * 'cpu' is going away. Splice any existing rq_list entries from this
3592 * software queue to the hw queue dispatch list, and ensure that it
3593 * gets run.
3594 */
3595 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3596 {
3597 struct blk_mq_hw_ctx *hctx;
3598 struct blk_mq_ctx *ctx;
3599 LIST_HEAD(tmp);
3600 enum hctx_type type;
3601
3602 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3603 if (!cpumask_test_cpu(cpu, hctx->cpumask))
3604 return 0;
3605
3606 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3607 type = hctx->type;
3608
3609 spin_lock(&ctx->lock);
3610 if (!list_empty(&ctx->rq_lists[type])) {
3611 list_splice_init(&ctx->rq_lists[type], &tmp);
3612 blk_mq_hctx_clear_pending(hctx, ctx);
3613 }
3614 spin_unlock(&ctx->lock);
3615
3616 if (list_empty(&tmp))
3617 return 0;
3618
3619 spin_lock(&hctx->lock);
3620 list_splice_tail_init(&tmp, &hctx->dispatch);
3621 spin_unlock(&hctx->lock);
3622
3623 blk_mq_run_hw_queue(hctx, true);
3624 return 0;
3625 }
3626
3627 static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3628 {
3629 lockdep_assert_held(&blk_mq_cpuhp_lock);
3630
3631 if (!(hctx->flags & BLK_MQ_F_STACKING) &&
3632 !hlist_unhashed(&hctx->cpuhp_online)) {
3633 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3634 &hctx->cpuhp_online);
3635 INIT_HLIST_NODE(&hctx->cpuhp_online);
3636 }
3637
3638 if (!hlist_unhashed(&hctx->cpuhp_dead)) {
3639 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3640 &hctx->cpuhp_dead);
3641 INIT_HLIST_NODE(&hctx->cpuhp_dead);
3642 }
3643 }
3644
3645 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3646 {
3647 mutex_lock(&blk_mq_cpuhp_lock);
3648 __blk_mq_remove_cpuhp(hctx);
3649 mutex_unlock(&blk_mq_cpuhp_lock);
3650 }
3651
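/*
 * Register the hctx with the cpuhp online/dead states if it is not
 * already hashed onto the instance lists (the online state is skipped
 * for BLK_MQ_F_STACKING queues). Caller must hold blk_mq_cpuhp_lock.
 */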
3652 static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
3653 {
3654 lockdep_assert_held(&blk_mq_cpuhp_lock);
3655
3656 if (!(hctx->flags & BLK_MQ_F_STACKING) &&
3657 hlist_unhashed(&hctx->cpuhp_online))
3658 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3659 &hctx->cpuhp_online);
3660
3661 if (hlist_unhashed(&hctx->cpuhp_dead))
3662 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3663 &hctx->cpuhp_dead);
3664 }
3665
3666 static void __blk_mq_remove_cpuhp_list(struct list_head *head)
3667 {
3668 struct blk_mq_hw_ctx *hctx;
3669
3670 lockdep_assert_held(&blk_mq_cpuhp_lock);
3671
3672 list_for_each_entry(hctx, head, hctx_list)
3673 __blk_mq_remove_cpuhp(hctx);
3674 }
3675
3676 /*
3677 * Unregister cpuhp callbacks from exited hw queues
3678 *
3679 * Safe to call if this `request_queue` is live
3680 */
3681 static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
3682 {
3683 LIST_HEAD(hctx_list);
3684
3685 spin_lock(&q->unused_hctx_lock);
3686 list_splice_init(&q->unused_hctx_list, &hctx_list);
3687 spin_unlock(&q->unused_hctx_lock);
3688
3689 mutex_lock(&blk_mq_cpuhp_lock);
3690 __blk_mq_remove_cpuhp_list(&hctx_list);
3691 mutex_unlock(&blk_mq_cpuhp_lock);
3692
3693 spin_lock(&q->unused_hctx_lock);
3694 list_splice(&hctx_list, &q->unused_hctx_list);
3695 spin_unlock(&q->unused_hctx_lock);
3696 }
3697
3698 /*
3699 * Register cpuhp callbacks from all hw queues
3700 *
3701 * Safe to call if this `request_queue` is live
3702 */
3703 static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
3704 {
3705 struct blk_mq_hw_ctx *hctx;
3706 unsigned long i;
3707
3708 mutex_lock(&blk_mq_cpuhp_lock);
3709 queue_for_each_hw_ctx(q, hctx, i)
3710 __blk_mq_add_cpuhp(hctx);
3711 mutex_unlock(&blk_mq_cpuhp_lock);
3712 }
3713
3714 /*
3715 * Before freeing the hw queue, clear the flush request reference in
3716 * tags->rqs[] to avoid a potential use-after-free.
3717 */
3718 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3719 unsigned int queue_depth, struct request *flush_rq)
3720 {
3721 int i;
3722 unsigned long flags;
3723
3724 /* The hw queue may not be mapped yet */
3725 if (!tags)
3726 return;
3727
3728 WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3729
3730 for (i = 0; i < queue_depth; i++)
3731 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3732
3733 /*
3734 * Wait until all pending iteration is done.
3735 *
3736 * The request reference is cleared and is guaranteed to be observed
3737 * after the ->lock is released.
3738 */
3739 spin_lock_irqsave(&tags->lock, flags);
3740 spin_unlock_irqrestore(&tags->lock, flags);
3741 }
3742
3743 /* hctx->ctxs will be freed in queue's release handler */
3744 static void blk_mq_exit_hctx(struct request_queue *q,
3745 struct blk_mq_tag_set *set,
3746 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3747 {
3748 struct request *flush_rq = hctx->fq->flush_rq;
3749
3750 if (blk_mq_hw_queue_mapped(hctx))
3751 blk_mq_tag_idle(hctx);
3752
3753 if (blk_queue_init_done(q))
3754 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3755 set->queue_depth, flush_rq);
3756 if (set->ops->exit_request)
3757 set->ops->exit_request(set, flush_rq, hctx_idx);
3758
3759 if (set->ops->exit_hctx)
3760 set->ops->exit_hctx(hctx, hctx_idx);
3761
3762 xa_erase(&q->hctx_table, hctx_idx);
3763
3764 spin_lock(&q->unused_hctx_lock);
3765 list_add(&hctx->hctx_list, &q->unused_hctx_list);
3766 spin_unlock(&q->unused_hctx_lock);
3767 }
3768
3769 static void blk_mq_exit_hw_queues(struct request_queue *q,
3770 struct blk_mq_tag_set *set, int nr_queue)
3771 {
3772 struct blk_mq_hw_ctx *hctx;
3773 unsigned long i;
3774
3775 queue_for_each_hw_ctx(q, hctx, i) {
3776 if (i == nr_queue)
3777 break;
3778 blk_mq_remove_cpuhp(hctx);
3779 blk_mq_exit_hctx(q, set, hctx, i);
3780 }
3781 }
3782
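/*
 * Driver-visible initialization of a hardware queue: wire up the tags,
 * call the driver's ->init_hctx(), initialize the flush request via
 * blk_mq_init_request(), and publish the hctx in q->hctx_table.
 */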
3783 static int blk_mq_init_hctx(struct request_queue *q,
3784 struct blk_mq_tag_set *set,
3785 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3786 {
3787 hctx->queue_num = hctx_idx;
3788
3789 hctx->tags = set->tags[hctx_idx];
3790
3791 if (set->ops->init_hctx &&
3792 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3793 goto fail;
3794
3795 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3796 hctx->numa_node))
3797 goto exit_hctx;
3798
3799 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3800 goto exit_flush_rq;
3801
3802 return 0;
3803
3804 exit_flush_rq:
3805 if (set->ops->exit_request)
3806 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3807 exit_hctx:
3808 if (set->ops->exit_hctx)
3809 set->ops->exit_hctx(hctx, hctx_idx);
3810 fail:
3811 return -1;
3812 }
3813
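/*
 * Allocate and set up the software state of a hardware queue: cpumask,
 * ctx map, dispatch list, flush queue and kobject. The hctx is not yet
 * connected to the driver at this point.
 */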
3814 static struct blk_mq_hw_ctx *
3815 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3816 int node)
3817 {
3818 struct blk_mq_hw_ctx *hctx;
3819 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3820
3821 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3822 if (!hctx)
3823 goto fail_alloc_hctx;
3824
3825 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3826 goto free_hctx;
3827
3828 atomic_set(&hctx->nr_active, 0);
3829 if (node == NUMA_NO_NODE)
3830 node = set->numa_node;
3831 hctx->numa_node = node;
3832
3833 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3834 spin_lock_init(&hctx->lock);
3835 INIT_LIST_HEAD(&hctx->dispatch);
3836 INIT_HLIST_NODE(&hctx->cpuhp_dead);
3837 INIT_HLIST_NODE(&hctx->cpuhp_online);
3838 hctx->queue = q;
3839 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3840
3841 INIT_LIST_HEAD(&hctx->hctx_list);
3842
3843 /*
3844 * Allocate space for all possible cpus to avoid allocation at
3845 * runtime
3846 */
3847 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3848 gfp, node);
3849 if (!hctx->ctxs)
3850 goto free_cpumask;
3851
3852 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3853 gfp, node, false, false))
3854 goto free_ctxs;
3855 hctx->nr_ctx = 0;
3856
3857 spin_lock_init(&hctx->dispatch_wait_lock);
3858 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3859 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3860
3861 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3862 if (!hctx->fq)
3863 goto free_bitmap;
3864
3865 blk_mq_hctx_kobj_init(hctx);
3866
3867 return hctx;
3868
3869 free_bitmap:
3870 sbitmap_free(&hctx->ctx_map);
3871 free_ctxs:
3872 kfree(hctx->ctxs);
3873 free_cpumask:
3874 free_cpumask_var(hctx->cpumask);
3875 free_hctx:
3876 kfree(hctx);
3877 fail_alloc_hctx:
3878 return NULL;
3879 }
3880
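/*
 * Initialize the per-CPU software queues (blk_mq_ctx) and, when there is
 * more than one hardware queue, pick a NUMA-local node for each hctx.
 */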
3881 static void blk_mq_init_cpu_queues(struct request_queue *q,
3882 unsigned int nr_hw_queues)
3883 {
3884 struct blk_mq_tag_set *set = q->tag_set;
3885 unsigned int i, j;
3886
3887 for_each_possible_cpu(i) {
3888 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3889 struct blk_mq_hw_ctx *hctx;
3890 int k;
3891
3892 __ctx->cpu = i;
3893 spin_lock_init(&__ctx->lock);
3894 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3895 INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3896
3897 __ctx->queue = q;
3898
3899 /*
3900 * Set the local node, IFF we have more than one hw queue. If
3901 * not, we remain on the home node of the device.
3902 */
3903 for (j = 0; j < set->nr_maps; j++) {
3904 hctx = blk_mq_map_queue_type(q, j, i);
3905 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3906 hctx->numa_node = cpu_to_node(i);
3907 }
3908 }
3909 }
3910
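/*
 * Allocate a tag map for hw queue @hctx_idx together with the static
 * requests backing it. Returns NULL if either allocation fails.
 */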
3911 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3912 unsigned int hctx_idx,
3913 unsigned int depth)
3914 {
3915 struct blk_mq_tags *tags;
3916 int ret;
3917
3918 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3919 if (!tags)
3920 return NULL;
3921
3922 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3923 if (ret) {
3924 blk_mq_free_rq_map(tags);
3925 return NULL;
3926 }
3927
3928 return tags;
3929 }
3930
3931 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3932 int hctx_idx)
3933 {
3934 if (blk_mq_is_shared_tags(set->flags)) {
3935 set->tags[hctx_idx] = set->shared_tags;
3936
3937 return true;
3938 }
3939
3940 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3941 set->queue_depth);
3942
3943 return set->tags[hctx_idx];
3944 }
3945
3946 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3947 struct blk_mq_tags *tags,
3948 unsigned int hctx_idx)
3949 {
3950 if (tags) {
3951 blk_mq_free_rqs(set, tags, hctx_idx);
3952 blk_mq_free_rq_map(tags);
3953 }
3954 }
3955
3956 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3957 unsigned int hctx_idx)
3958 {
3959 if (!blk_mq_is_shared_tags(set->flags))
3960 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3961
3962 set->tags[hctx_idx] = NULL;
3963 }
3964
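/*
 * (Re)build the software to hardware queue mapping: assign each per-CPU
 * ctx to a hctx for every queue map, allocate tags for newly mapped hw
 * queues, and free the tags of hw queues that end up with no ctx mapped.
 */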
3965 static void blk_mq_map_swqueue(struct request_queue *q)
3966 {
3967 unsigned int j, hctx_idx;
3968 unsigned long i;
3969 struct blk_mq_hw_ctx *hctx;
3970 struct blk_mq_ctx *ctx;
3971 struct blk_mq_tag_set *set = q->tag_set;
3972
3973 queue_for_each_hw_ctx(q, hctx, i) {
3974 cpumask_clear(hctx->cpumask);
3975 hctx->nr_ctx = 0;
3976 hctx->dispatch_from = NULL;
3977 }
3978
3979 /*
3980 * Map software to hardware queues.
3981 *
3982 * If the cpu isn't present, the cpu is mapped to the first hctx.
3983 */
3984 for_each_possible_cpu(i) {
3985
3986 ctx = per_cpu_ptr(q->queue_ctx, i);
3987 for (j = 0; j < set->nr_maps; j++) {
3988 if (!set->map[j].nr_queues) {
3989 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3990 HCTX_TYPE_DEFAULT, i);
3991 continue;
3992 }
3993 hctx_idx = set->map[j].mq_map[i];
3994 /* an unmapped hw queue can be remapped after the CPU topology changes */
3995 if (!set->tags[hctx_idx] &&
3996 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3997 /*
3998 * If tags initialization fails for some hctx,
3999 * that hctx won't be brought online. In this
4000 * case, remap the current ctx to hctx[0], which
4001 * is guaranteed to always have tags allocated.
4002 */
4003 set->map[j].mq_map[i] = 0;
4004 }
4005
4006 hctx = blk_mq_map_queue_type(q, j, i);
4007 ctx->hctxs[j] = hctx;
4008 /*
4009 * If the CPU is already set in the mask, then we've
4010 * mapped this one already. This can happen if
4011 * devices share queues across queue maps.
4012 */
4013 if (cpumask_test_cpu(i, hctx->cpumask))
4014 continue;
4015
4016 cpumask_set_cpu(i, hctx->cpumask);
4017 hctx->type = j;
4018 ctx->index_hw[hctx->type] = hctx->nr_ctx;
4019 hctx->ctxs[hctx->nr_ctx++] = ctx;
4020
4021 /*
4022 * If the nr_ctx type overflows, we have exceeded the
4023 * amount of sw queues we can support.
4024 */
4025 BUG_ON(!hctx->nr_ctx);
4026 }
4027
4028 for (; j < HCTX_MAX_TYPES; j++)
4029 ctx->hctxs[j] = blk_mq_map_queue_type(q,
4030 HCTX_TYPE_DEFAULT, i);
4031 }
4032
4033 queue_for_each_hw_ctx(q, hctx, i) {
4034 /*
4035 * If no software queues are mapped to this hardware queue,
4036 * disable it and free the request entries.
4037 */
4038 if (!hctx->nr_ctx) {
4039 /* Never unmap queue 0. We need it as a
4040 * fallback in case allocation fails during
4041 * a new remap
4042 */
4043 if (i)
4044 __blk_mq_free_map_and_rqs(set, i);
4045
4046 hctx->tags = NULL;
4047 continue;
4048 }
4049
4050 hctx->tags = set->tags[i];
4051 WARN_ON(!hctx->tags);
4052
4053 /*
4054 * Set the map size to the number of mapped software queues.
4055 * This is more accurate and more efficient than looping
4056 * over all possibly mapped software queues.
4057 */
4058 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
4059
4060 /*
4061 * Initialize batch roundrobin counts
4062 */
4063 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
4064 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
4065 }
4066 }
4067
4068 /*
4069 * Caller needs to ensure that we're either frozen/quiesced, or that
4070 * the queue isn't live yet.
4071 */
4072 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
4073 {
4074 struct blk_mq_hw_ctx *hctx;
4075 unsigned long i;
4076
4077 queue_for_each_hw_ctx(q, hctx, i) {
4078 if (shared) {
4079 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4080 } else {
4081 blk_mq_tag_idle(hctx);
4082 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
4083 }
4084 }
4085 }
4086
4087 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
4088 bool shared)
4089 {
4090 struct request_queue *q;
4091
4092 lockdep_assert_held(&set->tag_list_lock);
4093
4094 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4095 blk_mq_freeze_queue(q);
4096 queue_set_hctx_shared(q, shared);
4097 blk_mq_unfreeze_queue(q);
4098 }
4099 }
4100
4101 static void blk_mq_del_queue_tag_set(struct request_queue *q)
4102 {
4103 struct blk_mq_tag_set *set = q->tag_set;
4104
4105 mutex_lock(&set->tag_list_lock);
4106 list_del(&q->tag_set_list);
4107 if (list_is_singular(&set->tag_list)) {
4108 /* just transitioned to unshared */
4109 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
4110 /* update existing queue */
4111 blk_mq_update_tag_set_shared(set, false);
4112 }
4113 mutex_unlock(&set->tag_list_lock);
4114 INIT_LIST_HEAD(&q->tag_set_list);
4115 }
4116
4117 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
4118 struct request_queue *q)
4119 {
4120 mutex_lock(&set->tag_list_lock);
4121
4122 /*
4123 * Check to see if we're transitioning to shared (from 1 to 2 queues).
4124 */
4125 if (!list_empty(&set->tag_list) &&
4126 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
4127 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4128 /* update existing queue */
4129 blk_mq_update_tag_set_shared(set, true);
4130 }
4131 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
4132 queue_set_hctx_shared(q, true);
4133 list_add_tail(&q->tag_set_list, &set->tag_list);
4134
4135 mutex_unlock(&set->tag_list_lock);
4136 }
4137
4138 /* All allocations will be freed in release handler of q->mq_kobj */
4139 static int blk_mq_alloc_ctxs(struct request_queue *q)
4140 {
4141 struct blk_mq_ctxs *ctxs;
4142 int cpu;
4143
4144 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
4145 if (!ctxs)
4146 return -ENOMEM;
4147
4148 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
4149 if (!ctxs->queue_ctx)
4150 goto fail;
4151
4152 for_each_possible_cpu(cpu) {
4153 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
4154 ctx->ctxs = ctxs;
4155 }
4156
4157 q->mq_kobj = &ctxs->kobj;
4158 q->queue_ctx = ctxs->queue_ctx;
4159
4160 return 0;
4161 fail:
4162 kfree(ctxs);
4163 return -ENOMEM;
4164 }
4165
4166 /*
4167 * This is the actual release handler for mq, but we do it from the
4168 * request queue's release handler to avoid use-after-free and other
4169 * headaches: q->mq_kobj shouldn't have been introduced in the first
4170 * place, but we can't group the ctx/kctx kobjects without it.
4171 */
4172 void blk_mq_release(struct request_queue *q)
4173 {
4174 struct blk_mq_hw_ctx *hctx, *next;
4175 unsigned long i;
4176
4177 queue_for_each_hw_ctx(q, hctx, i)
4178 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
4179
4180 /* all hctx are in .unused_hctx_list now */
4181 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4182 list_del_init(&hctx->hctx_list);
4183 kobject_put(&hctx->kobj);
4184 }
4185
4186 xa_destroy(&q->hctx_table);
4187
4188 /*
4189 * Release .mq_kobj and the sw queues' kobjects now, because
4190 * both share their lifetime with the request queue.
4191 */
4192 blk_mq_sysfs_deinit(q);
4193 }
4194
4195 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
4196 void *queuedata)
4197 {
4198 struct request_queue *q;
4199 int ret;
4200
4201 q = blk_alloc_queue(set->numa_node);
4202 if (!q)
4203 return ERR_PTR(-ENOMEM);
4204 q->queuedata = queuedata;
4205 ret = blk_mq_init_allocated_queue(set, q);
4206 if (ret) {
4207 blk_put_queue(q);
4208 return ERR_PTR(ret);
4209 }
4210 return q;
4211 }
4212
4213 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
4214 {
4215 return blk_mq_init_queue_data(set, NULL);
4216 }
4217 EXPORT_SYMBOL(blk_mq_init_queue);
4218
4219 /**
4220 * blk_mq_destroy_queue - shutdown a request queue
4221 * @q: request queue to shutdown
4222 *
4223 * This shuts down a request queue allocated by blk_mq_init_queue(). All future
4224 * requests will be failed with -ENODEV. The caller is responsible for dropping
4225 * the reference from blk_mq_init_queue() by calling blk_put_queue().
4226 *
4227 * Context: can sleep
4228 */
4229 void blk_mq_destroy_queue(struct request_queue *q)
4230 {
4231 WARN_ON_ONCE(!queue_is_mq(q));
4232 WARN_ON_ONCE(blk_queue_registered(q));
4233
4234 might_sleep();
4235
4236 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4237 blk_queue_start_drain(q);
4238 blk_mq_freeze_queue_wait(q);
4239
4240 blk_sync_queue(q);
4241 blk_mq_cancel_work_sync(q);
4242 blk_mq_exit_queue(q);
4243 }
4244 EXPORT_SYMBOL(blk_mq_destroy_queue);
4245
4246 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4247 struct lock_class_key *lkclass)
4248 {
4249 struct request_queue *q;
4250 struct gendisk *disk;
4251
4252 q = blk_mq_init_queue_data(set, queuedata);
4253 if (IS_ERR(q))
4254 return ERR_CAST(q);
4255
4256 disk = __alloc_disk_node(q, set->numa_node, lkclass);
4257 if (!disk) {
4258 blk_mq_destroy_queue(q);
4259 blk_put_queue(q);
4260 return ERR_PTR(-ENOMEM);
4261 }
4262 set_bit(GD_OWNS_QUEUE, &disk->state);
4263 return disk;
4264 }
4265 EXPORT_SYMBOL(__blk_mq_alloc_disk);
4266
4267 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4268 struct lock_class_key *lkclass)
4269 {
4270 struct gendisk *disk;
4271
4272 if (!blk_get_queue(q))
4273 return NULL;
4274 disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4275 if (!disk)
4276 blk_put_queue(q);
4277 return disk;
4278 }
4279 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4280
4281 /*
4282 * Only an hctx that has been removed from the cpuhp lists can be reused.
4283 */
4284 static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx)
4285 {
4286 return hlist_unhashed(&hctx->cpuhp_online) &&
4287 hlist_unhashed(&hctx->cpuhp_dead);
4288 }
4289
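/*
 * Get a hardware queue for @hctx_idx: prefer reusing a dead hctx from
 * the unused list on the same node, otherwise allocate a fresh one, then
 * initialize it for the driver.
 */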
4290 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4291 struct blk_mq_tag_set *set, struct request_queue *q,
4292 int hctx_idx, int node)
4293 {
4294 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4295
4296 /* reuse dead hctx first */
4297 spin_lock(&q->unused_hctx_lock);
4298 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4299 if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) {
4300 hctx = tmp;
4301 break;
4302 }
4303 }
4304 if (hctx)
4305 list_del_init(&hctx->hctx_list);
4306 spin_unlock(&q->unused_hctx_lock);
4307
4308 if (!hctx)
4309 hctx = blk_mq_alloc_hctx(q, set, node);
4310 if (!hctx)
4311 goto fail;
4312
4313 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4314 goto free_hctx;
4315
4316 return hctx;
4317
4318 free_hctx:
4319 kobject_put(&hctx->kobj);
4320 fail:
4321 return NULL;
4322 }
4323
4324 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4325 struct request_queue *q)
4326 {
4327 struct blk_mq_hw_ctx *hctx;
4328 unsigned long i, j;
4329
4330 /* protect against switching io scheduler */
4331 mutex_lock(&q->sysfs_lock);
4332 for (i = 0; i < set->nr_hw_queues; i++) {
4333 int old_node;
4334 int node = blk_mq_get_hctx_node(set, i);
4335 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4336
4337 if (old_hctx) {
4338 old_node = old_hctx->numa_node;
4339 blk_mq_exit_hctx(q, set, old_hctx, i);
4340 }
4341
4342 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4343 if (!old_hctx)
4344 break;
4345 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
4346 node, old_node);
4347 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4348 WARN_ON_ONCE(!hctx);
4349 }
4350 }
4351 /*
4352 * Increasing nr_hw_queues fails. Free the newly allocated
4353 * hctxs and keep the previous q->nr_hw_queues.
4354 */
4355 if (i != set->nr_hw_queues) {
4356 j = q->nr_hw_queues;
4357 } else {
4358 j = i;
4359 q->nr_hw_queues = set->nr_hw_queues;
4360 }
4361
4362 xa_for_each_start(&q->hctx_table, j, hctx, j)
4363 blk_mq_exit_hctx(q, set, hctx, j);
4364 mutex_unlock(&q->sysfs_lock);
4365
4366 /* unregister cpuhp callbacks for exited hctxs */
4367 blk_mq_remove_hw_queues_cpuhp(q);
4368
4369 /* register cpuhp for new initialized hctxs */
4370 blk_mq_add_hw_queues_cpuhp(q);
4371 }
4372
4373 static void blk_mq_update_poll_flag(struct request_queue *q)
4374 {
4375 struct blk_mq_tag_set *set = q->tag_set;
4376
4377 if (set->nr_maps > HCTX_TYPE_POLL &&
4378 set->map[HCTX_TYPE_POLL].nr_queues)
4379 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4380 else
4381 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4382 }
4383
4384 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4385 struct request_queue *q)
4386 {
4387 /* mark the queue as mq asap */
4388 q->mq_ops = set->ops;
4389
4390 if (blk_mq_alloc_ctxs(q))
4391 goto err_exit;
4392
4393 /* init q->mq_kobj and sw queues' kobjects */
4394 blk_mq_sysfs_init(q);
4395
4396 INIT_LIST_HEAD(&q->unused_hctx_list);
4397 spin_lock_init(&q->unused_hctx_lock);
4398
4399 xa_init(&q->hctx_table);
4400
4401 blk_mq_realloc_hw_ctxs(set, q);
4402 if (!q->nr_hw_queues)
4403 goto err_hctxs;
4404
4405 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4406 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4407
4408 q->tag_set = set;
4409
4410 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4411 blk_mq_update_poll_flag(q);
4412
4413 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4414 INIT_LIST_HEAD(&q->flush_list);
4415 INIT_LIST_HEAD(&q->requeue_list);
4416 spin_lock_init(&q->requeue_lock);
4417
4418 q->nr_requests = set->queue_depth;
4419
4420 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4421 blk_mq_add_queue_tag_set(set, q);
4422 blk_mq_map_swqueue(q);
4423 return 0;
4424
4425 err_hctxs:
4426 blk_mq_release(q);
4427 err_exit:
4428 q->mq_ops = NULL;
4429 return -ENOMEM;
4430 }
4431 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4432
4433 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4434 void blk_mq_exit_queue(struct request_queue *q)
4435 {
4436 struct blk_mq_tag_set *set = q->tag_set;
4437
4438 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4439 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4440 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4441 blk_mq_del_queue_tag_set(q);
4442 }
4443
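/*
 * Allocate the request maps for every hw queue in the set; with shared
 * tags a single map is allocated and shared by all hw queues. Unwinds
 * all allocations on failure.
 */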
4444 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4445 {
4446 int i;
4447
4448 if (blk_mq_is_shared_tags(set->flags)) {
4449 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4450 BLK_MQ_NO_HCTX_IDX,
4451 set->queue_depth);
4452 if (!set->shared_tags)
4453 return -ENOMEM;
4454 }
4455
4456 for (i = 0; i < set->nr_hw_queues; i++) {
4457 if (!__blk_mq_alloc_map_and_rqs(set, i))
4458 goto out_unwind;
4459 cond_resched();
4460 }
4461
4462 return 0;
4463
4464 out_unwind:
4465 while (--i >= 0)
4466 __blk_mq_free_map_and_rqs(set, i);
4467
4468 if (blk_mq_is_shared_tags(set->flags)) {
4469 blk_mq_free_map_and_rqs(set, set->shared_tags,
4470 BLK_MQ_NO_HCTX_IDX);
4471 }
4472
4473 return -ENOMEM;
4474 }
4475
4476 /*
4477 * Allocate the request maps associated with this tag_set. Note that this
4478 * may reduce the depth asked for, if memory is tight. set->queue_depth
4479 * will be updated to reflect the allocated depth.
4480 */
4481 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4482 {
4483 unsigned int depth;
4484 int err;
4485
4486 depth = set->queue_depth;
4487 do {
4488 err = __blk_mq_alloc_rq_maps(set);
4489 if (!err)
4490 break;
4491
4492 set->queue_depth >>= 1;
4493 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4494 err = -ENOMEM;
4495 break;
4496 }
4497 } while (set->queue_depth);
4498
4499 if (!set->queue_depth || err) {
4500 pr_err("blk-mq: failed to allocate request map\n");
4501 return -ENOMEM;
4502 }
4503
4504 if (depth != set->queue_depth)
4505 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4506 depth, set->queue_depth);
4507
4508 return 0;
4509 }
4510
4511 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4512 {
4513 /*
4514 * blk_mq_map_queues() and multiple .map_queues() implementations
4515 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4516 * number of hardware queues.
4517 */
4518 if (set->nr_maps == 1)
4519 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4520
4521 if (set->ops->map_queues && !is_kdump_kernel()) {
4522 int i;
4523
4524 /*
4525 * A transport's .map_queues callback usually works in the
4526 * following way:
4527 *
4528 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4529 * mask = get_cpu_mask(queue)
4530 * for_each_cpu(cpu, mask)
4531 * set->map[x].mq_map[cpu] = queue;
4532 * }
4533 *
4534 * When we need to remap, the table has to be cleared to
4535 * kill stale mappings, since one CPU may not be mapped
4536 * to any hw queue.
4537 */
4538 for (i = 0; i < set->nr_maps; i++)
4539 blk_mq_clear_mq_map(&set->map[i]);
4540
4541 set->ops->map_queues(set);
4542 } else {
4543 BUG_ON(set->nr_maps > 1);
4544 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4545 }
4546 }
4547
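/*
 * Grow set->tags to cover @new_nr_hw_queues and allocate maps and
 * requests for the newly added hw queues. When shrinking, only
 * nr_hw_queues is updated here; excess tags are freed by the caller.
 */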
4548 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4549 int new_nr_hw_queues)
4550 {
4551 struct blk_mq_tags **new_tags;
4552 int i;
4553
4554 if (set->nr_hw_queues >= new_nr_hw_queues)
4555 goto done;
4556
4557 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4558 GFP_KERNEL, set->numa_node);
4559 if (!new_tags)
4560 return -ENOMEM;
4561
4562 if (set->tags)
4563 memcpy(new_tags, set->tags, set->nr_hw_queues *
4564 sizeof(*set->tags));
4565 kfree(set->tags);
4566 set->tags = new_tags;
4567
4568 for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
4569 if (!__blk_mq_alloc_map_and_rqs(set, i)) {
4570 while (--i >= set->nr_hw_queues)
4571 __blk_mq_free_map_and_rqs(set, i);
4572 return -ENOMEM;
4573 }
4574 cond_resched();
4575 }
4576
4577 done:
4578 set->nr_hw_queues = new_nr_hw_queues;
4579 return 0;
4580 }
4581
4582 /*
4583 * Alloc a tag set to be associated with one or more request queues.
4584 * May fail with EINVAL for various error conditions. May adjust the
4585 * requested depth down, if it's too large. In that case, the set
4586 * value will be stored in set->queue_depth.
4587 */
4588 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4589 {
4590 int i, ret;
4591
4592 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4593
4594 if (!set->nr_hw_queues)
4595 return -EINVAL;
4596 if (!set->queue_depth)
4597 return -EINVAL;
4598 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4599 return -EINVAL;
4600
4601 if (!set->ops->queue_rq)
4602 return -EINVAL;
4603
4604 if (!set->ops->get_budget ^ !set->ops->put_budget)
4605 return -EINVAL;
4606
4607 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4608 pr_info("blk-mq: reduced tag depth to %u\n",
4609 BLK_MQ_MAX_DEPTH);
4610 set->queue_depth = BLK_MQ_MAX_DEPTH;
4611 }
4612
4613 if (!set->nr_maps)
4614 set->nr_maps = 1;
4615 else if (set->nr_maps > HCTX_MAX_TYPES)
4616 return -EINVAL;
4617
4618 /*
4619 * If a crashdump is active, then we are potentially in a very
4620 * memory constrained environment. Limit us to 1 queue and
4621 * 64 tags to prevent using too much memory.
4622 */
4623 if (is_kdump_kernel()) {
4624 set->nr_hw_queues = 1;
4625 set->nr_maps = 1;
4626 set->queue_depth = min(64U, set->queue_depth);
4627 }
4628 /*
4629 * There is no use for more h/w queues than cpus if we just have
4630 * a single map
4631 */
4632 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4633 set->nr_hw_queues = nr_cpu_ids;
4634
4635 if (set->flags & BLK_MQ_F_BLOCKING) {
4636 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
4637 if (!set->srcu)
4638 return -ENOMEM;
4639 ret = init_srcu_struct(set->srcu);
4640 if (ret)
4641 goto out_free_srcu;
4642 }
4643
4644 ret = -ENOMEM;
4645 set->tags = kcalloc_node(set->nr_hw_queues,
4646 sizeof(struct blk_mq_tags *), GFP_KERNEL,
4647 set->numa_node);
4648 if (!set->tags)
4649 goto out_cleanup_srcu;
4650
4651 for (i = 0; i < set->nr_maps; i++) {
4652 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4653 sizeof(set->map[i].mq_map[0]),
4654 GFP_KERNEL, set->numa_node);
4655 if (!set->map[i].mq_map)
4656 goto out_free_mq_map;
4657 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4658 }
4659
4660 blk_mq_update_queue_map(set);
4661
4662 ret = blk_mq_alloc_set_map_and_rqs(set);
4663 if (ret)
4664 goto out_free_mq_map;
4665
4666 mutex_init(&set->tag_list_lock);
4667 INIT_LIST_HEAD(&set->tag_list);
4668
4669 return 0;
4670
4671 out_free_mq_map:
4672 for (i = 0; i < set->nr_maps; i++) {
4673 kfree(set->map[i].mq_map);
4674 set->map[i].mq_map = NULL;
4675 }
4676 kfree(set->tags);
4677 set->tags = NULL;
4678 out_cleanup_srcu:
4679 if (set->flags & BLK_MQ_F_BLOCKING)
4680 cleanup_srcu_struct(set->srcu);
4681 out_free_srcu:
4682 if (set->flags & BLK_MQ_F_BLOCKING)
4683 kfree(set->srcu);
4684 return ret;
4685 }
4686 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
4687
4688 /* allocate and initialize a tagset for a simple single-queue device */
4689 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4690 const struct blk_mq_ops *ops, unsigned int queue_depth,
4691 unsigned int set_flags)
4692 {
4693 memset(set, 0, sizeof(*set));
4694 set->ops = ops;
4695 set->nr_hw_queues = 1;
4696 set->nr_maps = 1;
4697 set->queue_depth = queue_depth;
4698 set->numa_node = NUMA_NO_NODE;
4699 set->flags = set_flags;
4700 return blk_mq_alloc_tag_set(set);
4701 }
4702 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
4703
4704 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4705 {
4706 int i, j;
4707
4708 for (i = 0; i < set->nr_hw_queues; i++)
4709 __blk_mq_free_map_and_rqs(set, i);
4710
4711 if (blk_mq_is_shared_tags(set->flags)) {
4712 blk_mq_free_map_and_rqs(set, set->shared_tags,
4713 BLK_MQ_NO_HCTX_IDX);
4714 }
4715
4716 for (j = 0; j < set->nr_maps; j++) {
4717 kfree(set->map[j].mq_map);
4718 set->map[j].mq_map = NULL;
4719 }
4720
4721 kfree(set->tags);
4722 set->tags = NULL;
4723 if (set->flags & BLK_MQ_F_BLOCKING) {
4724 cleanup_srcu_struct(set->srcu);
4725 kfree(set->srcu);
4726 }
4727 }
4728 EXPORT_SYMBOL(blk_mq_free_tag_set);
4729
4730 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4731 {
4732 struct blk_mq_tag_set *set = q->tag_set;
4733 struct blk_mq_hw_ctx *hctx;
4734 int ret;
4735 unsigned long i;
4736
4737 if (!set)
4738 return -EINVAL;
4739
4740 if (q->nr_requests == nr)
4741 return 0;
4742
4743 blk_mq_freeze_queue(q);
4744 blk_mq_quiesce_queue(q);
4745
4746 ret = 0;
4747 queue_for_each_hw_ctx(q, hctx, i) {
4748 if (!hctx->tags)
4749 continue;
4750 /*
4751 * If we're using an MQ scheduler, just update the scheduler
4752 * queue depth. This is similar to what the old code would do.
4753 */
4754 if (hctx->sched_tags) {
4755 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4756 nr, true);
4757 } else {
4758 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4759 false);
4760 }
4761 if (ret)
4762 break;
4763 if (q->elevator && q->elevator->type->ops.depth_updated)
4764 q->elevator->type->ops.depth_updated(hctx);
4765 }
4766 if (!ret) {
4767 q->nr_requests = nr;
4768 if (blk_mq_is_shared_tags(set->flags)) {
4769 if (q->elevator)
4770 blk_mq_tag_update_sched_shared_tags(q);
4771 else
4772 blk_mq_tag_resize_shared_tags(set, nr);
4773 }
4774 }
4775
4776 blk_mq_unquiesce_queue(q);
4777 blk_mq_unfreeze_queue(q);
4778
4779 return ret;
4780 }
4781
4782 /*
4783 * request_queue and elevator_type pair.
4784 * It is just used by __blk_mq_update_nr_hw_queues to cache
4785 * the elevator_type associated with a request_queue.
4786 */
4787 struct blk_mq_qe_pair {
4788 struct list_head node;
4789 struct request_queue *q;
4790 struct elevator_type *type;
4791 };
4792
4793 /*
4794 * Cache the elevator_type in qe pair list and switch the
4795 * io scheduler to 'none'
4796 */
4797 static bool blk_mq_elv_switch_none(struct list_head *head,
4798 struct request_queue *q)
4799 {
4800 struct blk_mq_qe_pair *qe;
4801
4802 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4803 if (!qe)
4804 return false;
4805
4806 /* q->elevator needs protection from ->sysfs_lock */
4807 mutex_lock(&q->sysfs_lock);
4808
4809 /* the check has to be done with holding sysfs_lock */
4810 if (!q->elevator) {
4811 kfree(qe);
4812 goto unlock;
4813 }
4814
4815 INIT_LIST_HEAD(&qe->node);
4816 qe->q = q;
4817 qe->type = q->elevator->type;
4818 /* keep a reference to the elevator module as we'll switch back */
4819 __elevator_get(qe->type);
4820 list_add(&qe->node, head);
4821 elevator_disable(q);
4822 unlock:
4823 mutex_unlock(&q->sysfs_lock);
4824
4825 return true;
4826 }
4827
4828 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4829 struct request_queue *q)
4830 {
4831 struct blk_mq_qe_pair *qe;
4832
4833 list_for_each_entry(qe, head, node)
4834 if (qe->q == q)
4835 return qe;
4836
4837 return NULL;
4838 }
4839
4840 static void blk_mq_elv_switch_back(struct list_head *head,
4841 struct request_queue *q)
4842 {
4843 struct blk_mq_qe_pair *qe;
4844 struct elevator_type *t;
4845
4846 qe = blk_lookup_qe_pair(head, q);
4847 if (!qe)
4848 return;
4849 t = qe->type;
4850 list_del(&qe->node);
4851 kfree(qe);
4852
4853 mutex_lock(&q->sysfs_lock);
4854 elevator_switch(q, t);
4855 /* drop the reference acquired in blk_mq_elv_switch_none */
4856 elevator_put(t);
4857 mutex_unlock(&q->sysfs_lock);
4858 }
4859
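/*
 * Change the number of hardware queues for all queues sharing this tag
 * set: freeze the queues, switch their elevators to 'none', reallocate
 * the hw contexts and mappings, then switch the elevators back and
 * unfreeze. Falls back to the previous count if growing fails.
 */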
4860 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4861 int nr_hw_queues)
4862 {
4863 struct request_queue *q;
4864 LIST_HEAD(head);
4865 int prev_nr_hw_queues = set->nr_hw_queues;
4866 int i;
4867
4868 lockdep_assert_held(&set->tag_list_lock);
4869
4870 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4871 nr_hw_queues = nr_cpu_ids;
4872 if (nr_hw_queues < 1)
4873 return;
4874 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4875 return;
4876
4877 list_for_each_entry(q, &set->tag_list, tag_set_list)
4878 blk_mq_freeze_queue(q);
4879 /*
4880 * Switch IO scheduler to 'none', cleaning up the data associated
4881 * with the previous scheduler. We will switch back once we are done
4882 * updating the new sw to hw queue mappings.
4883 */
4884 list_for_each_entry(q, &set->tag_list, tag_set_list)
4885 if (!blk_mq_elv_switch_none(&head, q))
4886 goto switch_back;
4887
4888 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4889 blk_mq_debugfs_unregister_hctxs(q);
4890 blk_mq_sysfs_unregister_hctxs(q);
4891 }
4892
4893 if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
4894 goto reregister;
4895
4896 fallback:
4897 blk_mq_update_queue_map(set);
4898 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4899 blk_mq_realloc_hw_ctxs(set, q);
4900 blk_mq_update_poll_flag(q);
4901 if (q->nr_hw_queues != set->nr_hw_queues) {
4902 int i = prev_nr_hw_queues;
4903
4904 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
4905 nr_hw_queues, prev_nr_hw_queues);
4906 for (; i < set->nr_hw_queues; i++)
4907 __blk_mq_free_map_and_rqs(set, i);
4908
4909 set->nr_hw_queues = prev_nr_hw_queues;
4910 goto fallback;
4911 }
4912 blk_mq_map_swqueue(q);
4913 }
4914
4915 reregister:
4916 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4917 blk_mq_sysfs_register_hctxs(q);
4918 blk_mq_debugfs_register_hctxs(q);
4919 }
4920
4921 switch_back:
4922 list_for_each_entry(q, &set->tag_list, tag_set_list)
4923 blk_mq_elv_switch_back(&head, q);
4924
4925 list_for_each_entry(q, &set->tag_list, tag_set_list)
4926 blk_mq_unfreeze_queue(q);
4927
4928 /* Free the excess tags when nr_hw_queues shrinks. */
4929 for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
4930 __blk_mq_free_map_and_rqs(set, i);
4931 }
4932
4933 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4934 {
4935 mutex_lock(&set->tag_list_lock);
4936 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4937 mutex_unlock(&set->tag_list_lock);
4938 }
4939 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
4940
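/*
 * Poll a single hardware queue for completions until the driver reports
 * progress, the task needs to reschedule, or a signal is pending.
 */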
4941 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4942 struct io_comp_batch *iob, unsigned int flags)
4943 {
4944 long state = get_current_state();
4945 int ret;
4946
4947 do {
4948 ret = q->mq_ops->poll(hctx, iob);
4949 if (ret > 0) {
4950 __set_current_state(TASK_RUNNING);
4951 return ret;
4952 }
4953
4954 if (signal_pending_state(state, current))
4955 __set_current_state(TASK_RUNNING);
4956 if (task_is_running(current))
4957 return 1;
4958
4959 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4960 break;
4961 cpu_relax();
4962 } while (!need_resched());
4963
4964 __set_current_state(TASK_RUNNING);
4965 return 0;
4966 }
4967
4968 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4969 struct io_comp_batch *iob, unsigned int flags)
4970 {
4971 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4972
4973 return blk_hctx_poll(q, hctx, iob, flags);
4974 }
4975
4976 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4977 unsigned int poll_flags)
4978 {
4979 struct request_queue *q = rq->q;
4980 int ret;
4981
4982 if (!blk_rq_is_poll(rq))
4983 return 0;
4984 if (!percpu_ref_tryget(&q->q_usage_counter))
4985 return 0;
4986
4987 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4988 blk_queue_exit(q);
4989
4990 return ret;
4991 }
4992 EXPORT_SYMBOL_GPL(blk_rq_poll);
4993
4994 unsigned int blk_mq_rq_cpu(struct request *rq)
4995 {
4996 return rq->mq_ctx->cpu;
4997 }
4998 EXPORT_SYMBOL(blk_mq_rq_cpu);
4999
5000 void blk_mq_cancel_work_sync(struct request_queue *q)
5001 {
5002 struct blk_mq_hw_ctx *hctx;
5003 unsigned long i;
5004
5005 cancel_delayed_work_sync(&q->requeue_work);
5006
5007 queue_for_each_hw_ctx(q, hctx, i)
5008 cancel_delayed_work_sync(&hctx->run_work);
5009 }
5010
5011 static int __init blk_mq_init(void)
5012 {
5013 int i;
5014
5015 for_each_possible_cpu(i)
5016 init_llist_head(&per_cpu(blk_cpu_done, i));
5017 for_each_possible_cpu(i)
5018 INIT_CSD(&per_cpu(blk_cpu_csd, i),
5019 __blk_mq_complete_request_remote, NULL);
5020 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
5021
5022 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
5023 "block/softirq:dead", NULL,
5024 blk_softirq_cpu_dead);
5025 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
5026 blk_mq_hctx_notify_dead);
5027 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
5028 blk_mq_hctx_notify_online,
5029 blk_mq_hctx_notify_offline);
5030 return 0;
5031 }
5032 subsys_initcall(blk_mq_init);
5033