xref: /openbmc/linux/block/blk-mq-sched.c (revision bebe84ebeec4d030aa65af58376305749762e5a0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * blk-mq scheduling framework
4  *
5  * Copyright (C) 2016 Jens Axboe
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/blk-mq.h>
10 #include <linux/list_sort.h>
11 
12 #include <trace/events/block.h>
13 
14 #include "blk.h"
15 #include "blk-mq.h"
16 #include "blk-mq-debugfs.h"
17 #include "blk-mq-sched.h"
18 #include "blk-wbt.h"
19 
20 /*
21  * Mark a hardware queue as needing a restart.
22  */
23 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
24 {
25 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
26 		return;
27 
28 	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
29 }
30 EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
31 
32 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
33 {
34 	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
35 
36 	/*
37 	 * Order clearing SCHED_RESTART against the list_empty_careful()
38 	 * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is the
39 	 * barrier in blk_mq_dispatch_rq_list(). Otherwise the dispatch code
40 	 * could fail to see SCHED_RESTART while a request newly added to
41 	 * hctx->dispatch is also missed by the check in blk_mq_run_hw_queue().
42 	 */
43 	smp_mb();
44 
45 	blk_mq_run_hw_queue(hctx, true);
46 }
47 
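/*
 * list_sort() comparator used by __blk_mq_do_dispatch_sched(): compare only
 * the hctx pointers so that, after sorting, requests belonging to the same
 * hctx sit next to each other and can be dispatched as one batch.
 */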
48 static int sched_rq_cmp(void *priv, const struct list_head *a,
49 			const struct list_head *b)
50 {
51 	struct request *rqa = container_of(a, struct request, queuelist);
52 	struct request *rqb = container_of(b, struct request, queuelist);
53 
54 	return rqa->mq_hctx > rqb->mq_hctx;
55 }
56 
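/*
 * Dispatch the leading run of requests in @rq_list that share the first
 * request's hctx: cut them into a local list and pass them to
 * blk_mq_dispatch_rq_list(). Requests targeting other hctxs stay on
 * @rq_list for the caller's next round.
 */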
57 static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
58 {
59 	struct blk_mq_hw_ctx *hctx =
60 		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
61 	struct request *rq;
62 	LIST_HEAD(hctx_list);
63 	unsigned int count = 0;
64 
65 	list_for_each_entry(rq, rq_list, queuelist) {
66 		if (rq->mq_hctx != hctx) {
67 			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
68 			goto dispatch;
69 		}
70 		count++;
71 	}
72 	list_splice_tail_init(rq_list, &hctx_list);
73 
74 dispatch:
75 	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
76 }
77 
78 #define BLK_MQ_BUDGET_DELAY	3		/* ms units */
79 
80 /*
81  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
82  * its queue by itself in its completion handler, so we don't need to
83  * restart the queue if .get_budget() fails to get the budget.
84  *
85  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
86  * be run again.  This is necessary to avoid starving flushes.
87  */
88 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
89 {
90 	struct request_queue *q = hctx->queue;
91 	struct elevator_queue *e = q->elevator;
92 	bool multi_hctxs = false, run_queue = false;
93 	bool dispatched = false, busy = false;
94 	unsigned int max_dispatch;
95 	LIST_HEAD(rq_list);
96 	int count = 0;
97 
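	/*
	 * dispatch_busy is a busyness heuristic that blk-mq maintains from
	 * earlier dispatch results; if the hw queue has been failing
	 * dispatches, pull just one request at a time from the scheduler,
	 * otherwise allow a batch of up to nr_requests.
	 */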
98 	if (hctx->dispatch_busy)
99 		max_dispatch = 1;
100 	else
101 		max_dispatch = hctx->queue->nr_requests;
102 
103 	do {
104 		struct request *rq;
105 		int budget_token;
106 
107 		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
108 			break;
109 
110 		if (!list_empty_careful(&hctx->dispatch)) {
111 			busy = true;
112 			break;
113 		}
114 
115 		budget_token = blk_mq_get_dispatch_budget(q);
116 		if (budget_token < 0)
117 			break;
118 
119 		rq = e->type->ops.dispatch_request(hctx);
120 		if (!rq) {
121 			blk_mq_put_dispatch_budget(q, budget_token);
122 			/*
123 			 * We're releasing without dispatching. Holding the
124 			 * budget could have blocked any "hctx"s with the
125 			 * same queue and if we didn't dispatch then there's
126 			 * no guarantee anyone will kick the queue.  Kick it
127 			 * ourselves.
128 			 */
129 			run_queue = true;
130 			break;
131 		}
132 
133 		blk_mq_set_rq_budget_token(rq, budget_token);
134 
135 		/*
136 		 * Now this rq owns the budget, which has to be released if
137 		 * the rq does not get queued to the driver via .queue_rq()
138 		 * in blk_mq_dispatch_rq_list().
139 		 */
140 		list_add_tail(&rq->queuelist, &rq_list);
141 		count++;
142 		if (rq->mq_hctx != hctx)
143 			multi_hctxs = true;
144 
145 		/*
146 		 * If we cannot get a driver tag for the request, stop
147 		 * dequeueing requests from the IO scheduler. We are unlikely to
148 		 * be able to submit them anyway, and it gives the scheduling
149 		 * heuristics a false impression that the device can take more IO.
150 		 */
151 		if (!blk_mq_get_driver_tag(rq))
152 			break;
153 	} while (count < max_dispatch);
154 
155 	if (!count) {
156 		if (run_queue)
157 			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
158 	} else if (multi_hctxs) {
159 		/*
160 		 * Some schedulers, such as bfq and deadline, may return
161 		 * requests that belong to different hctxs.
162 		 *
163 		 * Sort the requests in the list by their hctx so that a
164 		 * batch of requests from the same hctx is dispatched at a time.
165 		 */
166 		list_sort(NULL, &rq_list, sched_rq_cmp);
167 		do {
168 			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
169 		} while (!list_empty(&rq_list));
170 	} else {
171 		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
172 	}
173 
174 	if (busy)
175 		return -EAGAIN;
176 	return !!dispatched;
177 }
178 
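/*
 * Keep calling __blk_mq_do_dispatch_sched() for as long as it reports that
 * it dispatched something (return value 1). Give up after roughly one
 * second (jiffies + HZ) or when rescheduling is needed, and in that case
 * kick the hw queue again asynchronously so dispatch carries on later.
 */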
179 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
180 {
181 	unsigned long end = jiffies + HZ;
182 	int ret;
183 
184 	do {
185 		ret = __blk_mq_do_dispatch_sched(hctx);
186 		if (ret != 1)
187 			break;
188 		if (need_resched() || time_is_before_jiffies(end)) {
189 			blk_mq_delay_run_hw_queue(hctx, 0);
190 			break;
191 		}
192 	} while (1);
193 
194 	return ret;
195 }
196 
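/*
 * Return the software queue that follows @ctx in @hctx's ctx array, wrapping
 * around at nr_ctx; used below for round-robin dispatch from the sw queues.
 */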
197 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
198 					  struct blk_mq_ctx *ctx)
199 {
200 	unsigned short idx = ctx->index_hw[hctx->type];
201 
202 	if (++idx == hctx->nr_ctx)
203 		idx = 0;
204 
205 	return hctx->ctxs[idx];
206 }
207 
208 /*
209  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
210  * its queue by itself in its completion handler, so we don't need to
211  * restart the queue if .get_budget() fails to get the budget.
212  *
213  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
214  * be run again.  This is necessary to avoid starving flushes.
215  */
216 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
217 {
218 	struct request_queue *q = hctx->queue;
219 	LIST_HEAD(rq_list);
220 	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
221 	int ret = 0;
222 	struct request *rq;
223 
224 	do {
225 		int budget_token;
226 
227 		if (!list_empty_careful(&hctx->dispatch)) {
228 			ret = -EAGAIN;
229 			break;
230 		}
231 
232 		if (!sbitmap_any_bit_set(&hctx->ctx_map))
233 			break;
234 
235 		budget_token = blk_mq_get_dispatch_budget(q);
236 		if (budget_token < 0)
237 			break;
238 
239 		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
240 		if (!rq) {
241 			blk_mq_put_dispatch_budget(q, budget_token);
242 			/*
243 			 * We're releasing without dispatching. Holding the
244 			 * budget could have blocked any "hctx"s with the
245 			 * same queue and if we didn't dispatch then there's
246 			 * no guarantee anyone will kick the queue.  Kick it
247 			 * ourselves.
248 			 */
249 			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
250 			break;
251 		}
252 
253 		blk_mq_set_rq_budget_token(rq, budget_token);
254 
255 		/*
256 		 * Now this rq owns the budget, which has to be released if
257 		 * the rq does not get queued to the driver via .queue_rq()
258 		 * in blk_mq_dispatch_rq_list().
259 		 */
260 		list_add(&rq->queuelist, &rq_list);
261 
262 		/* round robin for fair dispatch */
263 		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
264 
265 	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));
266 
267 	WRITE_ONCE(hctx->dispatch_from, ctx);
268 	return ret;
269 }
270 
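/*
 * One dispatch pass for a hw queue: flush leftover requests from
 * hctx->dispatch first, then pull further work either from the IO scheduler
 * or from the software queues, depending on whether an elevator is attached
 * and on how busy the queue currently is.
 */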
271 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
272 {
273 	struct request_queue *q = hctx->queue;
274 	const bool has_sched = q->elevator;
275 	int ret = 0;
276 	LIST_HEAD(rq_list);
277 
278 	/*
279 	 * If we have previous entries on our dispatch list, grab them first for
280 	 * more fair dispatch.
281 	 */
282 	if (!list_empty_careful(&hctx->dispatch)) {
283 		spin_lock(&hctx->lock);
284 		if (!list_empty(&hctx->dispatch))
285 			list_splice_init(&hctx->dispatch, &rq_list);
286 		spin_unlock(&hctx->lock);
287 	}
288 
289 	/*
290 	 * Only ask the scheduler for requests if we didn't have residual
291 	 * requests from the dispatch list. This is to avoid the case where
292 	 * we only ever dispatch a fraction of the requests available because
293 	 * of low device queue depth. Once we pull requests out of the IO
294 	 * scheduler, we can no longer merge or sort them. So it's best to
295 	 * leave them there for as long as we can. Mark the hw queue as
296 	 * needing a restart in that case.
297 	 *
298 	 * We want to dispatch from the scheduler if there was nothing
299 	 * on the dispatch list or we were able to dispatch from the
300 	 * dispatch list.
301 	 */
302 	if (!list_empty(&rq_list)) {
303 		blk_mq_sched_mark_restart_hctx(hctx);
304 		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
305 			if (has_sched)
306 				ret = blk_mq_do_dispatch_sched(hctx);
307 			else
308 				ret = blk_mq_do_dispatch_ctx(hctx);
309 		}
310 	} else if (has_sched) {
311 		ret = blk_mq_do_dispatch_sched(hctx);
312 	} else if (hctx->dispatch_busy) {
313 		/* dequeue requests one by one from the sw queue if the queue is busy */
314 		ret = blk_mq_do_dispatch_ctx(hctx);
315 	} else {
316 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
317 		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
318 	}
319 
320 	return ret;
321 }
322 
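/*
 * Entry point used by blk-mq to run a hw queue: skip stopped or quiesced
 * queues, retry once on -EAGAIN, and if the retry still reports a non-empty
 * hctx->dispatch, reschedule the queue asynchronously so flushes cannot be
 * starved.
 */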
323 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
324 {
325 	struct request_queue *q = hctx->queue;
326 
327 	/* an RCU or SRCU read lock is needed before checking the quiesced flag */
328 	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
329 		return;
330 
331 	hctx->run++;
332 
333 	/*
334 	 * A return of -EAGAIN is an indication that hctx->dispatch is not
335 	 * empty and we must run again in order to avoid starving flushes.
336 	 */
337 	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
338 		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
339 			blk_mq_run_hw_queue(hctx, true);
340 	}
341 }
342 
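/*
 * Try to merge @bio into an already queued request. A scheduler's
 * ->bio_merge() hook takes precedence when an elevator is attached;
 * otherwise fall back to a bounded scan of the software queue this bio
 * maps to.
 */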
343 bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
344 		unsigned int nr_segs)
345 {
346 	struct elevator_queue *e = q->elevator;
347 	struct blk_mq_ctx *ctx;
348 	struct blk_mq_hw_ctx *hctx;
349 	bool ret = false;
350 	enum hctx_type type;
351 
352 	if (e && e->type->ops.bio_merge) {
353 		ret = e->type->ops.bio_merge(q, bio, nr_segs);
354 		goto out_put;
355 	}
356 
357 	ctx = blk_mq_get_ctx(q);
358 	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
359 	type = hctx->type;
360 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
361 	    list_empty_careful(&ctx->rq_lists[type]))
362 		goto out_put;
363 
364 	/* default per sw-queue merge */
365 	spin_lock(&ctx->lock);
366 	/*
367 	 * Reverse check our software queue for entries that we could
368 	 * potentially merge with. Currently includes a hand-wavy stop
369 	 * count of 8, to not spend too much time checking for merges.
370 	 */
371 	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
372 		ret = true;
373 
374 	spin_unlock(&ctx->lock);
375 out_put:
376 	return ret;
377 }
378 
379 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
380 				   struct list_head *free)
381 {
382 	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
383 }
384 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
385 
386 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
387 				       struct request *rq)
388 {
389 	/*
390 	 * Dispatch flush and passthrough requests directly.
391 	 *
392 	 * A passthrough request has to be added to hctx->dispatch directly:
393 	 * the device may be in a state in which it cannot handle FS requests,
394 	 * so STS_RESOURCE is always returned for them and they pile up on
395 	 * hctx->dispatch, while a passthrough request may be exactly what is
396 	 * needed to get the device out of that state. If the passthrough
397 	 * request were added to the scheduler queue instead, there would be no
398 	 * chance to dispatch it, given we prioritize requests in hctx->dispatch.
399 	 */
400 	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
401 		return true;
402 
403 	return false;
404 }
405 
406 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
407 				 bool run_queue, bool async)
408 {
409 	struct request_queue *q = rq->q;
410 	struct elevator_queue *e = q->elevator;
411 	struct blk_mq_ctx *ctx = rq->mq_ctx;
412 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
413 
414 	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));
415 
416 	if (blk_mq_sched_bypass_insert(hctx, rq)) {
417 		/*
418 		 * First, normal IO requests are inserted into the scheduler or
419 		 * sw queue, while flush requests go to the dispatch queue
420 		 * (hctx->dispatch) directly, and there is at most one in-flight
421 		 * flush request per hw queue, so it does not matter whether the
422 		 * flush is added to the tail or the front of the dispatch queue.
423 		 *
424 		 * Second, in the NCQ case a flush request is a non-NCQ command,
425 		 * and queueing it fails while any normal IO request (NCQ
426 		 * command) is in flight. Adding the flush rq to the front of
427 		 * hctx->dispatch tends to add extra latency to it because of
428 		 * S_SCHED_RESTART, compared with adding it to the tail; that
429 		 * increases the chance of flush merging, so fewer flush
430 		 * requests are issued to the controller. About 10% of the
431 		 * runtime of blktests block/004 was observed to be saved on a
432 		 * disk attached to an AHCI/NCQ drive when the flush rq is
433 		 * added to the front of hctx->dispatch.
434 		 *
435 		 * Simply queue the flush rq to the front of hctx->dispatch so
436 		 * that flush-intensive workloads benefit on NCQ hardware.
437 		 */
438 		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
439 		blk_mq_request_bypass_insert(rq, at_head, false);
440 		goto run;
441 	}
442 
443 	if (e) {
444 		LIST_HEAD(list);
445 
446 		list_add(&rq->queuelist, &list);
447 		e->type->ops.insert_requests(hctx, &list, at_head);
448 	} else {
449 		spin_lock(&ctx->lock);
450 		__blk_mq_insert_request(hctx, rq, at_head);
451 		spin_unlock(&ctx->lock);
452 	}
453 
454 run:
455 	if (run_queue)
456 		blk_mq_run_hw_queue(hctx, async);
457 }
458 
459 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
460 				  struct blk_mq_ctx *ctx,
461 				  struct list_head *list, bool run_queue_async)
462 {
463 	struct elevator_queue *e;
464 	struct request_queue *q = hctx->queue;
465 
466 	/*
467 	 * blk_mq_sched_insert_requests() is called from flush-plug context
468 	 * only, and holds one usage counter to prevent the queue from
469 	 * being released.
470 	 */
471 	percpu_ref_get(&q->q_usage_counter);
472 
473 	e = hctx->queue->elevator;
474 	if (e) {
475 		e->type->ops.insert_requests(hctx, list, false);
476 	} else {
477 		/*
478 		 * With the 'none' scheduler, try to issue requests directly
479 		 * if the hw queue isn't busy; this may save us an extra
480 		 * enqueue & dequeue to the sw queue.
481 		 */
482 		if (!hctx->dispatch_busy && !run_queue_async) {
483 			blk_mq_run_dispatch_ops(hctx->queue,
484 				blk_mq_try_issue_list_directly(hctx, list));
485 			if (list_empty(list))
486 				goto out;
487 		}
488 		blk_mq_insert_requests(hctx, ctx, list);
489 	}
490 
491 	blk_mq_run_hw_queue(hctx, run_queue_async);
492  out:
493 	percpu_ref_put(&q->q_usage_counter);
494 }
495 
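/*
 * Set up scheduler tags for one hw queue: with shared tags the queue-wide
 * sched_shared_tags map is reused for every hctx, otherwise a map sized for
 * q->nr_requests is allocated per hctx.
 */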
496 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
497 					  struct blk_mq_hw_ctx *hctx,
498 					  unsigned int hctx_idx)
499 {
500 	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
501 		hctx->sched_tags = q->sched_shared_tags;
502 		return 0;
503 	}
504 
505 	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
506 						    q->nr_requests);
507 
508 	if (!hctx->sched_tags)
509 		return -ENOMEM;
510 	return 0;
511 }
512 
513 static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
514 {
515 	blk_mq_free_rq_map(queue->sched_shared_tags);
516 	queue->sched_shared_tags = NULL;
517 }
518 
519 /* called in queue's release handler, tagset has gone away */
520 static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
521 {
522 	struct blk_mq_hw_ctx *hctx;
523 	unsigned long i;
524 
525 	queue_for_each_hw_ctx(q, hctx, i) {
526 		if (hctx->sched_tags) {
527 			if (!blk_mq_is_shared_tags(flags))
528 				blk_mq_free_rq_map(hctx->sched_tags);
529 			hctx->sched_tags = NULL;
530 		}
531 	}
532 
533 	if (blk_mq_is_shared_tags(flags))
534 		blk_mq_exit_sched_shared_tags(q);
535 }
536 
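/*
 * Allocate the single queue-wide scheduler tag map used when the tag set
 * shares tags across hw queues.
 */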
537 static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
538 {
539 	struct blk_mq_tag_set *set = queue->tag_set;
540 
541 	/*
542 	 * Set the initial depth to the maximum so that we don't need to
543 	 * reallocate it when nr_requests is updated.
544 	 */
545 	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
546 						BLK_MQ_NO_HCTX_IDX,
547 						MAX_SCHED_RQ);
548 	if (!queue->sched_shared_tags)
549 		return -ENOMEM;
550 
551 	blk_mq_tag_update_sched_shared_tags(queue);
552 
553 	return 0;
554 }
555 
556 /* caller must have a reference to @e, will grab another one if successful */
557 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
558 {
559 	unsigned int flags = q->tag_set->flags;
560 	struct blk_mq_hw_ctx *hctx;
561 	struct elevator_queue *eq;
562 	unsigned long i;
563 	int ret;
564 
565 	/*
566 	 * Default to twice the smaller of the hw queue depth and
567 	 * BLKDEV_DEFAULT_RQ (128), since we don't split into sync/async like
568 	 * the old code did. Additionally, this is a per-hw-queue depth.
569 	 */
570 	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
571 				   BLKDEV_DEFAULT_RQ);
572 
573 	if (blk_mq_is_shared_tags(flags)) {
574 		ret = blk_mq_init_sched_shared_tags(q);
575 		if (ret)
576 			return ret;
577 	}
578 
579 	queue_for_each_hw_ctx(q, hctx, i) {
580 		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
581 		if (ret)
582 			goto err_free_map_and_rqs;
583 	}
584 
585 	ret = e->ops.init_sched(q, e);
586 	if (ret)
587 		goto err_free_map_and_rqs;
588 
589 	mutex_lock(&q->debugfs_mutex);
590 	blk_mq_debugfs_register_sched(q);
591 	mutex_unlock(&q->debugfs_mutex);
592 
593 	queue_for_each_hw_ctx(q, hctx, i) {
594 		if (e->ops.init_hctx) {
595 			ret = e->ops.init_hctx(hctx, i);
596 			if (ret) {
597 				eq = q->elevator;
598 				blk_mq_sched_free_rqs(q);
599 				blk_mq_exit_sched(q, eq);
600 				kobject_put(&eq->kobj);
601 				return ret;
602 			}
603 		}
604 		mutex_lock(&q->debugfs_mutex);
605 		blk_mq_debugfs_register_sched_hctx(q, hctx);
606 		mutex_unlock(&q->debugfs_mutex);
607 	}
608 
609 	return 0;
610 
611 err_free_map_and_rqs:
612 	blk_mq_sched_free_rqs(q);
613 	blk_mq_sched_tags_teardown(q, flags);
614 
615 	q->elevator = NULL;
616 	return ret;
617 }
618 
619 /*
620  * called in either blk_queue_cleanup or elevator_switch, tagset
621  * is required for freeing requests
622  */
623 void blk_mq_sched_free_rqs(struct request_queue *q)
624 {
625 	struct blk_mq_hw_ctx *hctx;
626 	unsigned long i;
627 
628 	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
629 		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
630 				BLK_MQ_NO_HCTX_IDX);
631 	} else {
632 		queue_for_each_hw_ctx(q, hctx, i) {
633 			if (hctx->sched_tags)
634 				blk_mq_free_rqs(q->tag_set,
635 						hctx->sched_tags, i);
636 		}
637 	}
638 }
639 
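/*
 * Tear down an elevator instance: remove the debugfs entries and undo
 * ->init_hctx() for every hw queue, call the scheduler's ->exit_sched()
 * hook and free the scheduler tag maps. The hctx flags are captured in the
 * loop because the shared-tags teardown needs them after the loop finishes.
 */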
640 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
641 {
642 	struct blk_mq_hw_ctx *hctx;
643 	unsigned long i;
644 	unsigned int flags = 0;
645 
646 	queue_for_each_hw_ctx(q, hctx, i) {
647 		mutex_lock(&q->debugfs_mutex);
648 		blk_mq_debugfs_unregister_sched_hctx(hctx);
649 		mutex_unlock(&q->debugfs_mutex);
650 
651 		if (e->type->ops.exit_hctx && hctx->sched_data) {
652 			e->type->ops.exit_hctx(hctx, i);
653 			hctx->sched_data = NULL;
654 		}
655 		flags = hctx->flags;
656 	}
657 
658 	mutex_lock(&q->debugfs_mutex);
659 	blk_mq_debugfs_unregister_sched(q);
660 	mutex_unlock(&q->debugfs_mutex);
661 
662 	if (e->type->ops.exit_sched)
663 		e->type->ops.exit_sched(e);
664 	blk_mq_sched_tags_teardown(q, flags);
665 	q->elevator = NULL;
666 }
667