// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart. Testing the bit first avoids
 * needlessly dirtying the cacheline when the bit is already set.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
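
/*
 * Illustrative sketch, not part of the original file: one way the restart
 * mark is consumed. A dispatch path that runs out of resources marks the
 * hctx; the completion side then re-runs the queue only if the mark is set
 * (this is what blk_mq_sched_restart() in blk-mq-sched.h does). Simplified
 * model only, kept out of the build with #if 0.
 */
#if 0
static void example_mark_and_restart(struct blk_mq_hw_ctx *hctx)
{
	/* Producer side: remember that this hctx wants another run. */
	blk_mq_sched_mark_restart_hctx(hctx);

	/* Consumer side: restart only if someone asked for it. */
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}
#endif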

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful(
	 * &hctx->dispatch) check in blk_mq_run_hw_queue(). Its pair is the
	 * barrier in blk_mq_dispatch_rq_list(). This prevents the case where
	 * the dispatch code doesn't see SCHED_RESTART while a new request
	 * added to hctx->dispatch is missed by the check in
	 * blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}
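
/*
 * Illustrative sketch, not part of the original file: the two sides of the
 * barrier pairing described above, reduced to their ordering skeleton. One
 * side clears SCHED_RESTART and then checks hctx->dispatch; the other adds
 * to hctx->dispatch and then checks SCHED_RESTART. With an smp_mb() on each
 * side, at least one of them must observe the other's update, so the queue
 * cannot be left unrun. Simplified model only.
 */
#if 0
/* Restart side (this file plus blk_mq_run_hw_queue()): */
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
smp_mb();
if (!list_empty_careful(&hctx->dispatch))
	/* run the queue */;

/* Dispatch side (blk_mq_dispatch_rq_list() in blk-mq.c): */
list_splice_tail_init(&list, &hctx->dispatch);
smp_mb();
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
	/* run the queue */;
#endif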

static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}
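
/*
 * Illustrative sketch, not part of the original file: list_sort() treats a
 * positive return as "a sorts after b", so the boolean pointer comparison
 * above is enough to group requests sharing an mq_hctx. list_sort() is also
 * stable, so requests keep their relative order within each group.
 */
#if 0
LIST_HEAD(rq_list);
/* ... requests from several hctxs have been queued on rq_list ... */
list_sort(NULL, &rq_list, sched_rq_cmp);
/* rq_list now holds consecutive runs of requests with the same mq_hctx. */
#endif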

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}
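
/*
 * Illustrative sketch, not part of the original file: list_cut_before()
 * moves everything on @rq_list up to, but excluding, the given entry onto
 * @hctx_list. On a sorted list this peels off exactly one hctx's batch per
 * call, e.g. with requests A1 A2 A3 B1 B2 (A and B on different hctxs):
 */
#if 0
/* rq_list: A1 -> A2 -> A3 -> B1 -> B2, loop stops with rq == B1 */
list_cut_before(&hctx_list, rq_list, &rq->queuelist);
/* hctx_list: A1 -> A2 -> A3 (dispatched now); rq_list: B1 -> B2 (next call) */
#endif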

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked other hctxs with the
			 * same queue, and if we didn't dispatch there's no
			 * guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway, and it creates a false impression
		 * for scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx, so
		 * that batches of requests from the same hctx can be
		 * dispatched at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}
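
/*
 * Illustrative sketch, not part of the original file: the budget handoff
 * protocol the loop above follows. A budget token is either handed to a
 * request (which then owns releasing it through the .queue_rq() path) or
 * returned immediately, with a delayed queue run so nobody stalls waiting
 * for a kick. Simplified model only.
 */
#if 0
budget_token = blk_mq_get_dispatch_budget(q);
if (budget_token < 0)
	return;					/* no budget: stop dequeueing */

rq = e->type->ops.dispatch_request(hctx);
if (!rq) {
	blk_mq_put_dispatch_budget(q, budget_token);	/* give it back */
	blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); /* and kick */
	return;
}
blk_mq_set_rq_budget_token(rq, budget_token);	/* rq owns the budget now */
#endif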

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}
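
/*
 * Illustrative sketch, not part of the original file: the wraparound above
 * is just a modular increment. With nr_ctx == 4 the software queues are
 * visited as 0 -> 1 -> 2 -> 3 -> 0, which is what gives
 * blk_mq_do_dispatch_ctx() its round-robin fairness.
 */
#if 0
idx = (idx + 1) % hctx->nr_ctx;		/* equivalent to the ++/reset above */
return hctx->ctxs[idx];
#endif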

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked other hctxs with the
			 * same queue, and if we didn't dispatch there's no
			 * guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for fairer dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}
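
/*
 * Illustrative sketch, not part of the original file: the -EAGAIN handling
 * above is a "retry once synchronously, then punt to the async worker"
 * pattern. It bounds the time spent in the caller's context while still
 * guaranteeing the leftover dispatch work gets run. do_dispatch() below is
 * a hypothetical stand-in for __blk_mq_sched_dispatch_requests().
 */
#if 0
if (do_dispatch() == -EAGAIN) {		/* leftovers were found, */
	if (do_dispatch() == -EAGAIN)	/* and found again: stop looping, */
		blk_mq_run_hw_queue(hctx, true);	/* reschedule async */
}
#endif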

bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}
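
/*
 * Illustrative sketch, not part of the original file: the shape of the
 * bounded reverse scan that blk_bio_list_merge() performs (see blk-merge.c,
 * where blk_attempt_bio_merge() is a static helper). Scanning newest-first
 * finds the most likely merge partner quickly, and the stop count keeps the
 * walk cheap on long queues. Simplified model only.
 */
#if 0
struct request *rq;
int checked = 8;

list_for_each_entry_reverse(rq, &ctx->rq_lists[type], queuelist) {
	if (!checked--)
		break;
	switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
	case BIO_MERGE_NONE:
		continue;		/* not adjacent, keep scanning */
	case BIO_MERGE_OK:
		return true;		/* merged into rq */
	case BIO_MERGE_FAILED:
		return false;		/* adjacent but unmergeable */
	}
}
return false;
#endif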

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough rq directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it can't handle FS requests,
	 * so BLK_STS_RESOURCE keeps being returned and FS requests keep
	 * being added to hctx->dispatch, yet a passthrough request may be
	 * exactly what's needed to fix the problem. If the passthrough
	 * request were added to the scheduler queue, there wouldn't be any
	 * chance to dispatch it, given that we prioritize requests in
	 * hctx->dispatch.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
		/*
		 * First, normal IO requests are inserted into the scheduler
		 * queue or sw queue, while flush requests are added to the
		 * dispatch queue (hctx->dispatch) directly. Since there is
		 * at most one in-flight flush request per hw queue, it
		 * doesn't matter whether the flush request is added to the
		 * tail or the front of the dispatch queue.
		 *
		 * Second, in the NCQ case a flush request is a non-NCQ
		 * command, and queueing it fails while any normal IO request
		 * (an NCQ command) is in flight. Adding the flush rq to the
		 * front of hctx->dispatch tends to add extra time to the
		 * flush rq's latency (because of S_SCHED_RESTART) compared
		 * with adding it to the tail; that delay increases the
		 * chance of flush merging, so fewer flush requests are
		 * issued to the controller. It is observed that ~10% time
		 * is saved in blktests block/004 on a disk attached to an
		 * AHCI/NCQ drive when adding the flush rq to the front of
		 * hctx->dispatch.
		 *
		 * So simply queue the flush rq at the front of hctx->dispatch
		 * so that flush-intensive workloads can benefit on NCQ HW.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds a usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
		/*
		 * With the 'none' scheduler, try to issue requests directly
		 * if the hw queue isn't busy; this may save us an extra
		 * enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !run_queue_async) {
			blk_mq_run_dispatch_ops(hctx->queue,
				blk_mq_try_issue_list_directly(hctx, list));
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}
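
/*
 * Illustrative sketch, not part of the original file: the q_usage_counter
 * get/put above is the standard pattern for pinning a queue across a window
 * where it could otherwise be released.
 */
#if 0
percpu_ref_get(&q->q_usage_counter);	/* queue can't be torn down... */
/* ... insert requests and/or run the hw queue ... */
percpu_ref_put(&q->q_usage_counter);	/* ...until this put drops the ref */
#endif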

static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set the initial depth to the max so that we don't need to
	 * reallocate when nr_requests is updated.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int i, flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue depth and 128
	 * (BLKDEV_DEFAULT_RQ), since we don't split into sync/async like the
	 * old code did. Note that this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}
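
/*
 * Illustrative sketch, not part of the original file: the scheduler depth
 * default worked through. BLKDEV_DEFAULT_RQ is 128, so shallow devices get
 * twice their hardware depth while deep devices are capped at 256.
 */
#if 0
q->nr_requests = 2 * min_t(unsigned int,   64, BLKDEV_DEFAULT_RQ); /* 128 */
q->nr_requests = 2 * min_t(unsigned int, 1024, BLKDEV_DEFAULT_RQ); /* 256 */
#endif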

/*
 * Called from either blk_cleanup_queue() or elevator_switch(); the tagset
 * is required for freeing requests.
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}
655