xref: /openbmc/linux/block/blk-mq-debugfs.c (revision 2f828fb2)
/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

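/*
 * Print the names of the bits that are set in @flags, separated by '|'. Bits
 * without an entry in @flag_name are printed as their numeric position.
 */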
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

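/*
 * Designated-initializer table mapping each QUEUE_FLAG_* bit to its name;
 * queue_state_show() below feeds it to blk_flags_show().
 */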
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

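/*
 * Writing one of the commands below to the "state" attribute pokes the queue.
 * For example, assuming debugfs is mounted at the usual /sys/kernel/debug:
 *
 *	echo kick > /sys/kernel/debug/block/<dev>/state
 *
 * "run" runs the hardware queues, "start" restarts stopped hardware queues
 * and "kick" kicks the requeue list.
 */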
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

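/*
 * Poll statistics are stored in pairs of buckets: even indices hold read
 * stats and odd indices hold write stats, labeled below with the I/O size
 * (1 << (9 + pair)) that the pair covers.
 */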
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ",  1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME

#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
static const char *const rqaf_name[] = {
	RQAF_NAME(COMPLETE),
	RQAF_NAME(STARTED),
	RQAF_NAME(POLL_SLEPT),
};
#undef RQAF_NAME

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_puts(m, ", .atomic_flags=");
	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

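/*
 * seq_file iterators for the requeue list: start() takes q->requeue_lock and
 * stop() releases it, so the list cannot change while it is being printed.
 */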
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
	    test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

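/*
 * The four readers below take q->sysfs_lock before dereferencing hctx->tags
 * or hctx->sched_tags; taking the mutex interruptibly lets a reader that
 * blocks on it be killed.
 */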
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

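/*
 * Per-CPU software queue (blk_mq_ctx) attributes: the rq_list is walked
 * under ctx->lock, mirroring the hctx dispatch iterators above, and the
 * counters below can be reset by writing to the corresponding file.
 */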
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start	= ctx_rq_list_start,
	.next	= ctx_rq_list_next,
	.stop	= ctx_rq_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

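/*
 * debugfs_create_files() stores the attribute in the file's inode i_private
 * and the queue/hctx/ctx pointer in the parent directory's inode; the
 * helpers below dig both back out.
 */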
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	if (!attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

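/*
 * Attribute tables: name, mode, then either show/write callbacks or, for
 * list-valued attributes, a seq_operations pointer. A zeroed sentinel entry
 * terminates each table.
 */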
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{"poll_stat", 0400, queue_poll_stat_show},
	{"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
	{"state", 0600, queue_state_show, queue_state_write},
	{"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

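/*
 * Create one debugfs file per table entry under @parent, stashing @data on
 * the parent inode so that the file operations above can retrieve it.
 */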
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}

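/*
 * Create and populate the per-device directory, named after the gendisk,
 * under the "block" directory in the debugfs root.
 */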
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

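/*
 * I/O schedulers opt in by pointing elevator_type::queue_debugfs_attrs
 * (and, per hardware queue, hctx_debugfs_attrs) at tables like the ones
 * above. A minimal hypothetical sketch ("foo" names are illustrative only):
 *
 *	static int foo_depth_show(void *data, struct seq_file *m)
 *	{
 *		struct request_queue *q = data;
 *
 *		seq_printf(m, "%lu\n", q->nr_requests);
 *		return 0;
 *	}
 *
 *	static const struct blk_mq_debugfs_attr foo_queue_debugfs_attrs[] = {
 *		{"depth", 0400, foo_depth_show},
 *		{},
 *	};
 *
 * See mq-deadline and kyber for real examples.
 */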
int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}