// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	if (!q->poll_stat)
		return 0;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}
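
/*
 * Illustrative "poll_stat" output (values depend on the device and on whether
 * polling statistics have been collected; one read and one write line is
 * printed per size bucket):
 *
 *	read  (512 Bytes): samples=13, mean=4801, min=4527, max=6634
 *	write (512 Bytes): samples=0
 */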

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
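
/*
 * Example (illustrative): with the hctx_state_name table below, a state word
 * that has the STOPPED and SCHED_RESTART bits set prints as
 * "STOPPED|SCHED_RESTART"; set bits without an entry in the name table print
 * as their bit index instead.
 */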

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
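
/*
 * Example usage from user space (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and the queue directory is named after the disk):
 *
 *	# echo kick > /sys/kernel/debug/block/nvme0n1/state
 *
 * "run" runs the hardware queues, "start" restarts stopped hardware queues
 * and "kick" kicks the requeue list, as handled above.
 */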

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};
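
/*
 * Each entry above is a struct blk_mq_debugfs_attr (declared in
 * blk-mq-debugfs.h): { name, mode, show, write }, or name/mode with .seq_ops
 * set instead of .show for attributes that iterate over a request list.
 */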

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(ELV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
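
/*
 * A request printed by __blk_mq_debugfs_rq_show() appears as a single line,
 * roughly like the following (illustrative; the hashed pointer, flags and tag
 * values depend on the request):
 *
 *	00000000a1b2c3d4 {.op=WRITE, .cmd_flags=SYNC|IDLE,
 *	.rq_flags=STARTED|ELV, .state=in_flight, .tag=12, .internal_tag=-1}
 *
 * Drivers may append driver-specific details via mq_ops->show_rq().
 */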

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
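
/*
 * Each CTX_RQ_SEQ_OPS() invocation above expands to a start/next/stop triple
 * plus a seq_operations table; e.g. CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ)
 * defines ctx_read_rq_list_seq_ops, which walks ctx->rq_lists[HCTX_TYPE_READ]
 * under ctx->lock and prints each request with blk_mq_debugfs_rq_show().
 */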

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
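
/*
 * Resulting debugfs layout (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug and the queue directory is named after the disk):
 *
 *	block/<disk>/			queue attrs ("state", "pm_only", ...)
 *	block/<disk>/hctx<N>/		hctx attrs ("busy", "tags", "type", ...)
 *	block/<disk>/hctx<N>/cpu<M>/	per-ctx rq lists
 *	block/<disk>/sched/		elevator attrs, if the elevator has any
 *	block/<disk>/rqos/<policy>/	rq-qos attrs, if the policy has any
 */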

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on, and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}
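
/*
 * For example (illustrative), with the wbt policy attached this creates
 * block/<disk>/rqos/wbt/ under the debugfs root and populates it with the
 * attributes supplied by rqos->ops->debugfs_attrs.
 */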

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}