// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

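/*
 * Poll statistics are no longer reported here; this empty stub is apparently
 * kept only so that the "poll_stat" debugfs attribute continues to exist.
 */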
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

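/*
 * Print the names of all flag bits that are set, separated by '|'.  Bits
 * without an entry in flag_name[] are printed as their bit number.
 */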
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

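/*
 * Writing "run", "start" or "kick" to the "state" attribute triggers the
 * corresponding queue operation.  Example, assuming debugfs is mounted at
 * /sys/kernel/debug and using a purely illustrative device name:
 *
 *	echo kick > /sys/kernel/debug/block/nvme0n1/state
 */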
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed.  Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

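/*
 * The tag allocation policy is encoded in a bit field inside hctx->flags
 * (see BLK_MQ_FLAG_TO_ALLOC_POLICY()).  Print it by name first, then clear
 * those bits so the remaining flags can be decoded via hctx_flag_name[].
 */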
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(ELV),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

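/*
 * Dump a single request on one line.  The output looks roughly like the
 * following (values are purely illustrative):
 *
 *	00000000deadbeef {.op=READ, .cmd_flags=SYNC,
 *	.rq_flags=STARTED|IO_STAT, .state=in_flight, .tag=35, .internal_tag=-1}
 */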
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

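/*
 * Walk all busy (in-flight) requests of the tag set and dump them;
 * hctx_show_busy_rq() skips requests that belong to other hardware queues of
 * the same tag set.
 */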
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

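/*
 * The tags helpers below take q->sysfs_lock, presumably to keep hctx->tags
 * and hctx->sched_tags stable against concurrent queue reconfiguration while
 * they are dumped; the interruptible variant lets a blocked reader bail out
 * on a signal.
 */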
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

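/*
 * Generate the seq_file operations for one per-CPU software queue request
 * list (default/read/poll).  Each list is walked under ctx->lock, mirroring
 * the requeue_list and dispatch iterators above.
 */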
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

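/*
 * debugfs plumbing: debugfs_create_files() stores the attribute descriptor in
 * each file's inode->i_private and the object the attributes operate on
 * (request_queue, hctx, ctx or rq_qos) in the parent directory's i_private,
 * which is where the show/write/open helpers below retrieve them from.
 */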
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

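/*
 * Create the "hctx<N>" directory with the per-hctx attributes plus one
 * "cpu<N>" subdirectory for every software queue mapped to this hctx.
 */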
void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

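/*
 * Each rq-qos policy that provides debugfs attributes gets its own
 * subdirectory under the queue's "rqos" directory, named after the policy
 * (e.g. "wbt").  The "rqos" directory itself is created lazily on first use.
 */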
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent
	 * debugfs directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}
847