// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}
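/*
 * The flag-name tables below rely on designated initializers indexed by bit
 * number, so that blk_flags_show() can map each set bit back to its symbolic
 * name and fall back to printing the raw bit number for bits without a name.
 */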
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
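	/*
	 * The tag allocation policy occupies a dedicated bit range of
	 * hctx->flags. Extract it here so the policy bits can be masked out
	 * below and the remaining flag bits decoded by name.
	 */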
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(ELV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
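/*
 * Given the format strings above, a request printed by
 * __blk_mq_debugfs_rq_show() (e.g. in the hctx "dispatch" or "busy"
 * attributes) is emitted on a single line and looks roughly like the
 * following, with all values hypothetical and the pointer hashed by %p:
 *
 *   00000000deadbeef {.op=READ, .cmd_flags=SYNC,
 *   .rq_flags=STARTED|IO_STAT, .state=in_flight, .tag=53, .internal_tag=-1}
 */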
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
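/*
 * The sched_tags attributes below mirror the tags attributes above. In all
 * four cases q->sysfs_lock keeps the tag pointers stable (for example across
 * an elevator switch, which reallocates hctx->sched_tags), and the lock is
 * taken interruptibly so that a blocked reader can still be killed.
 */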
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
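/*
 * Generic debugfs plumbing for all of the attributes above: each attribute
 * file stores its blk_mq_debugfs_attr in the file inode's i_private, while
 * the parent directory's inode carries the object (queue, hctx, ctx or
 * rq_qos) that the attribute operates on; debugfs_create_files() below sets
 * up both.
 */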
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
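/*
 * With debugfs mounted in its usual place, everything registered below ends
 * up under /sys/kernel/debug/block/<disk>/. For example (device name
 * hypothetical):
 *
 *   /sys/kernel/debug/block/nvme0n1/state
 *   /sys/kernel/debug/block/nvme0n1/hctx0/busy
 *   /sys/kernel/debug/block/nvme0n1/hctx0/cpu0/default_rq_list
 *
 * so "echo kick > /sys/kernel/debug/block/nvme0n1/state" would reach
 * queue_state_write() and kick the requeue list.
 */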
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
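/*
 * Elevator attributes: e->queue_debugfs_attrs and e->hctx_debugfs_attrs let
 * the active I/O scheduler publish its own files under the per-queue and
 * per-hctx "sched" directories created below.
 */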
void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on, with the appropriate parent
	 * debugfs directory, from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}