// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	if (!q->poll_stat)
		return 0;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
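/*
 * The "state" attribute below is both readable and writable. As a usage
 * sketch (path and disk name are illustrative -- they assume debugfs is
 * mounted at /sys/kernel/debug and a disk named nvme0n1):
 *
 *   cat /sys/kernel/debug/block/nvme0n1/state
 *   echo kick > /sys/kernel/debug/block/nvme0n1/state
 */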
static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME
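/*
 * hctx->flags packs the tag allocation policy into a bit field alongside the
 * boolean BLK_MQ_F_* flags. hctx_flags_show() prints the policy first, then
 * XORs those bits back out so that only the boolean flags are passed to
 * blk_flags_show().
 */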
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(ELV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
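/*
 * The "dispatch" attribute walks hctx->dispatch, the list of requests that
 * were taken off the scheduler/software queues but could not yet be issued
 * to the driver. The list is only ever traversed under hctx->lock.
 */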
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
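/*
 * When an I/O scheduler is attached, requests are allocated from
 * hctx->sched_tags and only acquire a driver tag at dispatch time. The
 * interruptible sysfs_lock protocol below mirrors hctx_tags_show() above
 * and keeps the tag set from being freed mid-dump.
 */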
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
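/*
 * Generic debugfs glue. Each attribute file stores its blk_mq_debugfs_attr
 * in inode->i_private, while the parent directory's inode holds the object
 * (queue, hctx, ctx or rqos) that the attribute operates on; the open, show
 * and write helpers below recover both from the dentry tree.
 */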
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
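/*
 * Registration entry points. The resulting tree looks roughly as follows
 * (<disk> is the gendisk name; which entries exist depends on the queue):
 *
 *   /sys/kernel/debug/block/<disk>/
 *     poll_stat, requeue_list, pm_only, state, write_hints, zone_wlock
 *     sched/		- elevator attributes, if an elevator is attached
 *     rqos/<policy>/	- one directory per registered rq_qos policy
 *     hctx<N>/		- one directory per hardware queue
 *       cpu<M>/	- one directory per mapped software queue
 */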
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
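/*
 * Elevator debugfs support is optional: an elevator_type may provide
 * queue-level attributes (queue_debugfs_attrs) and/or per-hctx attributes
 * (hctx_debugfs_attrs), and an rq_qos policy may provide debugfs_attrs in
 * its ops; the hooks below tolerate the absence of any of them.
 */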
void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}