// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}
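
/*
 * The flag-name tables below map bit numbers to names and are decoded by
 * blk_flags_show() above, which emits the set bits separated by '|' (e.g.
 * "NONROT|IO_STAT" for a queue with those two flags set) and falls back to
 * printing the numeric bit index for bits that have no table entry.
 */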

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
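
/*
 * Illustrative use of the "state" attribute, assuming debugfs is mounted at
 * /sys/kernel/debug and taking nvme0n1 as an example disk (the flags shown
 * are likewise only an example):
 *
 *   # cat /sys/kernel/debug/block/nvme0n1/state
 *   SAME_COMP|NONROT|IO_STAT|REGISTERED|NOWAIT
 *   # echo kick > /sys/kernel/debug/block/nvme0n1/state
 *
 * Writing "run", "start" or "kick" invokes the corresponding queue operation
 * in queue_state_write() above; anything else fails with -EINVAL.
 */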

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
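
/*
 * With the helpers above, each request dumped via a "dispatch", "busy" or
 * "*_rq_list" attribute renders on a single line; illustrative output
 * (wrapped here for readability, and with the pointer hashed by %p):
 *
 *   00000000deadbeef {.op=READ, .cmd_flags=SYNC,
 *	.rq_flags=STARTED|IO_STAT|STATS, .state=in_flight,
 *	.tag=73, .internal_tag=-1}
 */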

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
	}
}
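
/*
 * The four show handlers below take q->sysfs_lock so that hctx->tags and
 * hctx->sched_tags cannot change underneath them (e.g. because of a
 * concurrent blk_mq_update_nr_hw_queues() call); mutex_lock_interruptible()
 * lets the reader back out with -EINTR instead of blocking.
 */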

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
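
/*
 * CTX_RQ_SEQ_OPS() stamps out the start/next/stop callbacks plus a
 * seq_operations instance for dumping one of the per-CPU software queue
 * request lists under ctx->lock; it is instantiated below for the default,
 * read and poll lists.
 */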

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
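
/*
 * File plumbing: each debugfs file created by debugfs_create_files() below
 * carries its attribute descriptor in the file inode's i_private, while the
 * i_private of the parent directory's inode points at the object being
 * inspected (request_queue, hctx, ctx or rqos). That is how the handlers
 * below recover both the attribute and its object.
 */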

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};
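
/*
 * Note the pairing in blk_mq_debugfs_fops above: .open uses seq_open() for
 * attributes that provide .seq_ops and single_open() for plain .show
 * attributes, so .release keys off attr->show to call the matching
 * single_release() or seq_release().
 */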

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
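
/*
 * Illustrative debugfs layout created by the registration helpers in this
 * file, under /sys/kernel/debug/block/<disk>/:
 *
 *   poll_stat, requeue_list, pm_only, state, ...  queue attributes
 *   sched/            elevator attributes, if the elevator provides any
 *   rqos/<policy>/    rq_qos policy attributes
 *   hctx<N>/          hardware queue attributes
 *   hctx<N>/cpu<M>/   software queue attributes
 */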

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}