/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                        stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
                seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket+1]);
                seq_puts(m, "\n");
        }
        return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_lock_irq(&q->requeue_lock);
        return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;

        return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
        __releases(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
        .start = queue_requeue_list_start,
        .next = queue_requeue_list_next,
        .stop = queue_requeue_list_stop,
        .show = blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                        const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        seq_printf(m, "%d\n", atomic_read(&q->pm_only));
        return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(QUEUED),
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(BYPASS),
        QUEUE_FLAG_NAME(BIDI),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(NO_SG_MERGE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(FLUSH_NQ),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                        ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed after blk_cleanup_queue() has called
         * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
         * triggering a use-after-free.
         */
        if (blk_queue_dead(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else if (strcmp(op, "kick") == 0) {
                blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

        return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                q->write_hints[i] = 0;

        return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        { "poll_stat", 0400, queue_poll_stat_show },
        { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
        { "pm_only", 0600, queue_pm_only_show, NULL },
        { "state", 0600, queue_state_show, queue_state_write },
        { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
        { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
        { },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                        ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME
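
/*
 * BLK_MQ_F_* flags are bit masks, so HCTX_FLAG_NAME() uses ilog2() to index
 * hctx_flag_name[] by bit position; bits without an entry are printed as
 * numbers by blk_flags_show().
 */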

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(SG_MERGE),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                        hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                        hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(QUEUED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
        [MQ_RQ_IDLE] = "idle",
        [MQ_RQ_IN_FLIGHT] = "in_flight",
        [MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
        if (WARN_ON_ONCE((unsigned int)rq_state >=
                        ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                        ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                        ARRAY_SIZE(rqf_name));
        seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                        rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start = hctx_dispatch_start,
        .next = hctx_dispatch_next,
        .stop = hctx_dispatch_stop,
        .show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
        struct seq_file *m;
        struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
        const struct show_busy_params *params = data;

        if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
                __blk_mq_debugfs_rq_show(params->m,
                                list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct show_busy_params params = { .m = m, .hctx = hctx };

        blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                        &params);

        return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
                        struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                        atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->queued = 0;
        return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                        loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%u\n", hctx->dispatch_busy);
        return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_ctx *ctx = m->private;

        return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
        __releases(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
        .start = ctx_rq_list_start,
        .next = ctx_rq_list_next,
        .stop = ctx_rq_list_stop,
        .show = blk_mq_debugfs_rq_show,
};

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_merged = 0;
        return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        /*
         * Attributes that only implement .seq_ops are read-only and 'attr' is
         * the same as 'data' in this case.
         */
        if (attr == data || !attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);
        else
                return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
        .open = blk_mq_debugfs_open,
        .read = seq_read,
        .write = blk_mq_debugfs_write,
        .llseek = seq_lseek,
        .release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
        {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
        {"queued", 0600, hctx_queued_show, hctx_queued_write},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {"dispatch_busy", 0400, hctx_dispatch_busy_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
        {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
        {"merged", 0600, ctx_merged_show, ctx_merged_write},
        {"completed", 0600, ctx_completed_show, ctx_completed_write},
        {},
};

static bool debugfs_create_files(struct dentry *parent, void *data,
                        const struct blk_mq_debugfs_attr *attr)
{
        d_inode(parent)->i_private = data;

        for (; attr->name; attr++) {
                if (!debugfs_create_file(attr->name, attr->mode, parent,
                                (void *)attr, &blk_mq_debugfs_fops))
                        return false;
        }
        return true;
}

int blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!blk_debugfs_root)
                return -ENOENT;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                        blk_debugfs_root);
        if (!q->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->debugfs_dir, q,
                        blk_mq_debugfs_queue_attrs))
                goto err;

        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        if (q->elevator && !q->sched_debugfs_dir)
                blk_mq_debugfs_register_sched(q);

        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
                if (q->elevator && !hctx->sched_debugfs_dir &&
                    blk_mq_debugfs_register_sched_hctx(q, hctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->sched_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                        struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
        if (!ctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
                return -ENOMEM;

        return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
                        struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        if (!q->debugfs_dir)
                return -ENOENT;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
        if (!hctx->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->debugfs_dir, hctx,
                        blk_mq_debugfs_hctx_attrs))
                goto err;

        hctx_for_each_ctx(hctx, ctx, i) {
                if (blk_mq_debugfs_register_ctx(hctx, ctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister_hctx(hctx);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_debugfs_register_hctx(q, hctx))
                        return -ENOMEM;
        }

        return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        if (!q->debugfs_dir)
                return -ENOENT;

        if (!e->queue_debugfs_attrs)
                return 0;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
        if (!q->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->sched_debugfs_dir, q,
                        e->queue_debugfs_attrs))
                goto err;

        return 0;

err:
        blk_mq_debugfs_unregister_sched(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                        struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        if (!hctx->debugfs_dir)
                return -ENOENT;

        if (!e->hctx_debugfs_attrs)
                return 0;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                        hctx->debugfs_dir);
        if (!hctx->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                        e->hctx_debugfs_attrs))
                return -ENOMEM;

        return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}