/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(QUEUED),
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(BYPASS),
        QUEUE_FLAG_NAME(BIDI),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(NO_SG_MERGE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(FLUSH_NQ),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
        QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}
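/*
 * Illustrative output only: reading the "state" attribute of a queue with
 * QUEUE_FLAG_SAME_COMP, QUEUE_FLAG_IO_STAT and QUEUE_FLAG_REGISTERED set
 * would print "SAME_COMP|IO_STAT|REGISTERED". Bits without an entry in
 * blk_queue_flag_name[] are printed by blk_flags_show() as their bit
 * number.
 */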
static ssize_t queue_state_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed after blk_cleanup_queue() has
         * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
         * to avoid triggering a use-after-free.
         */
        if (blk_queue_dead(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else if (strcmp(op, "kick") == 0) {
                blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
}

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

        return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                q->write_hints[i] = 0;

        return count;
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS / 2; bucket++) {
                seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
                print_stat(m, &q->poll_stat[2 * bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
                print_stat(m, &q->poll_stat[2 * bucket + 1]);
                seq_puts(m, "\n");
        }
        return 0;
}
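/*
 * Layout note: poll_stat[] interleaves read and write buckets, so bucket b
 * uses poll_stat[2*b] for reads and poll_stat[2*b+1] for writes, and covers
 * I/O of size 1 << (9 + b) bytes. For example (hypothetical values), bucket 1
 * would print:
 *
 *      read (1024 Bytes): samples=211, mean=1750, min=902, max=4611
 *      write (1024 Bytes): samples=0
 */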
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
        HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(SG_MERGE),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(ZONE_REPORT),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(QUEUED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_TIMEOUT_EXPIRED),
        RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                       ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_printf(m, ", complete=%d", blk_rq_is_complete(rq));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
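/*
 * Illustrative example of one request as decoded by
 * __blk_mq_debugfs_rq_show() (the pointer and values are invented):
 *
 *      ffff88003812e000 {.op=WRITE, .cmd_flags=SYNC,
 *          .rq_flags=STARTED|ELVPRIV|IO_STAT|STATS, complete=0,
 *          .tag=-1, .internal_tag=217}
 */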
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_lock_irq(&q->requeue_lock);
        return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;

        return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
        __releases(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
        .start = queue_requeue_list_start,
        .next  = queue_requeue_list_next,
        .stop  = queue_requeue_list_stop,
        .show  = blk_mq_debugfs_rq_show,
};

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start = hctx_dispatch_start,
        .next  = hctx_dispatch_next,
        .stop  = hctx_dispatch_stop,
        .show  = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
        struct seq_file         *m;
        struct blk_mq_hw_ctx    *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
        const struct show_busy_params *params = data;

        if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
            blk_mq_rq_state(rq) != MQ_RQ_IDLE)
                __blk_mq_debugfs_rq_show(params->m,
                                         list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct show_busy_params params = { .m = m, .hctx = hctx };

        blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                                &params);

        return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}
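/*
 * Illustrative "tags" output (values invented); the bitmap dump that follows
 * the three counters comes from sbitmap_queue_show():
 *
 *      nr_tags=256
 *      nr_reserved_tags=0
 *      active_queues=0
 *
 *      bitmap_tags:
 *      depth=256
 *      ...
 */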
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->queued = 0;
        return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                              loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}
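/*
 * hctx_io_poll_write(), hctx_dispatched_write(), hctx_queued_write() and
 * hctx_run_write() ignore the written payload and simply reset their
 * counters. Example (assuming debugfs is mounted at /sys/kernel/debug and a
 * hypothetical device nvme0n1):
 *
 *      # cat /sys/kernel/debug/block/nvme0n1/hctx0/run
 *      42
 *      # echo 0 > /sys/kernel/debug/block/nvme0n1/hctx0/run
 */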
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_ctx *ctx = m->private;

        return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
        __releases(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
        .start = ctx_rq_list_start,
        .next  = ctx_rq_list_next,
        .stop  = ctx_rq_list_stop,
        .show  = blk_mq_debugfs_rq_show,
};

/* Two columns: rq_dispatched[1] (sync) followed by rq_dispatched[0] (async). */
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1],
                   ctx->rq_dispatched[0]);
        return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_merged = 0;
        return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1],
                   ctx->rq_completed[0]);
        return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        /*
         * Attributes that only implement .seq_ops are read-only and 'attr' is
         * the same as 'data' in this case.
         */
        if (attr == data || !attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}
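/*
 * Both helpers above recover their arguments from the dentry tree:
 * debugfs_create_files() stores the object being inspected (request_queue,
 * hctx or ctx) in the parent directory's inode->i_private, while each file's
 * own inode->i_private holds the struct blk_mq_debugfs_attr, so no per-file
 * allocation is needed.
 */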
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);
        else
                return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
        .open    = blk_mq_debugfs_open,
        .read    = seq_read,
        .write   = blk_mq_debugfs_write,
        .llseek  = seq_lseek,
        .release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, queue_poll_stat_show},
        {"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
        {"state", 0600, queue_state_show, queue_state_write},
        {"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
        {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
        {"queued", 0600, hctx_queued_show, hctx_queued_write},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
        {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
        {"merged", 0600, ctx_merged_show, ctx_merged_write},
        {"completed", 0600, ctx_completed_show, ctx_completed_write},
        {},
};

static bool debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        d_inode(parent)->i_private = data;

        for (; attr->name; attr++) {
                if (!debugfs_create_file(attr->name, attr->mode, parent,
                                         (void *)attr, &blk_mq_debugfs_fops))
                        return false;
        }
        return true;
}
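/*
 * A minimal sketch of wiring up a new read-only queue attribute; the name
 * "example" and the show function are hypothetical, not part of this file:
 *
 *      static int queue_example_show(void *data, struct seq_file *m)
 *      {
 *              struct request_queue *q = data;
 *
 *              seq_printf(m, "%lu\n", q->nr_requests);
 *              return 0;
 *      }
 *
 * plus an entry {"example", 0400, queue_example_show} in
 * blk_mq_debugfs_queue_attrs[] above.
 */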
int blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!blk_debugfs_root)
                return -ENOENT;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        if (!q->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->debugfs_dir, q,
                                  blk_mq_debugfs_queue_attrs))
                goto err;

        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        if (q->elevator && !q->sched_debugfs_dir)
                blk_mq_debugfs_register_sched(q);

        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
                if (q->elevator && !hctx->sched_debugfs_dir &&
                    blk_mq_debugfs_register_sched_hctx(q, hctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->sched_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                       struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
        if (!ctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
                return -ENOMEM;

        return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
                                 struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        if (!q->debugfs_dir)
                return -ENOENT;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
        if (!hctx->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->debugfs_dir, hctx,
                                  blk_mq_debugfs_hctx_attrs))
                goto err;

        hctx_for_each_ctx(hctx, ctx, i) {
                if (blk_mq_debugfs_register_ctx(hctx, ctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister_hctx(hctx);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_debugfs_register_hctx(q, hctx))
                        return -ENOMEM;
        }

        return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        if (!q->debugfs_dir)
                return -ENOENT;

        if (!e->queue_debugfs_attrs)
                return 0;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
        if (!q->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->sched_debugfs_dir, q,
                                  e->queue_debugfs_attrs))
                goto err;

        return 0;

err:
        blk_mq_debugfs_unregister_sched(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}
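/*
 * Resulting layout under <debugfs>/block/ (typically /sys/kernel/debug/block/),
 * shown here for a hypothetical single-hctx disk:
 *
 *      <disk>/
 *          poll_stat, requeue_list, state, write_hints
 *          sched/              (only if an elevator is attached)
 *          hctx0/
 *              state, flags, dispatch, busy, ctx_map, tags, ...
 *              sched/          (only if an elevator is attached)
 *              cpu0/
 *                  rq_list, dispatched, merged, completed
 */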
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                       struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        if (!hctx->debugfs_dir)
                return -ENOENT;

        if (!e->hctx_debugfs_attrs)
                return 0;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
        if (!hctx->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                                  e->hctx_debugfs_attrs))
                return -ENOMEM;

        return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}