1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk> 4 * 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/kernel.h> 10 #include <linux/blkdev.h> 11 #include <linux/blktrace_api.h> 12 #include <linux/percpu.h> 13 #include <linux/init.h> 14 #include <linux/mutex.h> 15 #include <linux/slab.h> 16 #include <linux/debugfs.h> 17 #include <linux/export.h> 18 #include <linux/time.h> 19 #include <linux/uaccess.h> 20 #include <linux/list.h> 21 #include <linux/blk-cgroup.h> 22 23 #include "../../block/blk.h" 24 25 #include <trace/events/block.h> 26 27 #include "trace_output.h" 28 29 #ifdef CONFIG_BLK_DEV_IO_TRACE 30 31 static unsigned int blktrace_seq __read_mostly = 1; 32 33 static struct trace_array *blk_tr; 34 static bool blk_tracer_enabled __read_mostly; 35 36 static LIST_HEAD(running_trace_list); 37 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock); 38 39 /* Select an alternative, minimalistic output than the original one */ 40 #define TRACE_BLK_OPT_CLASSIC 0x1 41 #define TRACE_BLK_OPT_CGROUP 0x2 42 #define TRACE_BLK_OPT_CGNAME 0x4 43 44 static struct tracer_opt blk_tracer_opts[] = { 45 /* Default disable the minimalistic output */ 46 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, 47 #ifdef CONFIG_BLK_CGROUP 48 { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) }, 49 { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) }, 50 #endif 51 { } 52 }; 53 54 static struct tracer_flags blk_tracer_flags = { 55 .val = 0, 56 .opts = blk_tracer_opts, 57 }; 58 59 /* Global reference count of probes */ 60 static DEFINE_MUTEX(blk_probe_mutex); 61 static int blk_probes_ref; 62 63 static void blk_register_tracepoints(void); 64 static void blk_unregister_tracepoints(void); 65 66 /* 67 * Send out a notify message. 68 */ 69 static void trace_note(struct blk_trace *bt, pid_t pid, int action, 70 const void *data, size_t len, u64 cgid) 71 { 72 struct blk_io_trace *t; 73 struct ring_buffer_event *event = NULL; 74 struct trace_buffer *buffer = NULL; 75 unsigned int trace_ctx = 0; 76 int cpu = smp_processor_id(); 77 bool blk_tracer = blk_tracer_enabled; 78 ssize_t cgid_len = cgid ? sizeof(cgid) : 0; 79 80 if (blk_tracer) { 81 buffer = blk_tr->array_buffer.buffer; 82 trace_ctx = tracing_gen_ctx_flags(0); 83 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 84 sizeof(*t) + len + cgid_len, 85 trace_ctx); 86 if (!event) 87 return; 88 t = ring_buffer_event_data(event); 89 goto record_it; 90 } 91 92 if (!bt->rchan) 93 return; 94 95 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len); 96 if (t) { 97 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 98 t->time = ktime_to_ns(ktime_get()); 99 record_it: 100 t->device = bt->dev; 101 t->action = action | (cgid ? __BLK_TN_CGROUP : 0); 102 t->pid = pid; 103 t->cpu = cpu; 104 t->pdu_len = len + cgid_len; 105 if (cgid_len) 106 memcpy((void *)t + sizeof(*t), &cgid, cgid_len); 107 memcpy((void *) t + sizeof(*t) + cgid_len, data, len); 108 109 if (blk_tracer) 110 trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx); 111 } 112 } 113 114 /* 115 * Send out a notify for this process, if we haven't done so since a trace 116 * started 117 */ 118 static void trace_note_tsk(struct task_struct *tsk) 119 { 120 unsigned long flags; 121 struct blk_trace *bt; 122 123 tsk->btrace_seq = blktrace_seq; 124 raw_spin_lock_irqsave(&running_trace_lock, flags); 125 list_for_each_entry(bt, &running_trace_list, running_list) { 126 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, 127 sizeof(tsk->comm), 0); 128 } 129 raw_spin_unlock_irqrestore(&running_trace_lock, flags); 130 } 131 132 static void trace_note_time(struct blk_trace *bt) 133 { 134 struct timespec64 now; 135 unsigned long flags; 136 u32 words[2]; 137 138 /* need to check user space to see if this breaks in y2038 or y2106 */ 139 ktime_get_real_ts64(&now); 140 words[0] = (u32)now.tv_sec; 141 words[1] = now.tv_nsec; 142 143 local_irq_save(flags); 144 trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0); 145 local_irq_restore(flags); 146 } 147 148 void __blk_trace_note_message(struct blk_trace *bt, 149 struct cgroup_subsys_state *css, const char *fmt, ...) 150 { 151 int n; 152 va_list args; 153 unsigned long flags; 154 char *buf; 155 u64 cgid = 0; 156 157 if (unlikely(bt->trace_state != Blktrace_running && 158 !blk_tracer_enabled)) 159 return; 160 161 /* 162 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note 163 * message to the trace. 164 */ 165 if (!(bt->act_mask & BLK_TC_NOTIFY)) 166 return; 167 168 local_irq_save(flags); 169 buf = this_cpu_ptr(bt->msg_data); 170 va_start(args, fmt); 171 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); 172 va_end(args); 173 174 #ifdef CONFIG_BLK_CGROUP 175 if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 176 cgid = cgroup_id(css->cgroup); 177 else 178 cgid = 1; 179 #endif 180 trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid); 181 local_irq_restore(flags); 182 } 183 EXPORT_SYMBOL_GPL(__blk_trace_note_message); 184 185 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, 186 pid_t pid) 187 { 188 if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) 189 return 1; 190 if (sector && (sector < bt->start_lba || sector > bt->end_lba)) 191 return 1; 192 if (bt->pid && pid != bt->pid) 193 return 1; 194 195 return 0; 196 } 197 198 /* 199 * Data direction bit lookup 200 */ 201 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 202 BLK_TC_ACT(BLK_TC_WRITE) }; 203 204 #define BLK_TC_RAHEAD BLK_TC_AHEAD 205 #define BLK_TC_PREFLUSH BLK_TC_FLUSH 206 207 /* The ilog2() calls fall out because they're constant */ 208 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ 209 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) 210 211 /* 212 * The worker for the various blk_add_trace*() types. Fills out a 213 * blk_io_trace structure and places it in a per-cpu subbuffer. 214 */ 215 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, 216 int op, int op_flags, u32 what, int error, int pdu_len, 217 void *pdu_data, u64 cgid) 218 { 219 struct task_struct *tsk = current; 220 struct ring_buffer_event *event = NULL; 221 struct trace_buffer *buffer = NULL; 222 struct blk_io_trace *t; 223 unsigned long flags = 0; 224 unsigned long *sequence; 225 unsigned int trace_ctx = 0; 226 pid_t pid; 227 int cpu; 228 bool blk_tracer = blk_tracer_enabled; 229 ssize_t cgid_len = cgid ? sizeof(cgid) : 0; 230 231 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) 232 return; 233 234 what |= ddir_act[op_is_write(op) ? WRITE : READ]; 235 what |= MASK_TC_BIT(op_flags, SYNC); 236 what |= MASK_TC_BIT(op_flags, RAHEAD); 237 what |= MASK_TC_BIT(op_flags, META); 238 what |= MASK_TC_BIT(op_flags, PREFLUSH); 239 what |= MASK_TC_BIT(op_flags, FUA); 240 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) 241 what |= BLK_TC_ACT(BLK_TC_DISCARD); 242 if (op == REQ_OP_FLUSH) 243 what |= BLK_TC_ACT(BLK_TC_FLUSH); 244 if (cgid) 245 what |= __BLK_TA_CGROUP; 246 247 pid = tsk->pid; 248 if (act_log_check(bt, what, sector, pid)) 249 return; 250 cpu = raw_smp_processor_id(); 251 252 if (blk_tracer) { 253 tracing_record_cmdline(current); 254 255 buffer = blk_tr->array_buffer.buffer; 256 trace_ctx = tracing_gen_ctx_flags(0); 257 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 258 sizeof(*t) + pdu_len + cgid_len, 259 trace_ctx); 260 if (!event) 261 return; 262 t = ring_buffer_event_data(event); 263 goto record_it; 264 } 265 266 if (unlikely(tsk->btrace_seq != blktrace_seq)) 267 trace_note_tsk(tsk); 268 269 /* 270 * A word about the locking here - we disable interrupts to reserve 271 * some space in the relay per-cpu buffer, to prevent an irq 272 * from coming in and stepping on our toes. 273 */ 274 local_irq_save(flags); 275 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len); 276 if (t) { 277 sequence = per_cpu_ptr(bt->sequence, cpu); 278 279 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 280 t->sequence = ++(*sequence); 281 t->time = ktime_to_ns(ktime_get()); 282 record_it: 283 /* 284 * These two are not needed in ftrace as they are in the 285 * generic trace_entry, filled by tracing_generic_entry_update, 286 * but for the trace_event->bin() synthesizer benefit we do it 287 * here too. 288 */ 289 t->cpu = cpu; 290 t->pid = pid; 291 292 t->sector = sector; 293 t->bytes = bytes; 294 t->action = what; 295 t->device = bt->dev; 296 t->error = error; 297 t->pdu_len = pdu_len + cgid_len; 298 299 if (cgid_len) 300 memcpy((void *)t + sizeof(*t), &cgid, cgid_len); 301 if (pdu_len) 302 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len); 303 304 if (blk_tracer) { 305 trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx); 306 return; 307 } 308 } 309 310 local_irq_restore(flags); 311 } 312 313 static void blk_trace_free(struct request_queue *q, struct blk_trace *bt) 314 { 315 relay_close(bt->rchan); 316 317 /* 318 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created 319 * under 'q->debugfs_dir', thus lookup and remove them. 320 */ 321 if (!bt->dir) { 322 debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir)); 323 debugfs_remove(debugfs_lookup("msg", q->debugfs_dir)); 324 } else { 325 debugfs_remove(bt->dir); 326 } 327 free_percpu(bt->sequence); 328 free_percpu(bt->msg_data); 329 kfree(bt); 330 } 331 332 static void get_probe_ref(void) 333 { 334 mutex_lock(&blk_probe_mutex); 335 if (++blk_probes_ref == 1) 336 blk_register_tracepoints(); 337 mutex_unlock(&blk_probe_mutex); 338 } 339 340 static void put_probe_ref(void) 341 { 342 mutex_lock(&blk_probe_mutex); 343 if (!--blk_probes_ref) 344 blk_unregister_tracepoints(); 345 mutex_unlock(&blk_probe_mutex); 346 } 347 348 static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt) 349 { 350 synchronize_rcu(); 351 blk_trace_free(q, bt); 352 put_probe_ref(); 353 } 354 355 static int __blk_trace_remove(struct request_queue *q) 356 { 357 struct blk_trace *bt; 358 359 bt = rcu_replace_pointer(q->blk_trace, NULL, 360 lockdep_is_held(&q->debugfs_mutex)); 361 if (!bt) 362 return -EINVAL; 363 364 if (bt->trace_state != Blktrace_running) 365 blk_trace_cleanup(q, bt); 366 367 return 0; 368 } 369 370 int blk_trace_remove(struct request_queue *q) 371 { 372 int ret; 373 374 mutex_lock(&q->debugfs_mutex); 375 ret = __blk_trace_remove(q); 376 mutex_unlock(&q->debugfs_mutex); 377 378 return ret; 379 } 380 EXPORT_SYMBOL_GPL(blk_trace_remove); 381 382 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, 383 size_t count, loff_t *ppos) 384 { 385 struct blk_trace *bt = filp->private_data; 386 char buf[16]; 387 388 snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); 389 390 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); 391 } 392 393 static const struct file_operations blk_dropped_fops = { 394 .owner = THIS_MODULE, 395 .open = simple_open, 396 .read = blk_dropped_read, 397 .llseek = default_llseek, 398 }; 399 400 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, 401 size_t count, loff_t *ppos) 402 { 403 char *msg; 404 struct blk_trace *bt; 405 406 if (count >= BLK_TN_MAX_MSG) 407 return -EINVAL; 408 409 msg = memdup_user_nul(buffer, count); 410 if (IS_ERR(msg)) 411 return PTR_ERR(msg); 412 413 bt = filp->private_data; 414 __blk_trace_note_message(bt, NULL, "%s", msg); 415 kfree(msg); 416 417 return count; 418 } 419 420 static const struct file_operations blk_msg_fops = { 421 .owner = THIS_MODULE, 422 .open = simple_open, 423 .write = blk_msg_write, 424 .llseek = noop_llseek, 425 }; 426 427 /* 428 * Keep track of how many times we encountered a full subbuffer, to aid 429 * the user space app in telling how many lost events there were. 430 */ 431 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, 432 void *prev_subbuf, size_t prev_padding) 433 { 434 struct blk_trace *bt; 435 436 if (!relay_buf_full(buf)) 437 return 1; 438 439 bt = buf->chan->private_data; 440 atomic_inc(&bt->dropped); 441 return 0; 442 } 443 444 static int blk_remove_buf_file_callback(struct dentry *dentry) 445 { 446 debugfs_remove(dentry); 447 448 return 0; 449 } 450 451 static struct dentry *blk_create_buf_file_callback(const char *filename, 452 struct dentry *parent, 453 umode_t mode, 454 struct rchan_buf *buf, 455 int *is_global) 456 { 457 return debugfs_create_file(filename, mode, parent, buf, 458 &relay_file_operations); 459 } 460 461 static const struct rchan_callbacks blk_relay_callbacks = { 462 .subbuf_start = blk_subbuf_start_callback, 463 .create_buf_file = blk_create_buf_file_callback, 464 .remove_buf_file = blk_remove_buf_file_callback, 465 }; 466 467 static void blk_trace_setup_lba(struct blk_trace *bt, 468 struct block_device *bdev) 469 { 470 if (bdev) { 471 bt->start_lba = bdev->bd_start_sect; 472 bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev); 473 } else { 474 bt->start_lba = 0; 475 bt->end_lba = -1ULL; 476 } 477 } 478 479 /* 480 * Setup everything required to start tracing 481 */ 482 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 483 struct block_device *bdev, 484 struct blk_user_trace_setup *buts) 485 { 486 struct blk_trace *bt = NULL; 487 struct dentry *dir = NULL; 488 int ret; 489 490 lockdep_assert_held(&q->debugfs_mutex); 491 492 if (!buts->buf_size || !buts->buf_nr) 493 return -EINVAL; 494 495 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); 496 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; 497 498 /* 499 * some device names have larger paths - convert the slashes 500 * to underscores for this to work as expected 501 */ 502 strreplace(buts->name, '/', '_'); 503 504 /* 505 * bdev can be NULL, as with scsi-generic, this is a helpful as 506 * we can be. 507 */ 508 if (rcu_dereference_protected(q->blk_trace, 509 lockdep_is_held(&q->debugfs_mutex))) { 510 pr_warn("Concurrent blktraces are not allowed on %s\n", 511 buts->name); 512 return -EBUSY; 513 } 514 515 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 516 if (!bt) 517 return -ENOMEM; 518 519 ret = -ENOMEM; 520 bt->sequence = alloc_percpu(unsigned long); 521 if (!bt->sequence) 522 goto err; 523 524 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 525 if (!bt->msg_data) 526 goto err; 527 528 /* 529 * When tracing the whole disk reuse the existing debugfs directory 530 * created by the block layer on init. For partitions block devices, 531 * and scsi-generic block devices we create a temporary new debugfs 532 * directory that will be removed once the trace ends. 533 */ 534 if (bdev && !bdev_is_partition(bdev)) 535 dir = q->debugfs_dir; 536 else 537 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); 538 539 /* 540 * As blktrace relies on debugfs for its interface the debugfs directory 541 * is required, contrary to the usual mantra of not checking for debugfs 542 * files or directories. 543 */ 544 if (IS_ERR_OR_NULL(dir)) { 545 pr_warn("debugfs_dir not present for %s so skipping\n", 546 buts->name); 547 ret = -ENOENT; 548 goto err; 549 } 550 551 bt->dev = dev; 552 atomic_set(&bt->dropped, 0); 553 INIT_LIST_HEAD(&bt->running_list); 554 555 ret = -EIO; 556 debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops); 557 debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); 558 559 bt->rchan = relay_open("trace", dir, buts->buf_size, 560 buts->buf_nr, &blk_relay_callbacks, bt); 561 if (!bt->rchan) 562 goto err; 563 564 bt->act_mask = buts->act_mask; 565 if (!bt->act_mask) 566 bt->act_mask = (u16) -1; 567 568 blk_trace_setup_lba(bt, bdev); 569 570 /* overwrite with user settings */ 571 if (buts->start_lba) 572 bt->start_lba = buts->start_lba; 573 if (buts->end_lba) 574 bt->end_lba = buts->end_lba; 575 576 bt->pid = buts->pid; 577 bt->trace_state = Blktrace_setup; 578 579 rcu_assign_pointer(q->blk_trace, bt); 580 get_probe_ref(); 581 582 ret = 0; 583 err: 584 if (ret) 585 blk_trace_free(q, bt); 586 return ret; 587 } 588 589 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 590 struct block_device *bdev, char __user *arg) 591 { 592 struct blk_user_trace_setup buts; 593 int ret; 594 595 ret = copy_from_user(&buts, arg, sizeof(buts)); 596 if (ret) 597 return -EFAULT; 598 599 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 600 if (ret) 601 return ret; 602 603 if (copy_to_user(arg, &buts, sizeof(buts))) { 604 __blk_trace_remove(q); 605 return -EFAULT; 606 } 607 return 0; 608 } 609 610 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 611 struct block_device *bdev, 612 char __user *arg) 613 { 614 int ret; 615 616 mutex_lock(&q->debugfs_mutex); 617 ret = __blk_trace_setup(q, name, dev, bdev, arg); 618 mutex_unlock(&q->debugfs_mutex); 619 620 return ret; 621 } 622 EXPORT_SYMBOL_GPL(blk_trace_setup); 623 624 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 625 static int compat_blk_trace_setup(struct request_queue *q, char *name, 626 dev_t dev, struct block_device *bdev, 627 char __user *arg) 628 { 629 struct blk_user_trace_setup buts; 630 struct compat_blk_user_trace_setup cbuts; 631 int ret; 632 633 if (copy_from_user(&cbuts, arg, sizeof(cbuts))) 634 return -EFAULT; 635 636 buts = (struct blk_user_trace_setup) { 637 .act_mask = cbuts.act_mask, 638 .buf_size = cbuts.buf_size, 639 .buf_nr = cbuts.buf_nr, 640 .start_lba = cbuts.start_lba, 641 .end_lba = cbuts.end_lba, 642 .pid = cbuts.pid, 643 }; 644 645 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 646 if (ret) 647 return ret; 648 649 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { 650 __blk_trace_remove(q); 651 return -EFAULT; 652 } 653 654 return 0; 655 } 656 #endif 657 658 static int __blk_trace_startstop(struct request_queue *q, int start) 659 { 660 int ret; 661 struct blk_trace *bt; 662 663 bt = rcu_dereference_protected(q->blk_trace, 664 lockdep_is_held(&q->debugfs_mutex)); 665 if (bt == NULL) 666 return -EINVAL; 667 668 /* 669 * For starting a trace, we can transition from a setup or stopped 670 * trace. For stopping a trace, the state must be running 671 */ 672 ret = -EINVAL; 673 if (start) { 674 if (bt->trace_state == Blktrace_setup || 675 bt->trace_state == Blktrace_stopped) { 676 blktrace_seq++; 677 smp_mb(); 678 bt->trace_state = Blktrace_running; 679 raw_spin_lock_irq(&running_trace_lock); 680 list_add(&bt->running_list, &running_trace_list); 681 raw_spin_unlock_irq(&running_trace_lock); 682 683 trace_note_time(bt); 684 ret = 0; 685 } 686 } else { 687 if (bt->trace_state == Blktrace_running) { 688 bt->trace_state = Blktrace_stopped; 689 raw_spin_lock_irq(&running_trace_lock); 690 list_del_init(&bt->running_list); 691 raw_spin_unlock_irq(&running_trace_lock); 692 relay_flush(bt->rchan); 693 ret = 0; 694 } 695 } 696 697 return ret; 698 } 699 700 int blk_trace_startstop(struct request_queue *q, int start) 701 { 702 int ret; 703 704 mutex_lock(&q->debugfs_mutex); 705 ret = __blk_trace_startstop(q, start); 706 mutex_unlock(&q->debugfs_mutex); 707 708 return ret; 709 } 710 EXPORT_SYMBOL_GPL(blk_trace_startstop); 711 712 /* 713 * When reading or writing the blktrace sysfs files, the references to the 714 * opened sysfs or device files should prevent the underlying block device 715 * from being removed. So no further delete protection is really needed. 716 */ 717 718 /** 719 * blk_trace_ioctl: - handle the ioctls associated with tracing 720 * @bdev: the block device 721 * @cmd: the ioctl cmd 722 * @arg: the argument data, if any 723 * 724 **/ 725 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) 726 { 727 struct request_queue *q; 728 int ret, start = 0; 729 char b[BDEVNAME_SIZE]; 730 731 q = bdev_get_queue(bdev); 732 if (!q) 733 return -ENXIO; 734 735 mutex_lock(&q->debugfs_mutex); 736 737 switch (cmd) { 738 case BLKTRACESETUP: 739 bdevname(bdev, b); 740 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 741 break; 742 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 743 case BLKTRACESETUP32: 744 bdevname(bdev, b); 745 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 746 break; 747 #endif 748 case BLKTRACESTART: 749 start = 1; 750 fallthrough; 751 case BLKTRACESTOP: 752 ret = __blk_trace_startstop(q, start); 753 break; 754 case BLKTRACETEARDOWN: 755 ret = __blk_trace_remove(q); 756 break; 757 default: 758 ret = -ENOTTY; 759 break; 760 } 761 762 mutex_unlock(&q->debugfs_mutex); 763 return ret; 764 } 765 766 /** 767 * blk_trace_shutdown: - stop and cleanup trace structures 768 * @q: the request queue associated with the device 769 * 770 **/ 771 void blk_trace_shutdown(struct request_queue *q) 772 { 773 mutex_lock(&q->debugfs_mutex); 774 if (rcu_dereference_protected(q->blk_trace, 775 lockdep_is_held(&q->debugfs_mutex))) { 776 __blk_trace_startstop(q, 0); 777 __blk_trace_remove(q); 778 } 779 780 mutex_unlock(&q->debugfs_mutex); 781 } 782 783 #ifdef CONFIG_BLK_CGROUP 784 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 785 { 786 struct cgroup_subsys_state *blkcg_css; 787 struct blk_trace *bt; 788 789 /* We don't use the 'bt' value here except as an optimization... */ 790 bt = rcu_dereference_protected(q->blk_trace, 1); 791 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 792 return 0; 793 794 blkcg_css = bio_blkcg_css(bio); 795 if (!blkcg_css) 796 return 0; 797 return cgroup_id(blkcg_css->cgroup); 798 } 799 #else 800 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 801 { 802 return 0; 803 } 804 #endif 805 806 static u64 807 blk_trace_request_get_cgid(struct request *rq) 808 { 809 if (!rq->bio) 810 return 0; 811 /* Use the first bio */ 812 return blk_trace_bio_get_cgid(rq->q, rq->bio); 813 } 814 815 /* 816 * blktrace probes 817 */ 818 819 /** 820 * blk_add_trace_rq - Add a trace for a request oriented action 821 * @rq: the source request 822 * @error: return status to log 823 * @nr_bytes: number of completed bytes 824 * @what: the action 825 * @cgid: the cgroup info 826 * 827 * Description: 828 * Records an action against a request. Will log the bio offset + size. 829 * 830 **/ 831 static void blk_add_trace_rq(struct request *rq, blk_status_t error, 832 unsigned int nr_bytes, u32 what, u64 cgid) 833 { 834 struct blk_trace *bt; 835 836 rcu_read_lock(); 837 bt = rcu_dereference(rq->q->blk_trace); 838 if (likely(!bt)) { 839 rcu_read_unlock(); 840 return; 841 } 842 843 if (blk_rq_is_passthrough(rq)) 844 what |= BLK_TC_ACT(BLK_TC_PC); 845 else 846 what |= BLK_TC_ACT(BLK_TC_FS); 847 848 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), 849 rq->cmd_flags, what, blk_status_to_errno(error), 0, 850 NULL, cgid); 851 rcu_read_unlock(); 852 } 853 854 static void blk_add_trace_rq_insert(void *ignore, struct request *rq) 855 { 856 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, 857 blk_trace_request_get_cgid(rq)); 858 } 859 860 static void blk_add_trace_rq_issue(void *ignore, struct request *rq) 861 { 862 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, 863 blk_trace_request_get_cgid(rq)); 864 } 865 866 static void blk_add_trace_rq_merge(void *ignore, struct request *rq) 867 { 868 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE, 869 blk_trace_request_get_cgid(rq)); 870 } 871 872 static void blk_add_trace_rq_requeue(void *ignore, struct request *rq) 873 { 874 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, 875 blk_trace_request_get_cgid(rq)); 876 } 877 878 static void blk_add_trace_rq_complete(void *ignore, struct request *rq, 879 blk_status_t error, unsigned int nr_bytes) 880 { 881 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, 882 blk_trace_request_get_cgid(rq)); 883 } 884 885 /** 886 * blk_add_trace_bio - Add a trace for a bio oriented action 887 * @q: queue the io is for 888 * @bio: the source bio 889 * @what: the action 890 * @error: error, if any 891 * 892 * Description: 893 * Records an action against a bio. Will log the bio offset + size. 894 * 895 **/ 896 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, 897 u32 what, int error) 898 { 899 struct blk_trace *bt; 900 901 rcu_read_lock(); 902 bt = rcu_dereference(q->blk_trace); 903 if (likely(!bt)) { 904 rcu_read_unlock(); 905 return; 906 } 907 908 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 909 bio_op(bio), bio->bi_opf, what, error, 0, NULL, 910 blk_trace_bio_get_cgid(q, bio)); 911 rcu_read_unlock(); 912 } 913 914 static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio) 915 { 916 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0); 917 } 918 919 static void blk_add_trace_bio_complete(void *ignore, 920 struct request_queue *q, struct bio *bio) 921 { 922 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, 923 blk_status_to_errno(bio->bi_status)); 924 } 925 926 static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio) 927 { 928 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE, 929 0); 930 } 931 932 static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio) 933 { 934 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE, 935 0); 936 } 937 938 static void blk_add_trace_bio_queue(void *ignore, struct bio *bio) 939 { 940 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0); 941 } 942 943 static void blk_add_trace_getrq(void *ignore, struct bio *bio) 944 { 945 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0); 946 } 947 948 static void blk_add_trace_plug(void *ignore, struct request_queue *q) 949 { 950 struct blk_trace *bt; 951 952 rcu_read_lock(); 953 bt = rcu_dereference(q->blk_trace); 954 if (bt) 955 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0); 956 rcu_read_unlock(); 957 } 958 959 static void blk_add_trace_unplug(void *ignore, struct request_queue *q, 960 unsigned int depth, bool explicit) 961 { 962 struct blk_trace *bt; 963 964 rcu_read_lock(); 965 bt = rcu_dereference(q->blk_trace); 966 if (bt) { 967 __be64 rpdu = cpu_to_be64(depth); 968 u32 what; 969 970 if (explicit) 971 what = BLK_TA_UNPLUG_IO; 972 else 973 what = BLK_TA_UNPLUG_TIMER; 974 975 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0); 976 } 977 rcu_read_unlock(); 978 } 979 980 static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu) 981 { 982 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 983 struct blk_trace *bt; 984 985 rcu_read_lock(); 986 bt = rcu_dereference(q->blk_trace); 987 if (bt) { 988 __be64 rpdu = cpu_to_be64(pdu); 989 990 __blk_add_trace(bt, bio->bi_iter.bi_sector, 991 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, 992 BLK_TA_SPLIT, 993 blk_status_to_errno(bio->bi_status), 994 sizeof(rpdu), &rpdu, 995 blk_trace_bio_get_cgid(q, bio)); 996 } 997 rcu_read_unlock(); 998 } 999 1000 /** 1001 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation 1002 * @ignore: trace callback data parameter (not used) 1003 * @bio: the source bio 1004 * @dev: source device 1005 * @from: source sector 1006 * 1007 * Called after a bio is remapped to a different device and/or sector. 1008 **/ 1009 static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev, 1010 sector_t from) 1011 { 1012 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 1013 struct blk_trace *bt; 1014 struct blk_io_trace_remap r; 1015 1016 rcu_read_lock(); 1017 bt = rcu_dereference(q->blk_trace); 1018 if (likely(!bt)) { 1019 rcu_read_unlock(); 1020 return; 1021 } 1022 1023 r.device_from = cpu_to_be32(dev); 1024 r.device_to = cpu_to_be32(bio_dev(bio)); 1025 r.sector_from = cpu_to_be64(from); 1026 1027 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 1028 bio_op(bio), bio->bi_opf, BLK_TA_REMAP, 1029 blk_status_to_errno(bio->bi_status), 1030 sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); 1031 rcu_read_unlock(); 1032 } 1033 1034 /** 1035 * blk_add_trace_rq_remap - Add a trace for a request-remap operation 1036 * @ignore: trace callback data parameter (not used) 1037 * @rq: the source request 1038 * @dev: target device 1039 * @from: source sector 1040 * 1041 * Description: 1042 * Device mapper remaps request to other devices. 1043 * Add a trace for that action. 1044 * 1045 **/ 1046 static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, 1047 sector_t from) 1048 { 1049 struct blk_trace *bt; 1050 struct blk_io_trace_remap r; 1051 1052 rcu_read_lock(); 1053 bt = rcu_dereference(rq->q->blk_trace); 1054 if (likely(!bt)) { 1055 rcu_read_unlock(); 1056 return; 1057 } 1058 1059 r.device_from = cpu_to_be32(dev); 1060 r.device_to = cpu_to_be32(disk_devt(rq->q->disk)); 1061 r.sector_from = cpu_to_be64(from); 1062 1063 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 1064 rq_data_dir(rq), 0, BLK_TA_REMAP, 0, 1065 sizeof(r), &r, blk_trace_request_get_cgid(rq)); 1066 rcu_read_unlock(); 1067 } 1068 1069 /** 1070 * blk_add_driver_data - Add binary message with driver-specific data 1071 * @rq: io request 1072 * @data: driver-specific data 1073 * @len: length of driver-specific data 1074 * 1075 * Description: 1076 * Some drivers might want to write driver-specific data per request. 1077 * 1078 **/ 1079 void blk_add_driver_data(struct request *rq, void *data, size_t len) 1080 { 1081 struct blk_trace *bt; 1082 1083 rcu_read_lock(); 1084 bt = rcu_dereference(rq->q->blk_trace); 1085 if (likely(!bt)) { 1086 rcu_read_unlock(); 1087 return; 1088 } 1089 1090 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, 1091 BLK_TA_DRV_DATA, 0, len, data, 1092 blk_trace_request_get_cgid(rq)); 1093 rcu_read_unlock(); 1094 } 1095 EXPORT_SYMBOL_GPL(blk_add_driver_data); 1096 1097 static void blk_register_tracepoints(void) 1098 { 1099 int ret; 1100 1101 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); 1102 WARN_ON(ret); 1103 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); 1104 WARN_ON(ret); 1105 ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); 1106 WARN_ON(ret); 1107 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); 1108 WARN_ON(ret); 1109 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); 1110 WARN_ON(ret); 1111 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); 1112 WARN_ON(ret); 1113 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); 1114 WARN_ON(ret); 1115 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); 1116 WARN_ON(ret); 1117 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); 1118 WARN_ON(ret); 1119 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); 1120 WARN_ON(ret); 1121 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); 1122 WARN_ON(ret); 1123 ret = register_trace_block_plug(blk_add_trace_plug, NULL); 1124 WARN_ON(ret); 1125 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); 1126 WARN_ON(ret); 1127 ret = register_trace_block_split(blk_add_trace_split, NULL); 1128 WARN_ON(ret); 1129 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1130 WARN_ON(ret); 1131 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1132 WARN_ON(ret); 1133 } 1134 1135 static void blk_unregister_tracepoints(void) 1136 { 1137 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1138 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1139 unregister_trace_block_split(blk_add_trace_split, NULL); 1140 unregister_trace_block_unplug(blk_add_trace_unplug, NULL); 1141 unregister_trace_block_plug(blk_add_trace_plug, NULL); 1142 unregister_trace_block_getrq(blk_add_trace_getrq, NULL); 1143 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); 1144 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); 1145 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); 1146 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); 1147 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); 1148 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); 1149 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); 1150 unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); 1151 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); 1152 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); 1153 1154 tracepoint_synchronize_unregister(); 1155 } 1156 1157 /* 1158 * struct blk_io_tracer formatting routines 1159 */ 1160 1161 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) 1162 { 1163 int i = 0; 1164 int tc = t->action >> BLK_TC_SHIFT; 1165 1166 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1167 rwbs[i++] = 'N'; 1168 goto out; 1169 } 1170 1171 if (tc & BLK_TC_FLUSH) 1172 rwbs[i++] = 'F'; 1173 1174 if (tc & BLK_TC_DISCARD) 1175 rwbs[i++] = 'D'; 1176 else if (tc & BLK_TC_WRITE) 1177 rwbs[i++] = 'W'; 1178 else if (t->bytes) 1179 rwbs[i++] = 'R'; 1180 else 1181 rwbs[i++] = 'N'; 1182 1183 if (tc & BLK_TC_FUA) 1184 rwbs[i++] = 'F'; 1185 if (tc & BLK_TC_AHEAD) 1186 rwbs[i++] = 'A'; 1187 if (tc & BLK_TC_SYNC) 1188 rwbs[i++] = 'S'; 1189 if (tc & BLK_TC_META) 1190 rwbs[i++] = 'M'; 1191 out: 1192 rwbs[i] = '\0'; 1193 } 1194 1195 static inline 1196 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) 1197 { 1198 return (const struct blk_io_trace *)ent; 1199 } 1200 1201 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg) 1202 { 1203 return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0); 1204 } 1205 1206 static inline u64 t_cgid(const struct trace_entry *ent) 1207 { 1208 return *(u64 *)(te_blk_io_trace(ent) + 1); 1209 } 1210 1211 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg) 1212 { 1213 return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0); 1214 } 1215 1216 static inline u32 t_action(const struct trace_entry *ent) 1217 { 1218 return te_blk_io_trace(ent)->action; 1219 } 1220 1221 static inline u32 t_bytes(const struct trace_entry *ent) 1222 { 1223 return te_blk_io_trace(ent)->bytes; 1224 } 1225 1226 static inline u32 t_sec(const struct trace_entry *ent) 1227 { 1228 return te_blk_io_trace(ent)->bytes >> 9; 1229 } 1230 1231 static inline unsigned long long t_sector(const struct trace_entry *ent) 1232 { 1233 return te_blk_io_trace(ent)->sector; 1234 } 1235 1236 static inline __u16 t_error(const struct trace_entry *ent) 1237 { 1238 return te_blk_io_trace(ent)->error; 1239 } 1240 1241 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) 1242 { 1243 const __be64 *val = pdu_start(ent, has_cg); 1244 return be64_to_cpu(*val); 1245 } 1246 1247 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, 1248 bool has_cg); 1249 1250 static void blk_log_action_classic(struct trace_iterator *iter, const char *act, 1251 bool has_cg) 1252 { 1253 char rwbs[RWBS_LEN]; 1254 unsigned long long ts = iter->ts; 1255 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); 1256 unsigned secs = (unsigned long)ts; 1257 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1258 1259 fill_rwbs(rwbs, t); 1260 1261 trace_seq_printf(&iter->seq, 1262 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", 1263 MAJOR(t->device), MINOR(t->device), iter->cpu, 1264 secs, nsec_rem, iter->ent->pid, act, rwbs); 1265 } 1266 1267 static void blk_log_action(struct trace_iterator *iter, const char *act, 1268 bool has_cg) 1269 { 1270 char rwbs[RWBS_LEN]; 1271 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1272 1273 fill_rwbs(rwbs, t); 1274 if (has_cg) { 1275 u64 id = t_cgid(iter->ent); 1276 1277 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) { 1278 char blkcg_name_buf[NAME_MAX + 1] = "<...>"; 1279 1280 cgroup_path_from_kernfs_id(id, blkcg_name_buf, 1281 sizeof(blkcg_name_buf)); 1282 trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ", 1283 MAJOR(t->device), MINOR(t->device), 1284 blkcg_name_buf, act, rwbs); 1285 } else { 1286 /* 1287 * The cgid portion used to be "INO,GEN". Userland 1288 * builds a FILEID_INO32_GEN fid out of them and 1289 * opens the cgroup using open_by_handle_at(2). 1290 * While 32bit ino setups are still the same, 64bit 1291 * ones now use the 64bit ino as the whole ID and 1292 * no longer use generation. 1293 * 1294 * Regardless of the content, always output 1295 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can 1296 * be mapped back to @id on both 64 and 32bit ino 1297 * setups. See __kernfs_fh_to_dentry(). 1298 */ 1299 trace_seq_printf(&iter->seq, 1300 "%3d,%-3d %llx,%-llx %2s %3s ", 1301 MAJOR(t->device), MINOR(t->device), 1302 id & U32_MAX, id >> 32, act, rwbs); 1303 } 1304 } else 1305 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", 1306 MAJOR(t->device), MINOR(t->device), act, rwbs); 1307 } 1308 1309 static void blk_log_dump_pdu(struct trace_seq *s, 1310 const struct trace_entry *ent, bool has_cg) 1311 { 1312 const unsigned char *pdu_buf; 1313 int pdu_len; 1314 int i, end; 1315 1316 pdu_buf = pdu_start(ent, has_cg); 1317 pdu_len = pdu_real_len(ent, has_cg); 1318 1319 if (!pdu_len) 1320 return; 1321 1322 /* find the last zero that needs to be printed */ 1323 for (end = pdu_len - 1; end >= 0; end--) 1324 if (pdu_buf[end]) 1325 break; 1326 end++; 1327 1328 trace_seq_putc(s, '('); 1329 1330 for (i = 0; i < pdu_len; i++) { 1331 1332 trace_seq_printf(s, "%s%02x", 1333 i == 0 ? "" : " ", pdu_buf[i]); 1334 1335 /* 1336 * stop when the rest is just zeros and indicate so 1337 * with a ".." appended 1338 */ 1339 if (i == end && end != pdu_len - 1) { 1340 trace_seq_puts(s, " ..) "); 1341 return; 1342 } 1343 } 1344 1345 trace_seq_puts(s, ") "); 1346 } 1347 1348 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1349 { 1350 char cmd[TASK_COMM_LEN]; 1351 1352 trace_find_cmdline(ent->pid, cmd); 1353 1354 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1355 trace_seq_printf(s, "%u ", t_bytes(ent)); 1356 blk_log_dump_pdu(s, ent, has_cg); 1357 trace_seq_printf(s, "[%s]\n", cmd); 1358 } else { 1359 if (t_sec(ent)) 1360 trace_seq_printf(s, "%llu + %u [%s]\n", 1361 t_sector(ent), t_sec(ent), cmd); 1362 else 1363 trace_seq_printf(s, "[%s]\n", cmd); 1364 } 1365 } 1366 1367 static void blk_log_with_error(struct trace_seq *s, 1368 const struct trace_entry *ent, bool has_cg) 1369 { 1370 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1371 blk_log_dump_pdu(s, ent, has_cg); 1372 trace_seq_printf(s, "[%d]\n", t_error(ent)); 1373 } else { 1374 if (t_sec(ent)) 1375 trace_seq_printf(s, "%llu + %u [%d]\n", 1376 t_sector(ent), 1377 t_sec(ent), t_error(ent)); 1378 else 1379 trace_seq_printf(s, "%llu [%d]\n", 1380 t_sector(ent), t_error(ent)); 1381 } 1382 } 1383 1384 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1385 { 1386 const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); 1387 1388 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", 1389 t_sector(ent), t_sec(ent), 1390 MAJOR(be32_to_cpu(__r->device_from)), 1391 MINOR(be32_to_cpu(__r->device_from)), 1392 be64_to_cpu(__r->sector_from)); 1393 } 1394 1395 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1396 { 1397 char cmd[TASK_COMM_LEN]; 1398 1399 trace_find_cmdline(ent->pid, cmd); 1400 1401 trace_seq_printf(s, "[%s]\n", cmd); 1402 } 1403 1404 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1405 { 1406 char cmd[TASK_COMM_LEN]; 1407 1408 trace_find_cmdline(ent->pid, cmd); 1409 1410 trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg)); 1411 } 1412 1413 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1414 { 1415 char cmd[TASK_COMM_LEN]; 1416 1417 trace_find_cmdline(ent->pid, cmd); 1418 1419 trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), 1420 get_pdu_int(ent, has_cg), cmd); 1421 } 1422 1423 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent, 1424 bool has_cg) 1425 { 1426 1427 trace_seq_putmem(s, pdu_start(ent, has_cg), 1428 pdu_real_len(ent, has_cg)); 1429 trace_seq_putc(s, '\n'); 1430 } 1431 1432 /* 1433 * struct tracer operations 1434 */ 1435 1436 static void blk_tracer_print_header(struct seq_file *m) 1437 { 1438 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1439 return; 1440 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" 1441 "# | | | | | |\n"); 1442 } 1443 1444 static void blk_tracer_start(struct trace_array *tr) 1445 { 1446 blk_tracer_enabled = true; 1447 } 1448 1449 static int blk_tracer_init(struct trace_array *tr) 1450 { 1451 blk_tr = tr; 1452 blk_tracer_start(tr); 1453 return 0; 1454 } 1455 1456 static void blk_tracer_stop(struct trace_array *tr) 1457 { 1458 blk_tracer_enabled = false; 1459 } 1460 1461 static void blk_tracer_reset(struct trace_array *tr) 1462 { 1463 blk_tracer_stop(tr); 1464 } 1465 1466 static const struct { 1467 const char *act[2]; 1468 void (*print)(struct trace_seq *s, const struct trace_entry *ent, 1469 bool has_cg); 1470 } what2act[] = { 1471 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, 1472 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, 1473 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, 1474 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, 1475 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, 1476 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, 1477 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, 1478 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, 1479 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, 1480 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, 1481 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, 1482 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, 1483 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, 1484 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, 1485 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, 1486 }; 1487 1488 static enum print_line_t print_one_line(struct trace_iterator *iter, 1489 bool classic) 1490 { 1491 struct trace_array *tr = iter->tr; 1492 struct trace_seq *s = &iter->seq; 1493 const struct blk_io_trace *t; 1494 u16 what; 1495 bool long_act; 1496 blk_log_action_t *log_action; 1497 bool has_cg; 1498 1499 t = te_blk_io_trace(iter->ent); 1500 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP; 1501 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); 1502 log_action = classic ? &blk_log_action_classic : &blk_log_action; 1503 has_cg = t->action & __BLK_TA_CGROUP; 1504 1505 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1506 log_action(iter, long_act ? "message" : "m", has_cg); 1507 blk_log_msg(s, iter->ent, has_cg); 1508 return trace_handle_return(s); 1509 } 1510 1511 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) 1512 trace_seq_printf(s, "Unknown action %x\n", what); 1513 else { 1514 log_action(iter, what2act[what].act[long_act], has_cg); 1515 what2act[what].print(s, iter->ent, has_cg); 1516 } 1517 1518 return trace_handle_return(s); 1519 } 1520 1521 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, 1522 int flags, struct trace_event *event) 1523 { 1524 return print_one_line(iter, false); 1525 } 1526 1527 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) 1528 { 1529 struct trace_seq *s = &iter->seq; 1530 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; 1531 const int offset = offsetof(struct blk_io_trace, sector); 1532 struct blk_io_trace old = { 1533 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, 1534 .time = iter->ts, 1535 }; 1536 1537 trace_seq_putmem(s, &old, offset); 1538 trace_seq_putmem(s, &t->sector, 1539 sizeof(old) - offset + t->pdu_len); 1540 } 1541 1542 static enum print_line_t 1543 blk_trace_event_print_binary(struct trace_iterator *iter, int flags, 1544 struct trace_event *event) 1545 { 1546 blk_trace_synthesize_old_trace(iter); 1547 1548 return trace_handle_return(&iter->seq); 1549 } 1550 1551 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) 1552 { 1553 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1554 return TRACE_TYPE_UNHANDLED; 1555 1556 return print_one_line(iter, true); 1557 } 1558 1559 static int 1560 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 1561 { 1562 /* don't output context-info for blk_classic output */ 1563 if (bit == TRACE_BLK_OPT_CLASSIC) { 1564 if (set) 1565 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1566 else 1567 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; 1568 } 1569 return 0; 1570 } 1571 1572 static struct tracer blk_tracer __read_mostly = { 1573 .name = "blk", 1574 .init = blk_tracer_init, 1575 .reset = blk_tracer_reset, 1576 .start = blk_tracer_start, 1577 .stop = blk_tracer_stop, 1578 .print_header = blk_tracer_print_header, 1579 .print_line = blk_tracer_print_line, 1580 .flags = &blk_tracer_flags, 1581 .set_flag = blk_tracer_set_flag, 1582 }; 1583 1584 static struct trace_event_functions trace_blk_event_funcs = { 1585 .trace = blk_trace_event_print, 1586 .binary = blk_trace_event_print_binary, 1587 }; 1588 1589 static struct trace_event trace_blk_event = { 1590 .type = TRACE_BLK, 1591 .funcs = &trace_blk_event_funcs, 1592 }; 1593 1594 static int __init init_blk_tracer(void) 1595 { 1596 if (!register_trace_event(&trace_blk_event)) { 1597 pr_warn("Warning: could not register block events\n"); 1598 return 1; 1599 } 1600 1601 if (register_tracer(&blk_tracer) != 0) { 1602 pr_warn("Warning: could not register the block tracer\n"); 1603 unregister_trace_event(&trace_blk_event); 1604 return 1; 1605 } 1606 1607 return 0; 1608 } 1609 1610 device_initcall(init_blk_tracer); 1611 1612 static int blk_trace_remove_queue(struct request_queue *q) 1613 { 1614 struct blk_trace *bt; 1615 1616 bt = rcu_replace_pointer(q->blk_trace, NULL, 1617 lockdep_is_held(&q->debugfs_mutex)); 1618 if (bt == NULL) 1619 return -EINVAL; 1620 1621 if (bt->trace_state == Blktrace_running) { 1622 bt->trace_state = Blktrace_stopped; 1623 raw_spin_lock_irq(&running_trace_lock); 1624 list_del_init(&bt->running_list); 1625 raw_spin_unlock_irq(&running_trace_lock); 1626 relay_flush(bt->rchan); 1627 } 1628 1629 put_probe_ref(); 1630 synchronize_rcu(); 1631 blk_trace_free(q, bt); 1632 return 0; 1633 } 1634 1635 /* 1636 * Setup everything required to start tracing 1637 */ 1638 static int blk_trace_setup_queue(struct request_queue *q, 1639 struct block_device *bdev) 1640 { 1641 struct blk_trace *bt = NULL; 1642 int ret = -ENOMEM; 1643 1644 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 1645 if (!bt) 1646 return -ENOMEM; 1647 1648 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 1649 if (!bt->msg_data) 1650 goto free_bt; 1651 1652 bt->dev = bdev->bd_dev; 1653 bt->act_mask = (u16)-1; 1654 1655 blk_trace_setup_lba(bt, bdev); 1656 1657 rcu_assign_pointer(q->blk_trace, bt); 1658 get_probe_ref(); 1659 return 0; 1660 1661 free_bt: 1662 blk_trace_free(q, bt); 1663 return ret; 1664 } 1665 1666 /* 1667 * sysfs interface to enable and configure tracing 1668 */ 1669 1670 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1671 struct device_attribute *attr, 1672 char *buf); 1673 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1674 struct device_attribute *attr, 1675 const char *buf, size_t count); 1676 #define BLK_TRACE_DEVICE_ATTR(_name) \ 1677 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ 1678 sysfs_blk_trace_attr_show, \ 1679 sysfs_blk_trace_attr_store) 1680 1681 static BLK_TRACE_DEVICE_ATTR(enable); 1682 static BLK_TRACE_DEVICE_ATTR(act_mask); 1683 static BLK_TRACE_DEVICE_ATTR(pid); 1684 static BLK_TRACE_DEVICE_ATTR(start_lba); 1685 static BLK_TRACE_DEVICE_ATTR(end_lba); 1686 1687 static struct attribute *blk_trace_attrs[] = { 1688 &dev_attr_enable.attr, 1689 &dev_attr_act_mask.attr, 1690 &dev_attr_pid.attr, 1691 &dev_attr_start_lba.attr, 1692 &dev_attr_end_lba.attr, 1693 NULL 1694 }; 1695 1696 struct attribute_group blk_trace_attr_group = { 1697 .name = "trace", 1698 .attrs = blk_trace_attrs, 1699 }; 1700 1701 static const struct { 1702 int mask; 1703 const char *str; 1704 } mask_maps[] = { 1705 { BLK_TC_READ, "read" }, 1706 { BLK_TC_WRITE, "write" }, 1707 { BLK_TC_FLUSH, "flush" }, 1708 { BLK_TC_SYNC, "sync" }, 1709 { BLK_TC_QUEUE, "queue" }, 1710 { BLK_TC_REQUEUE, "requeue" }, 1711 { BLK_TC_ISSUE, "issue" }, 1712 { BLK_TC_COMPLETE, "complete" }, 1713 { BLK_TC_FS, "fs" }, 1714 { BLK_TC_PC, "pc" }, 1715 { BLK_TC_NOTIFY, "notify" }, 1716 { BLK_TC_AHEAD, "ahead" }, 1717 { BLK_TC_META, "meta" }, 1718 { BLK_TC_DISCARD, "discard" }, 1719 { BLK_TC_DRV_DATA, "drv_data" }, 1720 { BLK_TC_FUA, "fua" }, 1721 }; 1722 1723 static int blk_trace_str2mask(const char *str) 1724 { 1725 int i; 1726 int mask = 0; 1727 char *buf, *s, *token; 1728 1729 buf = kstrdup(str, GFP_KERNEL); 1730 if (buf == NULL) 1731 return -ENOMEM; 1732 s = strstrip(buf); 1733 1734 while (1) { 1735 token = strsep(&s, ","); 1736 if (token == NULL) 1737 break; 1738 1739 if (*token == '\0') 1740 continue; 1741 1742 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { 1743 if (strcasecmp(token, mask_maps[i].str) == 0) { 1744 mask |= mask_maps[i].mask; 1745 break; 1746 } 1747 } 1748 if (i == ARRAY_SIZE(mask_maps)) { 1749 mask = -EINVAL; 1750 break; 1751 } 1752 } 1753 kfree(buf); 1754 1755 return mask; 1756 } 1757 1758 static ssize_t blk_trace_mask2str(char *buf, int mask) 1759 { 1760 int i; 1761 char *p = buf; 1762 1763 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { 1764 if (mask & mask_maps[i].mask) { 1765 p += sprintf(p, "%s%s", 1766 (p == buf) ? "" : ",", mask_maps[i].str); 1767 } 1768 } 1769 *p++ = '\n'; 1770 1771 return p - buf; 1772 } 1773 1774 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1775 struct device_attribute *attr, 1776 char *buf) 1777 { 1778 struct block_device *bdev = dev_to_bdev(dev); 1779 struct request_queue *q = bdev_get_queue(bdev); 1780 struct blk_trace *bt; 1781 ssize_t ret = -ENXIO; 1782 1783 mutex_lock(&q->debugfs_mutex); 1784 1785 bt = rcu_dereference_protected(q->blk_trace, 1786 lockdep_is_held(&q->debugfs_mutex)); 1787 if (attr == &dev_attr_enable) { 1788 ret = sprintf(buf, "%u\n", !!bt); 1789 goto out_unlock_bdev; 1790 } 1791 1792 if (bt == NULL) 1793 ret = sprintf(buf, "disabled\n"); 1794 else if (attr == &dev_attr_act_mask) 1795 ret = blk_trace_mask2str(buf, bt->act_mask); 1796 else if (attr == &dev_attr_pid) 1797 ret = sprintf(buf, "%u\n", bt->pid); 1798 else if (attr == &dev_attr_start_lba) 1799 ret = sprintf(buf, "%llu\n", bt->start_lba); 1800 else if (attr == &dev_attr_end_lba) 1801 ret = sprintf(buf, "%llu\n", bt->end_lba); 1802 1803 out_unlock_bdev: 1804 mutex_unlock(&q->debugfs_mutex); 1805 return ret; 1806 } 1807 1808 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1809 struct device_attribute *attr, 1810 const char *buf, size_t count) 1811 { 1812 struct block_device *bdev = dev_to_bdev(dev); 1813 struct request_queue *q = bdev_get_queue(bdev); 1814 struct blk_trace *bt; 1815 u64 value; 1816 ssize_t ret = -EINVAL; 1817 1818 if (count == 0) 1819 goto out; 1820 1821 if (attr == &dev_attr_act_mask) { 1822 if (kstrtoull(buf, 0, &value)) { 1823 /* Assume it is a list of trace category names */ 1824 ret = blk_trace_str2mask(buf); 1825 if (ret < 0) 1826 goto out; 1827 value = ret; 1828 } 1829 } else { 1830 if (kstrtoull(buf, 0, &value)) 1831 goto out; 1832 } 1833 1834 mutex_lock(&q->debugfs_mutex); 1835 1836 bt = rcu_dereference_protected(q->blk_trace, 1837 lockdep_is_held(&q->debugfs_mutex)); 1838 if (attr == &dev_attr_enable) { 1839 if (!!value == !!bt) { 1840 ret = 0; 1841 goto out_unlock_bdev; 1842 } 1843 if (value) 1844 ret = blk_trace_setup_queue(q, bdev); 1845 else 1846 ret = blk_trace_remove_queue(q); 1847 goto out_unlock_bdev; 1848 } 1849 1850 ret = 0; 1851 if (bt == NULL) { 1852 ret = blk_trace_setup_queue(q, bdev); 1853 bt = rcu_dereference_protected(q->blk_trace, 1854 lockdep_is_held(&q->debugfs_mutex)); 1855 } 1856 1857 if (ret == 0) { 1858 if (attr == &dev_attr_act_mask) 1859 bt->act_mask = value; 1860 else if (attr == &dev_attr_pid) 1861 bt->pid = value; 1862 else if (attr == &dev_attr_start_lba) 1863 bt->start_lba = value; 1864 else if (attr == &dev_attr_end_lba) 1865 bt->end_lba = value; 1866 } 1867 1868 out_unlock_bdev: 1869 mutex_unlock(&q->debugfs_mutex); 1870 out: 1871 return ret ? ret : count; 1872 } 1873 1874 int blk_trace_init_sysfs(struct device *dev) 1875 { 1876 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1877 } 1878 1879 void blk_trace_remove_sysfs(struct device *dev) 1880 { 1881 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); 1882 } 1883 1884 #endif /* CONFIG_BLK_DEV_IO_TRACE */ 1885 1886 #ifdef CONFIG_EVENT_TRACING 1887 1888 /** 1889 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string. 1890 * @rwbs: buffer to be filled 1891 * @op: REQ_OP_XXX for the tracepoint 1892 * 1893 * Description: 1894 * Maps the REQ_OP_XXX to character and fills the buffer provided by the 1895 * caller with resulting string. 1896 * 1897 **/ 1898 void blk_fill_rwbs(char *rwbs, unsigned int op) 1899 { 1900 int i = 0; 1901 1902 if (op & REQ_PREFLUSH) 1903 rwbs[i++] = 'F'; 1904 1905 switch (op & REQ_OP_MASK) { 1906 case REQ_OP_WRITE: 1907 rwbs[i++] = 'W'; 1908 break; 1909 case REQ_OP_DISCARD: 1910 rwbs[i++] = 'D'; 1911 break; 1912 case REQ_OP_SECURE_ERASE: 1913 rwbs[i++] = 'D'; 1914 rwbs[i++] = 'E'; 1915 break; 1916 case REQ_OP_FLUSH: 1917 rwbs[i++] = 'F'; 1918 break; 1919 case REQ_OP_READ: 1920 rwbs[i++] = 'R'; 1921 break; 1922 default: 1923 rwbs[i++] = 'N'; 1924 } 1925 1926 if (op & REQ_FUA) 1927 rwbs[i++] = 'F'; 1928 if (op & REQ_RAHEAD) 1929 rwbs[i++] = 'A'; 1930 if (op & REQ_SYNC) 1931 rwbs[i++] = 'S'; 1932 if (op & REQ_META) 1933 rwbs[i++] = 'M'; 1934 1935 rwbs[i] = '\0'; 1936 } 1937 EXPORT_SYMBOL_GPL(blk_fill_rwbs); 1938 1939 #endif /* CONFIG_EVENT_TRACING */ 1940 1941