1 /* 2 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk> 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public License 14 * along with this program; if not, write to the Free Software 15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * 17 */ 18 #include <linux/kernel.h> 19 #include <linux/blkdev.h> 20 #include <linux/blktrace_api.h> 21 #include <linux/percpu.h> 22 #include <linux/init.h> 23 #include <linux/mutex.h> 24 #include <linux/slab.h> 25 #include <linux/debugfs.h> 26 #include <linux/export.h> 27 #include <linux/time.h> 28 #include <linux/uaccess.h> 29 #include <linux/list.h> 30 #include <linux/blk-cgroup.h> 31 32 #include "../../block/blk.h" 33 34 #include <trace/events/block.h> 35 36 #include "trace_output.h" 37 38 #ifdef CONFIG_BLK_DEV_IO_TRACE 39 40 static unsigned int blktrace_seq __read_mostly = 1; 41 42 static struct trace_array *blk_tr; 43 static bool blk_tracer_enabled __read_mostly; 44 45 static LIST_HEAD(running_trace_list); 46 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock); 47 48 /* Select an alternative, minimalistic output than the original one */ 49 #define TRACE_BLK_OPT_CLASSIC 0x1 50 #define TRACE_BLK_OPT_CGROUP 0x2 51 #define TRACE_BLK_OPT_CGNAME 0x4 52 53 static struct tracer_opt blk_tracer_opts[] = { 54 /* Default disable the minimalistic output */ 55 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, 56 #ifdef CONFIG_BLK_CGROUP 57 { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) }, 58 { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) }, 59 #endif 60 { } 61 }; 62 63 static struct tracer_flags blk_tracer_flags = { 64 .val = 0, 65 .opts = blk_tracer_opts, 66 }; 67 68 /* Global reference count of probes */ 69 static DEFINE_MUTEX(blk_probe_mutex); 70 static int blk_probes_ref; 71 72 static void blk_register_tracepoints(void); 73 static void blk_unregister_tracepoints(void); 74 75 /* 76 * Send out a notify message. 77 */ 78 static void trace_note(struct blk_trace *bt, pid_t pid, int action, 79 const void *data, size_t len, 80 union kernfs_node_id *cgid) 81 { 82 struct blk_io_trace *t; 83 struct ring_buffer_event *event = NULL; 84 struct ring_buffer *buffer = NULL; 85 int pc = 0; 86 int cpu = smp_processor_id(); 87 bool blk_tracer = blk_tracer_enabled; 88 ssize_t cgid_len = cgid ? sizeof(*cgid) : 0; 89 90 if (blk_tracer) { 91 buffer = blk_tr->trace_buffer.buffer; 92 pc = preempt_count(); 93 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 94 sizeof(*t) + len + cgid_len, 95 0, pc); 96 if (!event) 97 return; 98 t = ring_buffer_event_data(event); 99 goto record_it; 100 } 101 102 if (!bt->rchan) 103 return; 104 105 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len); 106 if (t) { 107 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 108 t->time = ktime_to_ns(ktime_get()); 109 record_it: 110 t->device = bt->dev; 111 t->action = action | (cgid ? __BLK_TN_CGROUP : 0); 112 t->pid = pid; 113 t->cpu = cpu; 114 t->pdu_len = len + cgid_len; 115 if (cgid) 116 memcpy((void *)t + sizeof(*t), cgid, cgid_len); 117 memcpy((void *) t + sizeof(*t) + cgid_len, data, len); 118 119 if (blk_tracer) 120 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); 121 } 122 } 123 124 /* 125 * Send out a notify for this process, if we haven't done so since a trace 126 * started 127 */ 128 static void trace_note_tsk(struct task_struct *tsk) 129 { 130 unsigned long flags; 131 struct blk_trace *bt; 132 133 tsk->btrace_seq = blktrace_seq; 134 spin_lock_irqsave(&running_trace_lock, flags); 135 list_for_each_entry(bt, &running_trace_list, running_list) { 136 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, 137 sizeof(tsk->comm), NULL); 138 } 139 spin_unlock_irqrestore(&running_trace_lock, flags); 140 } 141 142 static void trace_note_time(struct blk_trace *bt) 143 { 144 struct timespec64 now; 145 unsigned long flags; 146 u32 words[2]; 147 148 /* need to check user space to see if this breaks in y2038 or y2106 */ 149 ktime_get_real_ts64(&now); 150 words[0] = (u32)now.tv_sec; 151 words[1] = now.tv_nsec; 152 153 local_irq_save(flags); 154 trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL); 155 local_irq_restore(flags); 156 } 157 158 void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg, 159 const char *fmt, ...) 160 { 161 int n; 162 va_list args; 163 unsigned long flags; 164 char *buf; 165 166 if (unlikely(bt->trace_state != Blktrace_running && 167 !blk_tracer_enabled)) 168 return; 169 170 /* 171 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note 172 * message to the trace. 173 */ 174 if (!(bt->act_mask & BLK_TC_NOTIFY)) 175 return; 176 177 local_irq_save(flags); 178 buf = this_cpu_ptr(bt->msg_data); 179 va_start(args, fmt); 180 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); 181 va_end(args); 182 183 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 184 blkcg = NULL; 185 #ifdef CONFIG_BLK_CGROUP 186 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 187 blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL); 188 #else 189 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL); 190 #endif 191 local_irq_restore(flags); 192 } 193 EXPORT_SYMBOL_GPL(__trace_note_message); 194 195 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, 196 pid_t pid) 197 { 198 if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) 199 return 1; 200 if (sector && (sector < bt->start_lba || sector > bt->end_lba)) 201 return 1; 202 if (bt->pid && pid != bt->pid) 203 return 1; 204 205 return 0; 206 } 207 208 /* 209 * Data direction bit lookup 210 */ 211 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 212 BLK_TC_ACT(BLK_TC_WRITE) }; 213 214 #define BLK_TC_RAHEAD BLK_TC_AHEAD 215 #define BLK_TC_PREFLUSH BLK_TC_FLUSH 216 217 /* The ilog2() calls fall out because they're constant */ 218 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ 219 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) 220 221 /* 222 * The worker for the various blk_add_trace*() types. Fills out a 223 * blk_io_trace structure and places it in a per-cpu subbuffer. 224 */ 225 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, 226 int op, int op_flags, u32 what, int error, int pdu_len, 227 void *pdu_data, union kernfs_node_id *cgid) 228 { 229 struct task_struct *tsk = current; 230 struct ring_buffer_event *event = NULL; 231 struct ring_buffer *buffer = NULL; 232 struct blk_io_trace *t; 233 unsigned long flags = 0; 234 unsigned long *sequence; 235 pid_t pid; 236 int cpu, pc = 0; 237 bool blk_tracer = blk_tracer_enabled; 238 ssize_t cgid_len = cgid ? sizeof(*cgid) : 0; 239 240 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) 241 return; 242 243 what |= ddir_act[op_is_write(op) ? WRITE : READ]; 244 what |= MASK_TC_BIT(op_flags, SYNC); 245 what |= MASK_TC_BIT(op_flags, RAHEAD); 246 what |= MASK_TC_BIT(op_flags, META); 247 what |= MASK_TC_BIT(op_flags, PREFLUSH); 248 what |= MASK_TC_BIT(op_flags, FUA); 249 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) 250 what |= BLK_TC_ACT(BLK_TC_DISCARD); 251 if (op == REQ_OP_FLUSH) 252 what |= BLK_TC_ACT(BLK_TC_FLUSH); 253 if (cgid) 254 what |= __BLK_TA_CGROUP; 255 256 pid = tsk->pid; 257 if (act_log_check(bt, what, sector, pid)) 258 return; 259 cpu = raw_smp_processor_id(); 260 261 if (blk_tracer) { 262 tracing_record_cmdline(current); 263 264 buffer = blk_tr->trace_buffer.buffer; 265 pc = preempt_count(); 266 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 267 sizeof(*t) + pdu_len + cgid_len, 268 0, pc); 269 if (!event) 270 return; 271 t = ring_buffer_event_data(event); 272 goto record_it; 273 } 274 275 if (unlikely(tsk->btrace_seq != blktrace_seq)) 276 trace_note_tsk(tsk); 277 278 /* 279 * A word about the locking here - we disable interrupts to reserve 280 * some space in the relay per-cpu buffer, to prevent an irq 281 * from coming in and stepping on our toes. 282 */ 283 local_irq_save(flags); 284 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len); 285 if (t) { 286 sequence = per_cpu_ptr(bt->sequence, cpu); 287 288 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 289 t->sequence = ++(*sequence); 290 t->time = ktime_to_ns(ktime_get()); 291 record_it: 292 /* 293 * These two are not needed in ftrace as they are in the 294 * generic trace_entry, filled by tracing_generic_entry_update, 295 * but for the trace_event->bin() synthesizer benefit we do it 296 * here too. 297 */ 298 t->cpu = cpu; 299 t->pid = pid; 300 301 t->sector = sector; 302 t->bytes = bytes; 303 t->action = what; 304 t->device = bt->dev; 305 t->error = error; 306 t->pdu_len = pdu_len + cgid_len; 307 308 if (cgid_len) 309 memcpy((void *)t + sizeof(*t), cgid, cgid_len); 310 if (pdu_len) 311 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len); 312 313 if (blk_tracer) { 314 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); 315 return; 316 } 317 } 318 319 local_irq_restore(flags); 320 } 321 322 static void blk_trace_free(struct blk_trace *bt) 323 { 324 debugfs_remove(bt->msg_file); 325 debugfs_remove(bt->dropped_file); 326 relay_close(bt->rchan); 327 debugfs_remove(bt->dir); 328 free_percpu(bt->sequence); 329 free_percpu(bt->msg_data); 330 kfree(bt); 331 } 332 333 static void get_probe_ref(void) 334 { 335 mutex_lock(&blk_probe_mutex); 336 if (++blk_probes_ref == 1) 337 blk_register_tracepoints(); 338 mutex_unlock(&blk_probe_mutex); 339 } 340 341 static void put_probe_ref(void) 342 { 343 mutex_lock(&blk_probe_mutex); 344 if (!--blk_probes_ref) 345 blk_unregister_tracepoints(); 346 mutex_unlock(&blk_probe_mutex); 347 } 348 349 static void blk_trace_cleanup(struct blk_trace *bt) 350 { 351 blk_trace_free(bt); 352 put_probe_ref(); 353 } 354 355 static int __blk_trace_remove(struct request_queue *q) 356 { 357 struct blk_trace *bt; 358 359 bt = xchg(&q->blk_trace, NULL); 360 if (!bt) 361 return -EINVAL; 362 363 if (bt->trace_state != Blktrace_running) 364 blk_trace_cleanup(bt); 365 366 return 0; 367 } 368 369 int blk_trace_remove(struct request_queue *q) 370 { 371 int ret; 372 373 mutex_lock(&q->blk_trace_mutex); 374 ret = __blk_trace_remove(q); 375 mutex_unlock(&q->blk_trace_mutex); 376 377 return ret; 378 } 379 EXPORT_SYMBOL_GPL(blk_trace_remove); 380 381 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, 382 size_t count, loff_t *ppos) 383 { 384 struct blk_trace *bt = filp->private_data; 385 char buf[16]; 386 387 snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); 388 389 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); 390 } 391 392 static const struct file_operations blk_dropped_fops = { 393 .owner = THIS_MODULE, 394 .open = simple_open, 395 .read = blk_dropped_read, 396 .llseek = default_llseek, 397 }; 398 399 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, 400 size_t count, loff_t *ppos) 401 { 402 char *msg; 403 struct blk_trace *bt; 404 405 if (count >= BLK_TN_MAX_MSG) 406 return -EINVAL; 407 408 msg = memdup_user_nul(buffer, count); 409 if (IS_ERR(msg)) 410 return PTR_ERR(msg); 411 412 bt = filp->private_data; 413 __trace_note_message(bt, NULL, "%s", msg); 414 kfree(msg); 415 416 return count; 417 } 418 419 static const struct file_operations blk_msg_fops = { 420 .owner = THIS_MODULE, 421 .open = simple_open, 422 .write = blk_msg_write, 423 .llseek = noop_llseek, 424 }; 425 426 /* 427 * Keep track of how many times we encountered a full subbuffer, to aid 428 * the user space app in telling how many lost events there were. 429 */ 430 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, 431 void *prev_subbuf, size_t prev_padding) 432 { 433 struct blk_trace *bt; 434 435 if (!relay_buf_full(buf)) 436 return 1; 437 438 bt = buf->chan->private_data; 439 atomic_inc(&bt->dropped); 440 return 0; 441 } 442 443 static int blk_remove_buf_file_callback(struct dentry *dentry) 444 { 445 debugfs_remove(dentry); 446 447 return 0; 448 } 449 450 static struct dentry *blk_create_buf_file_callback(const char *filename, 451 struct dentry *parent, 452 umode_t mode, 453 struct rchan_buf *buf, 454 int *is_global) 455 { 456 return debugfs_create_file(filename, mode, parent, buf, 457 &relay_file_operations); 458 } 459 460 static struct rchan_callbacks blk_relay_callbacks = { 461 .subbuf_start = blk_subbuf_start_callback, 462 .create_buf_file = blk_create_buf_file_callback, 463 .remove_buf_file = blk_remove_buf_file_callback, 464 }; 465 466 static void blk_trace_setup_lba(struct blk_trace *bt, 467 struct block_device *bdev) 468 { 469 struct hd_struct *part = NULL; 470 471 if (bdev) 472 part = bdev->bd_part; 473 474 if (part) { 475 bt->start_lba = part->start_sect; 476 bt->end_lba = part->start_sect + part->nr_sects; 477 } else { 478 bt->start_lba = 0; 479 bt->end_lba = -1ULL; 480 } 481 } 482 483 /* 484 * Setup everything required to start tracing 485 */ 486 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 487 struct block_device *bdev, 488 struct blk_user_trace_setup *buts) 489 { 490 struct blk_trace *bt = NULL; 491 struct dentry *dir = NULL; 492 int ret; 493 494 if (!buts->buf_size || !buts->buf_nr) 495 return -EINVAL; 496 497 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); 498 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; 499 500 /* 501 * some device names have larger paths - convert the slashes 502 * to underscores for this to work as expected 503 */ 504 strreplace(buts->name, '/', '_'); 505 506 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 507 if (!bt) 508 return -ENOMEM; 509 510 ret = -ENOMEM; 511 bt->sequence = alloc_percpu(unsigned long); 512 if (!bt->sequence) 513 goto err; 514 515 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 516 if (!bt->msg_data) 517 goto err; 518 519 ret = -ENOENT; 520 521 if (!blk_debugfs_root) 522 goto err; 523 524 dir = debugfs_lookup(buts->name, blk_debugfs_root); 525 if (!dir) 526 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); 527 if (!dir) 528 goto err; 529 530 bt->dev = dev; 531 atomic_set(&bt->dropped, 0); 532 INIT_LIST_HEAD(&bt->running_list); 533 534 ret = -EIO; 535 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, 536 &blk_dropped_fops); 537 if (!bt->dropped_file) 538 goto err; 539 540 bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); 541 if (!bt->msg_file) 542 goto err; 543 544 bt->rchan = relay_open("trace", dir, buts->buf_size, 545 buts->buf_nr, &blk_relay_callbacks, bt); 546 if (!bt->rchan) 547 goto err; 548 549 bt->act_mask = buts->act_mask; 550 if (!bt->act_mask) 551 bt->act_mask = (u16) -1; 552 553 blk_trace_setup_lba(bt, bdev); 554 555 /* overwrite with user settings */ 556 if (buts->start_lba) 557 bt->start_lba = buts->start_lba; 558 if (buts->end_lba) 559 bt->end_lba = buts->end_lba; 560 561 bt->pid = buts->pid; 562 bt->trace_state = Blktrace_setup; 563 564 ret = -EBUSY; 565 if (cmpxchg(&q->blk_trace, NULL, bt)) 566 goto err; 567 568 get_probe_ref(); 569 570 ret = 0; 571 err: 572 if (dir && !bt->dir) 573 dput(dir); 574 if (ret) 575 blk_trace_free(bt); 576 return ret; 577 } 578 579 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 580 struct block_device *bdev, char __user *arg) 581 { 582 struct blk_user_trace_setup buts; 583 int ret; 584 585 ret = copy_from_user(&buts, arg, sizeof(buts)); 586 if (ret) 587 return -EFAULT; 588 589 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 590 if (ret) 591 return ret; 592 593 if (copy_to_user(arg, &buts, sizeof(buts))) { 594 blk_trace_remove(q); 595 return -EFAULT; 596 } 597 return 0; 598 } 599 600 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 601 struct block_device *bdev, 602 char __user *arg) 603 { 604 int ret; 605 606 mutex_lock(&q->blk_trace_mutex); 607 ret = __blk_trace_setup(q, name, dev, bdev, arg); 608 mutex_unlock(&q->blk_trace_mutex); 609 610 return ret; 611 } 612 EXPORT_SYMBOL_GPL(blk_trace_setup); 613 614 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 615 static int compat_blk_trace_setup(struct request_queue *q, char *name, 616 dev_t dev, struct block_device *bdev, 617 char __user *arg) 618 { 619 struct blk_user_trace_setup buts; 620 struct compat_blk_user_trace_setup cbuts; 621 int ret; 622 623 if (copy_from_user(&cbuts, arg, sizeof(cbuts))) 624 return -EFAULT; 625 626 buts = (struct blk_user_trace_setup) { 627 .act_mask = cbuts.act_mask, 628 .buf_size = cbuts.buf_size, 629 .buf_nr = cbuts.buf_nr, 630 .start_lba = cbuts.start_lba, 631 .end_lba = cbuts.end_lba, 632 .pid = cbuts.pid, 633 }; 634 635 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 636 if (ret) 637 return ret; 638 639 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { 640 blk_trace_remove(q); 641 return -EFAULT; 642 } 643 644 return 0; 645 } 646 #endif 647 648 static int __blk_trace_startstop(struct request_queue *q, int start) 649 { 650 int ret; 651 struct blk_trace *bt = q->blk_trace; 652 653 if (bt == NULL) 654 return -EINVAL; 655 656 /* 657 * For starting a trace, we can transition from a setup or stopped 658 * trace. For stopping a trace, the state must be running 659 */ 660 ret = -EINVAL; 661 if (start) { 662 if (bt->trace_state == Blktrace_setup || 663 bt->trace_state == Blktrace_stopped) { 664 blktrace_seq++; 665 smp_mb(); 666 bt->trace_state = Blktrace_running; 667 spin_lock_irq(&running_trace_lock); 668 list_add(&bt->running_list, &running_trace_list); 669 spin_unlock_irq(&running_trace_lock); 670 671 trace_note_time(bt); 672 ret = 0; 673 } 674 } else { 675 if (bt->trace_state == Blktrace_running) { 676 bt->trace_state = Blktrace_stopped; 677 spin_lock_irq(&running_trace_lock); 678 list_del_init(&bt->running_list); 679 spin_unlock_irq(&running_trace_lock); 680 relay_flush(bt->rchan); 681 ret = 0; 682 } 683 } 684 685 return ret; 686 } 687 688 int blk_trace_startstop(struct request_queue *q, int start) 689 { 690 int ret; 691 692 mutex_lock(&q->blk_trace_mutex); 693 ret = __blk_trace_startstop(q, start); 694 mutex_unlock(&q->blk_trace_mutex); 695 696 return ret; 697 } 698 EXPORT_SYMBOL_GPL(blk_trace_startstop); 699 700 /* 701 * When reading or writing the blktrace sysfs files, the references to the 702 * opened sysfs or device files should prevent the underlying block device 703 * from being removed. So no further delete protection is really needed. 704 */ 705 706 /** 707 * blk_trace_ioctl: - handle the ioctls associated with tracing 708 * @bdev: the block device 709 * @cmd: the ioctl cmd 710 * @arg: the argument data, if any 711 * 712 **/ 713 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) 714 { 715 struct request_queue *q; 716 int ret, start = 0; 717 char b[BDEVNAME_SIZE]; 718 719 q = bdev_get_queue(bdev); 720 if (!q) 721 return -ENXIO; 722 723 mutex_lock(&q->blk_trace_mutex); 724 725 switch (cmd) { 726 case BLKTRACESETUP: 727 bdevname(bdev, b); 728 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 729 break; 730 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 731 case BLKTRACESETUP32: 732 bdevname(bdev, b); 733 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 734 break; 735 #endif 736 case BLKTRACESTART: 737 start = 1; 738 case BLKTRACESTOP: 739 ret = __blk_trace_startstop(q, start); 740 break; 741 case BLKTRACETEARDOWN: 742 ret = __blk_trace_remove(q); 743 break; 744 default: 745 ret = -ENOTTY; 746 break; 747 } 748 749 mutex_unlock(&q->blk_trace_mutex); 750 return ret; 751 } 752 753 /** 754 * blk_trace_shutdown: - stop and cleanup trace structures 755 * @q: the request queue associated with the device 756 * 757 **/ 758 void blk_trace_shutdown(struct request_queue *q) 759 { 760 mutex_lock(&q->blk_trace_mutex); 761 762 if (q->blk_trace) { 763 __blk_trace_startstop(q, 0); 764 __blk_trace_remove(q); 765 } 766 767 mutex_unlock(&q->blk_trace_mutex); 768 } 769 770 #ifdef CONFIG_BLK_CGROUP 771 static union kernfs_node_id * 772 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 773 { 774 struct blk_trace *bt = q->blk_trace; 775 776 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 777 return NULL; 778 779 if (!bio->bi_css) 780 return NULL; 781 return cgroup_get_kernfs_id(bio->bi_css->cgroup); 782 } 783 #else 784 static union kernfs_node_id * 785 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 786 { 787 return NULL; 788 } 789 #endif 790 791 static union kernfs_node_id * 792 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq) 793 { 794 if (!rq->bio) 795 return NULL; 796 /* Use the first bio */ 797 return blk_trace_bio_get_cgid(q, rq->bio); 798 } 799 800 /* 801 * blktrace probes 802 */ 803 804 /** 805 * blk_add_trace_rq - Add a trace for a request oriented action 806 * @rq: the source request 807 * @error: return status to log 808 * @nr_bytes: number of completed bytes 809 * @what: the action 810 * @cgid: the cgroup info 811 * 812 * Description: 813 * Records an action against a request. Will log the bio offset + size. 814 * 815 **/ 816 static void blk_add_trace_rq(struct request *rq, int error, 817 unsigned int nr_bytes, u32 what, 818 union kernfs_node_id *cgid) 819 { 820 struct blk_trace *bt = rq->q->blk_trace; 821 822 if (likely(!bt)) 823 return; 824 825 if (blk_rq_is_passthrough(rq)) 826 what |= BLK_TC_ACT(BLK_TC_PC); 827 else 828 what |= BLK_TC_ACT(BLK_TC_FS); 829 830 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), 831 rq->cmd_flags, what, error, 0, NULL, cgid); 832 } 833 834 static void blk_add_trace_rq_insert(void *ignore, 835 struct request_queue *q, struct request *rq) 836 { 837 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, 838 blk_trace_request_get_cgid(q, rq)); 839 } 840 841 static void blk_add_trace_rq_issue(void *ignore, 842 struct request_queue *q, struct request *rq) 843 { 844 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, 845 blk_trace_request_get_cgid(q, rq)); 846 } 847 848 static void blk_add_trace_rq_requeue(void *ignore, 849 struct request_queue *q, 850 struct request *rq) 851 { 852 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, 853 blk_trace_request_get_cgid(q, rq)); 854 } 855 856 static void blk_add_trace_rq_complete(void *ignore, struct request *rq, 857 int error, unsigned int nr_bytes) 858 { 859 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, 860 blk_trace_request_get_cgid(rq->q, rq)); 861 } 862 863 /** 864 * blk_add_trace_bio - Add a trace for a bio oriented action 865 * @q: queue the io is for 866 * @bio: the source bio 867 * @what: the action 868 * @error: error, if any 869 * 870 * Description: 871 * Records an action against a bio. Will log the bio offset + size. 872 * 873 **/ 874 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, 875 u32 what, int error, union kernfs_node_id *cgid) 876 { 877 struct blk_trace *bt = q->blk_trace; 878 879 if (likely(!bt)) 880 return; 881 882 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 883 bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid); 884 } 885 886 static void blk_add_trace_bio_bounce(void *ignore, 887 struct request_queue *q, struct bio *bio) 888 { 889 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0, 890 blk_trace_bio_get_cgid(q, bio)); 891 } 892 893 static void blk_add_trace_bio_complete(void *ignore, 894 struct request_queue *q, struct bio *bio, 895 int error) 896 { 897 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error, 898 blk_trace_bio_get_cgid(q, bio)); 899 } 900 901 static void blk_add_trace_bio_backmerge(void *ignore, 902 struct request_queue *q, 903 struct request *rq, 904 struct bio *bio) 905 { 906 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0, 907 blk_trace_bio_get_cgid(q, bio)); 908 } 909 910 static void blk_add_trace_bio_frontmerge(void *ignore, 911 struct request_queue *q, 912 struct request *rq, 913 struct bio *bio) 914 { 915 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0, 916 blk_trace_bio_get_cgid(q, bio)); 917 } 918 919 static void blk_add_trace_bio_queue(void *ignore, 920 struct request_queue *q, struct bio *bio) 921 { 922 blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0, 923 blk_trace_bio_get_cgid(q, bio)); 924 } 925 926 static void blk_add_trace_getrq(void *ignore, 927 struct request_queue *q, 928 struct bio *bio, int rw) 929 { 930 if (bio) 931 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0, 932 blk_trace_bio_get_cgid(q, bio)); 933 else { 934 struct blk_trace *bt = q->blk_trace; 935 936 if (bt) 937 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, 938 NULL, NULL); 939 } 940 } 941 942 943 static void blk_add_trace_sleeprq(void *ignore, 944 struct request_queue *q, 945 struct bio *bio, int rw) 946 { 947 if (bio) 948 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0, 949 blk_trace_bio_get_cgid(q, bio)); 950 else { 951 struct blk_trace *bt = q->blk_trace; 952 953 if (bt) 954 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, 955 0, 0, NULL, NULL); 956 } 957 } 958 959 static void blk_add_trace_plug(void *ignore, struct request_queue *q) 960 { 961 struct blk_trace *bt = q->blk_trace; 962 963 if (bt) 964 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); 965 } 966 967 static void blk_add_trace_unplug(void *ignore, struct request_queue *q, 968 unsigned int depth, bool explicit) 969 { 970 struct blk_trace *bt = q->blk_trace; 971 972 if (bt) { 973 __be64 rpdu = cpu_to_be64(depth); 974 u32 what; 975 976 if (explicit) 977 what = BLK_TA_UNPLUG_IO; 978 else 979 what = BLK_TA_UNPLUG_TIMER; 980 981 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); 982 } 983 } 984 985 static void blk_add_trace_split(void *ignore, 986 struct request_queue *q, struct bio *bio, 987 unsigned int pdu) 988 { 989 struct blk_trace *bt = q->blk_trace; 990 991 if (bt) { 992 __be64 rpdu = cpu_to_be64(pdu); 993 994 __blk_add_trace(bt, bio->bi_iter.bi_sector, 995 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, 996 BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), 997 &rpdu, blk_trace_bio_get_cgid(q, bio)); 998 } 999 } 1000 1001 /** 1002 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation 1003 * @ignore: trace callback data parameter (not used) 1004 * @q: queue the io is for 1005 * @bio: the source bio 1006 * @dev: target device 1007 * @from: source sector 1008 * 1009 * Description: 1010 * Device mapper or raid target sometimes need to split a bio because 1011 * it spans a stripe (or similar). Add a trace for that action. 1012 * 1013 **/ 1014 static void blk_add_trace_bio_remap(void *ignore, 1015 struct request_queue *q, struct bio *bio, 1016 dev_t dev, sector_t from) 1017 { 1018 struct blk_trace *bt = q->blk_trace; 1019 struct blk_io_trace_remap r; 1020 1021 if (likely(!bt)) 1022 return; 1023 1024 r.device_from = cpu_to_be32(dev); 1025 r.device_to = cpu_to_be32(bio_dev(bio)); 1026 r.sector_from = cpu_to_be64(from); 1027 1028 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 1029 bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, 1030 sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); 1031 } 1032 1033 /** 1034 * blk_add_trace_rq_remap - Add a trace for a request-remap operation 1035 * @ignore: trace callback data parameter (not used) 1036 * @q: queue the io is for 1037 * @rq: the source request 1038 * @dev: target device 1039 * @from: source sector 1040 * 1041 * Description: 1042 * Device mapper remaps request to other devices. 1043 * Add a trace for that action. 1044 * 1045 **/ 1046 static void blk_add_trace_rq_remap(void *ignore, 1047 struct request_queue *q, 1048 struct request *rq, dev_t dev, 1049 sector_t from) 1050 { 1051 struct blk_trace *bt = q->blk_trace; 1052 struct blk_io_trace_remap r; 1053 1054 if (likely(!bt)) 1055 return; 1056 1057 r.device_from = cpu_to_be32(dev); 1058 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); 1059 r.sector_from = cpu_to_be64(from); 1060 1061 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 1062 rq_data_dir(rq), 0, BLK_TA_REMAP, 0, 1063 sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); 1064 } 1065 1066 /** 1067 * blk_add_driver_data - Add binary message with driver-specific data 1068 * @q: queue the io is for 1069 * @rq: io request 1070 * @data: driver-specific data 1071 * @len: length of driver-specific data 1072 * 1073 * Description: 1074 * Some drivers might want to write driver-specific data per request. 1075 * 1076 **/ 1077 void blk_add_driver_data(struct request_queue *q, 1078 struct request *rq, 1079 void *data, size_t len) 1080 { 1081 struct blk_trace *bt = q->blk_trace; 1082 1083 if (likely(!bt)) 1084 return; 1085 1086 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, 1087 BLK_TA_DRV_DATA, 0, len, data, 1088 blk_trace_request_get_cgid(q, rq)); 1089 } 1090 EXPORT_SYMBOL_GPL(blk_add_driver_data); 1091 1092 static void blk_register_tracepoints(void) 1093 { 1094 int ret; 1095 1096 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); 1097 WARN_ON(ret); 1098 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); 1099 WARN_ON(ret); 1100 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); 1101 WARN_ON(ret); 1102 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); 1103 WARN_ON(ret); 1104 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); 1105 WARN_ON(ret); 1106 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); 1107 WARN_ON(ret); 1108 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); 1109 WARN_ON(ret); 1110 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); 1111 WARN_ON(ret); 1112 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); 1113 WARN_ON(ret); 1114 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); 1115 WARN_ON(ret); 1116 ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); 1117 WARN_ON(ret); 1118 ret = register_trace_block_plug(blk_add_trace_plug, NULL); 1119 WARN_ON(ret); 1120 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); 1121 WARN_ON(ret); 1122 ret = register_trace_block_split(blk_add_trace_split, NULL); 1123 WARN_ON(ret); 1124 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1125 WARN_ON(ret); 1126 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1127 WARN_ON(ret); 1128 } 1129 1130 static void blk_unregister_tracepoints(void) 1131 { 1132 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1133 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1134 unregister_trace_block_split(blk_add_trace_split, NULL); 1135 unregister_trace_block_unplug(blk_add_trace_unplug, NULL); 1136 unregister_trace_block_plug(blk_add_trace_plug, NULL); 1137 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); 1138 unregister_trace_block_getrq(blk_add_trace_getrq, NULL); 1139 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); 1140 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); 1141 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); 1142 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); 1143 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); 1144 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); 1145 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); 1146 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); 1147 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); 1148 1149 tracepoint_synchronize_unregister(); 1150 } 1151 1152 /* 1153 * struct blk_io_tracer formatting routines 1154 */ 1155 1156 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) 1157 { 1158 int i = 0; 1159 int tc = t->action >> BLK_TC_SHIFT; 1160 1161 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1162 rwbs[i++] = 'N'; 1163 goto out; 1164 } 1165 1166 if (tc & BLK_TC_FLUSH) 1167 rwbs[i++] = 'F'; 1168 1169 if (tc & BLK_TC_DISCARD) 1170 rwbs[i++] = 'D'; 1171 else if (tc & BLK_TC_WRITE) 1172 rwbs[i++] = 'W'; 1173 else if (t->bytes) 1174 rwbs[i++] = 'R'; 1175 else 1176 rwbs[i++] = 'N'; 1177 1178 if (tc & BLK_TC_FUA) 1179 rwbs[i++] = 'F'; 1180 if (tc & BLK_TC_AHEAD) 1181 rwbs[i++] = 'A'; 1182 if (tc & BLK_TC_SYNC) 1183 rwbs[i++] = 'S'; 1184 if (tc & BLK_TC_META) 1185 rwbs[i++] = 'M'; 1186 out: 1187 rwbs[i] = '\0'; 1188 } 1189 1190 static inline 1191 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) 1192 { 1193 return (const struct blk_io_trace *)ent; 1194 } 1195 1196 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg) 1197 { 1198 return (void *)(te_blk_io_trace(ent) + 1) + 1199 (has_cg ? sizeof(union kernfs_node_id) : 0); 1200 } 1201 1202 static inline const void *cgid_start(const struct trace_entry *ent) 1203 { 1204 return (void *)(te_blk_io_trace(ent) + 1); 1205 } 1206 1207 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg) 1208 { 1209 return te_blk_io_trace(ent)->pdu_len - 1210 (has_cg ? sizeof(union kernfs_node_id) : 0); 1211 } 1212 1213 static inline u32 t_action(const struct trace_entry *ent) 1214 { 1215 return te_blk_io_trace(ent)->action; 1216 } 1217 1218 static inline u32 t_bytes(const struct trace_entry *ent) 1219 { 1220 return te_blk_io_trace(ent)->bytes; 1221 } 1222 1223 static inline u32 t_sec(const struct trace_entry *ent) 1224 { 1225 return te_blk_io_trace(ent)->bytes >> 9; 1226 } 1227 1228 static inline unsigned long long t_sector(const struct trace_entry *ent) 1229 { 1230 return te_blk_io_trace(ent)->sector; 1231 } 1232 1233 static inline __u16 t_error(const struct trace_entry *ent) 1234 { 1235 return te_blk_io_trace(ent)->error; 1236 } 1237 1238 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) 1239 { 1240 const __u64 *val = pdu_start(ent, has_cg); 1241 return be64_to_cpu(*val); 1242 } 1243 1244 static void get_pdu_remap(const struct trace_entry *ent, 1245 struct blk_io_trace_remap *r, bool has_cg) 1246 { 1247 const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); 1248 __u64 sector_from = __r->sector_from; 1249 1250 r->device_from = be32_to_cpu(__r->device_from); 1251 r->device_to = be32_to_cpu(__r->device_to); 1252 r->sector_from = be64_to_cpu(sector_from); 1253 } 1254 1255 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, 1256 bool has_cg); 1257 1258 static void blk_log_action_classic(struct trace_iterator *iter, const char *act, 1259 bool has_cg) 1260 { 1261 char rwbs[RWBS_LEN]; 1262 unsigned long long ts = iter->ts; 1263 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); 1264 unsigned secs = (unsigned long)ts; 1265 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1266 1267 fill_rwbs(rwbs, t); 1268 1269 trace_seq_printf(&iter->seq, 1270 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", 1271 MAJOR(t->device), MINOR(t->device), iter->cpu, 1272 secs, nsec_rem, iter->ent->pid, act, rwbs); 1273 } 1274 1275 static void blk_log_action(struct trace_iterator *iter, const char *act, 1276 bool has_cg) 1277 { 1278 char rwbs[RWBS_LEN]; 1279 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1280 1281 fill_rwbs(rwbs, t); 1282 if (has_cg) { 1283 const union kernfs_node_id *id = cgid_start(iter->ent); 1284 1285 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) { 1286 char blkcg_name_buf[NAME_MAX + 1] = "<...>"; 1287 1288 cgroup_path_from_kernfs_id(id, blkcg_name_buf, 1289 sizeof(blkcg_name_buf)); 1290 trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ", 1291 MAJOR(t->device), MINOR(t->device), 1292 blkcg_name_buf, act, rwbs); 1293 } else 1294 trace_seq_printf(&iter->seq, 1295 "%3d,%-3d %x,%-x %2s %3s ", 1296 MAJOR(t->device), MINOR(t->device), 1297 id->ino, id->generation, act, rwbs); 1298 } else 1299 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", 1300 MAJOR(t->device), MINOR(t->device), act, rwbs); 1301 } 1302 1303 static void blk_log_dump_pdu(struct trace_seq *s, 1304 const struct trace_entry *ent, bool has_cg) 1305 { 1306 const unsigned char *pdu_buf; 1307 int pdu_len; 1308 int i, end; 1309 1310 pdu_buf = pdu_start(ent, has_cg); 1311 pdu_len = pdu_real_len(ent, has_cg); 1312 1313 if (!pdu_len) 1314 return; 1315 1316 /* find the last zero that needs to be printed */ 1317 for (end = pdu_len - 1; end >= 0; end--) 1318 if (pdu_buf[end]) 1319 break; 1320 end++; 1321 1322 trace_seq_putc(s, '('); 1323 1324 for (i = 0; i < pdu_len; i++) { 1325 1326 trace_seq_printf(s, "%s%02x", 1327 i == 0 ? "" : " ", pdu_buf[i]); 1328 1329 /* 1330 * stop when the rest is just zeroes and indicate so 1331 * with a ".." appended 1332 */ 1333 if (i == end && end != pdu_len - 1) { 1334 trace_seq_puts(s, " ..) "); 1335 return; 1336 } 1337 } 1338 1339 trace_seq_puts(s, ") "); 1340 } 1341 1342 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1343 { 1344 char cmd[TASK_COMM_LEN]; 1345 1346 trace_find_cmdline(ent->pid, cmd); 1347 1348 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1349 trace_seq_printf(s, "%u ", t_bytes(ent)); 1350 blk_log_dump_pdu(s, ent, has_cg); 1351 trace_seq_printf(s, "[%s]\n", cmd); 1352 } else { 1353 if (t_sec(ent)) 1354 trace_seq_printf(s, "%llu + %u [%s]\n", 1355 t_sector(ent), t_sec(ent), cmd); 1356 else 1357 trace_seq_printf(s, "[%s]\n", cmd); 1358 } 1359 } 1360 1361 static void blk_log_with_error(struct trace_seq *s, 1362 const struct trace_entry *ent, bool has_cg) 1363 { 1364 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1365 blk_log_dump_pdu(s, ent, has_cg); 1366 trace_seq_printf(s, "[%d]\n", t_error(ent)); 1367 } else { 1368 if (t_sec(ent)) 1369 trace_seq_printf(s, "%llu + %u [%d]\n", 1370 t_sector(ent), 1371 t_sec(ent), t_error(ent)); 1372 else 1373 trace_seq_printf(s, "%llu [%d]\n", 1374 t_sector(ent), t_error(ent)); 1375 } 1376 } 1377 1378 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1379 { 1380 struct blk_io_trace_remap r = { .device_from = 0, }; 1381 1382 get_pdu_remap(ent, &r, has_cg); 1383 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", 1384 t_sector(ent), t_sec(ent), 1385 MAJOR(r.device_from), MINOR(r.device_from), 1386 (unsigned long long)r.sector_from); 1387 } 1388 1389 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1390 { 1391 char cmd[TASK_COMM_LEN]; 1392 1393 trace_find_cmdline(ent->pid, cmd); 1394 1395 trace_seq_printf(s, "[%s]\n", cmd); 1396 } 1397 1398 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1399 { 1400 char cmd[TASK_COMM_LEN]; 1401 1402 trace_find_cmdline(ent->pid, cmd); 1403 1404 trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg)); 1405 } 1406 1407 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1408 { 1409 char cmd[TASK_COMM_LEN]; 1410 1411 trace_find_cmdline(ent->pid, cmd); 1412 1413 trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), 1414 get_pdu_int(ent, has_cg), cmd); 1415 } 1416 1417 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent, 1418 bool has_cg) 1419 { 1420 1421 trace_seq_putmem(s, pdu_start(ent, has_cg), 1422 pdu_real_len(ent, has_cg)); 1423 trace_seq_putc(s, '\n'); 1424 } 1425 1426 /* 1427 * struct tracer operations 1428 */ 1429 1430 static void blk_tracer_print_header(struct seq_file *m) 1431 { 1432 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1433 return; 1434 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" 1435 "# | | | | | |\n"); 1436 } 1437 1438 static void blk_tracer_start(struct trace_array *tr) 1439 { 1440 blk_tracer_enabled = true; 1441 } 1442 1443 static int blk_tracer_init(struct trace_array *tr) 1444 { 1445 blk_tr = tr; 1446 blk_tracer_start(tr); 1447 return 0; 1448 } 1449 1450 static void blk_tracer_stop(struct trace_array *tr) 1451 { 1452 blk_tracer_enabled = false; 1453 } 1454 1455 static void blk_tracer_reset(struct trace_array *tr) 1456 { 1457 blk_tracer_stop(tr); 1458 } 1459 1460 static const struct { 1461 const char *act[2]; 1462 void (*print)(struct trace_seq *s, const struct trace_entry *ent, 1463 bool has_cg); 1464 } what2act[] = { 1465 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, 1466 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, 1467 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, 1468 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, 1469 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, 1470 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, 1471 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, 1472 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, 1473 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, 1474 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, 1475 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, 1476 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, 1477 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, 1478 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, 1479 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, 1480 }; 1481 1482 static enum print_line_t print_one_line(struct trace_iterator *iter, 1483 bool classic) 1484 { 1485 struct trace_array *tr = iter->tr; 1486 struct trace_seq *s = &iter->seq; 1487 const struct blk_io_trace *t; 1488 u16 what; 1489 bool long_act; 1490 blk_log_action_t *log_action; 1491 bool has_cg; 1492 1493 t = te_blk_io_trace(iter->ent); 1494 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP; 1495 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); 1496 log_action = classic ? &blk_log_action_classic : &blk_log_action; 1497 has_cg = t->action & __BLK_TA_CGROUP; 1498 1499 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1500 log_action(iter, long_act ? "message" : "m", has_cg); 1501 blk_log_msg(s, iter->ent, has_cg); 1502 return trace_handle_return(s); 1503 } 1504 1505 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) 1506 trace_seq_printf(s, "Unknown action %x\n", what); 1507 else { 1508 log_action(iter, what2act[what].act[long_act], has_cg); 1509 what2act[what].print(s, iter->ent, has_cg); 1510 } 1511 1512 return trace_handle_return(s); 1513 } 1514 1515 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, 1516 int flags, struct trace_event *event) 1517 { 1518 return print_one_line(iter, false); 1519 } 1520 1521 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) 1522 { 1523 struct trace_seq *s = &iter->seq; 1524 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; 1525 const int offset = offsetof(struct blk_io_trace, sector); 1526 struct blk_io_trace old = { 1527 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, 1528 .time = iter->ts, 1529 }; 1530 1531 trace_seq_putmem(s, &old, offset); 1532 trace_seq_putmem(s, &t->sector, 1533 sizeof(old) - offset + t->pdu_len); 1534 } 1535 1536 static enum print_line_t 1537 blk_trace_event_print_binary(struct trace_iterator *iter, int flags, 1538 struct trace_event *event) 1539 { 1540 blk_trace_synthesize_old_trace(iter); 1541 1542 return trace_handle_return(&iter->seq); 1543 } 1544 1545 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) 1546 { 1547 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1548 return TRACE_TYPE_UNHANDLED; 1549 1550 return print_one_line(iter, true); 1551 } 1552 1553 static int 1554 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 1555 { 1556 /* don't output context-info for blk_classic output */ 1557 if (bit == TRACE_BLK_OPT_CLASSIC) { 1558 if (set) 1559 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1560 else 1561 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; 1562 } 1563 return 0; 1564 } 1565 1566 static struct tracer blk_tracer __read_mostly = { 1567 .name = "blk", 1568 .init = blk_tracer_init, 1569 .reset = blk_tracer_reset, 1570 .start = blk_tracer_start, 1571 .stop = blk_tracer_stop, 1572 .print_header = blk_tracer_print_header, 1573 .print_line = blk_tracer_print_line, 1574 .flags = &blk_tracer_flags, 1575 .set_flag = blk_tracer_set_flag, 1576 }; 1577 1578 static struct trace_event_functions trace_blk_event_funcs = { 1579 .trace = blk_trace_event_print, 1580 .binary = blk_trace_event_print_binary, 1581 }; 1582 1583 static struct trace_event trace_blk_event = { 1584 .type = TRACE_BLK, 1585 .funcs = &trace_blk_event_funcs, 1586 }; 1587 1588 static int __init init_blk_tracer(void) 1589 { 1590 if (!register_trace_event(&trace_blk_event)) { 1591 pr_warn("Warning: could not register block events\n"); 1592 return 1; 1593 } 1594 1595 if (register_tracer(&blk_tracer) != 0) { 1596 pr_warn("Warning: could not register the block tracer\n"); 1597 unregister_trace_event(&trace_blk_event); 1598 return 1; 1599 } 1600 1601 return 0; 1602 } 1603 1604 device_initcall(init_blk_tracer); 1605 1606 static int blk_trace_remove_queue(struct request_queue *q) 1607 { 1608 struct blk_trace *bt; 1609 1610 bt = xchg(&q->blk_trace, NULL); 1611 if (bt == NULL) 1612 return -EINVAL; 1613 1614 put_probe_ref(); 1615 blk_trace_free(bt); 1616 return 0; 1617 } 1618 1619 /* 1620 * Setup everything required to start tracing 1621 */ 1622 static int blk_trace_setup_queue(struct request_queue *q, 1623 struct block_device *bdev) 1624 { 1625 struct blk_trace *bt = NULL; 1626 int ret = -ENOMEM; 1627 1628 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 1629 if (!bt) 1630 return -ENOMEM; 1631 1632 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 1633 if (!bt->msg_data) 1634 goto free_bt; 1635 1636 bt->dev = bdev->bd_dev; 1637 bt->act_mask = (u16)-1; 1638 1639 blk_trace_setup_lba(bt, bdev); 1640 1641 ret = -EBUSY; 1642 if (cmpxchg(&q->blk_trace, NULL, bt)) 1643 goto free_bt; 1644 1645 get_probe_ref(); 1646 return 0; 1647 1648 free_bt: 1649 blk_trace_free(bt); 1650 return ret; 1651 } 1652 1653 /* 1654 * sysfs interface to enable and configure tracing 1655 */ 1656 1657 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1658 struct device_attribute *attr, 1659 char *buf); 1660 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1661 struct device_attribute *attr, 1662 const char *buf, size_t count); 1663 #define BLK_TRACE_DEVICE_ATTR(_name) \ 1664 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ 1665 sysfs_blk_trace_attr_show, \ 1666 sysfs_blk_trace_attr_store) 1667 1668 static BLK_TRACE_DEVICE_ATTR(enable); 1669 static BLK_TRACE_DEVICE_ATTR(act_mask); 1670 static BLK_TRACE_DEVICE_ATTR(pid); 1671 static BLK_TRACE_DEVICE_ATTR(start_lba); 1672 static BLK_TRACE_DEVICE_ATTR(end_lba); 1673 1674 static struct attribute *blk_trace_attrs[] = { 1675 &dev_attr_enable.attr, 1676 &dev_attr_act_mask.attr, 1677 &dev_attr_pid.attr, 1678 &dev_attr_start_lba.attr, 1679 &dev_attr_end_lba.attr, 1680 NULL 1681 }; 1682 1683 struct attribute_group blk_trace_attr_group = { 1684 .name = "trace", 1685 .attrs = blk_trace_attrs, 1686 }; 1687 1688 static const struct { 1689 int mask; 1690 const char *str; 1691 } mask_maps[] = { 1692 { BLK_TC_READ, "read" }, 1693 { BLK_TC_WRITE, "write" }, 1694 { BLK_TC_FLUSH, "flush" }, 1695 { BLK_TC_SYNC, "sync" }, 1696 { BLK_TC_QUEUE, "queue" }, 1697 { BLK_TC_REQUEUE, "requeue" }, 1698 { BLK_TC_ISSUE, "issue" }, 1699 { BLK_TC_COMPLETE, "complete" }, 1700 { BLK_TC_FS, "fs" }, 1701 { BLK_TC_PC, "pc" }, 1702 { BLK_TC_NOTIFY, "notify" }, 1703 { BLK_TC_AHEAD, "ahead" }, 1704 { BLK_TC_META, "meta" }, 1705 { BLK_TC_DISCARD, "discard" }, 1706 { BLK_TC_DRV_DATA, "drv_data" }, 1707 { BLK_TC_FUA, "fua" }, 1708 }; 1709 1710 static int blk_trace_str2mask(const char *str) 1711 { 1712 int i; 1713 int mask = 0; 1714 char *buf, *s, *token; 1715 1716 buf = kstrdup(str, GFP_KERNEL); 1717 if (buf == NULL) 1718 return -ENOMEM; 1719 s = strstrip(buf); 1720 1721 while (1) { 1722 token = strsep(&s, ","); 1723 if (token == NULL) 1724 break; 1725 1726 if (*token == '\0') 1727 continue; 1728 1729 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { 1730 if (strcasecmp(token, mask_maps[i].str) == 0) { 1731 mask |= mask_maps[i].mask; 1732 break; 1733 } 1734 } 1735 if (i == ARRAY_SIZE(mask_maps)) { 1736 mask = -EINVAL; 1737 break; 1738 } 1739 } 1740 kfree(buf); 1741 1742 return mask; 1743 } 1744 1745 static ssize_t blk_trace_mask2str(char *buf, int mask) 1746 { 1747 int i; 1748 char *p = buf; 1749 1750 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { 1751 if (mask & mask_maps[i].mask) { 1752 p += sprintf(p, "%s%s", 1753 (p == buf) ? "" : ",", mask_maps[i].str); 1754 } 1755 } 1756 *p++ = '\n'; 1757 1758 return p - buf; 1759 } 1760 1761 static struct request_queue *blk_trace_get_queue(struct block_device *bdev) 1762 { 1763 if (bdev->bd_disk == NULL) 1764 return NULL; 1765 1766 return bdev_get_queue(bdev); 1767 } 1768 1769 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1770 struct device_attribute *attr, 1771 char *buf) 1772 { 1773 struct hd_struct *p = dev_to_part(dev); 1774 struct request_queue *q; 1775 struct block_device *bdev; 1776 ssize_t ret = -ENXIO; 1777 1778 bdev = bdget(part_devt(p)); 1779 if (bdev == NULL) 1780 goto out; 1781 1782 q = blk_trace_get_queue(bdev); 1783 if (q == NULL) 1784 goto out_bdput; 1785 1786 mutex_lock(&q->blk_trace_mutex); 1787 1788 if (attr == &dev_attr_enable) { 1789 ret = sprintf(buf, "%u\n", !!q->blk_trace); 1790 goto out_unlock_bdev; 1791 } 1792 1793 if (q->blk_trace == NULL) 1794 ret = sprintf(buf, "disabled\n"); 1795 else if (attr == &dev_attr_act_mask) 1796 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); 1797 else if (attr == &dev_attr_pid) 1798 ret = sprintf(buf, "%u\n", q->blk_trace->pid); 1799 else if (attr == &dev_attr_start_lba) 1800 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); 1801 else if (attr == &dev_attr_end_lba) 1802 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); 1803 1804 out_unlock_bdev: 1805 mutex_unlock(&q->blk_trace_mutex); 1806 out_bdput: 1807 bdput(bdev); 1808 out: 1809 return ret; 1810 } 1811 1812 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1813 struct device_attribute *attr, 1814 const char *buf, size_t count) 1815 { 1816 struct block_device *bdev; 1817 struct request_queue *q; 1818 struct hd_struct *p; 1819 u64 value; 1820 ssize_t ret = -EINVAL; 1821 1822 if (count == 0) 1823 goto out; 1824 1825 if (attr == &dev_attr_act_mask) { 1826 if (kstrtoull(buf, 0, &value)) { 1827 /* Assume it is a list of trace category names */ 1828 ret = blk_trace_str2mask(buf); 1829 if (ret < 0) 1830 goto out; 1831 value = ret; 1832 } 1833 } else if (kstrtoull(buf, 0, &value)) 1834 goto out; 1835 1836 ret = -ENXIO; 1837 1838 p = dev_to_part(dev); 1839 bdev = bdget(part_devt(p)); 1840 if (bdev == NULL) 1841 goto out; 1842 1843 q = blk_trace_get_queue(bdev); 1844 if (q == NULL) 1845 goto out_bdput; 1846 1847 mutex_lock(&q->blk_trace_mutex); 1848 1849 if (attr == &dev_attr_enable) { 1850 if (value) 1851 ret = blk_trace_setup_queue(q, bdev); 1852 else 1853 ret = blk_trace_remove_queue(q); 1854 goto out_unlock_bdev; 1855 } 1856 1857 ret = 0; 1858 if (q->blk_trace == NULL) 1859 ret = blk_trace_setup_queue(q, bdev); 1860 1861 if (ret == 0) { 1862 if (attr == &dev_attr_act_mask) 1863 q->blk_trace->act_mask = value; 1864 else if (attr == &dev_attr_pid) 1865 q->blk_trace->pid = value; 1866 else if (attr == &dev_attr_start_lba) 1867 q->blk_trace->start_lba = value; 1868 else if (attr == &dev_attr_end_lba) 1869 q->blk_trace->end_lba = value; 1870 } 1871 1872 out_unlock_bdev: 1873 mutex_unlock(&q->blk_trace_mutex); 1874 out_bdput: 1875 bdput(bdev); 1876 out: 1877 return ret ? ret : count; 1878 } 1879 1880 int blk_trace_init_sysfs(struct device *dev) 1881 { 1882 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1883 } 1884 1885 void blk_trace_remove_sysfs(struct device *dev) 1886 { 1887 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); 1888 } 1889 1890 #endif /* CONFIG_BLK_DEV_IO_TRACE */ 1891 1892 #ifdef CONFIG_EVENT_TRACING 1893 1894 void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes) 1895 { 1896 int i = 0; 1897 1898 if (op & REQ_PREFLUSH) 1899 rwbs[i++] = 'F'; 1900 1901 switch (op & REQ_OP_MASK) { 1902 case REQ_OP_WRITE: 1903 case REQ_OP_WRITE_SAME: 1904 rwbs[i++] = 'W'; 1905 break; 1906 case REQ_OP_DISCARD: 1907 rwbs[i++] = 'D'; 1908 break; 1909 case REQ_OP_SECURE_ERASE: 1910 rwbs[i++] = 'D'; 1911 rwbs[i++] = 'E'; 1912 break; 1913 case REQ_OP_FLUSH: 1914 rwbs[i++] = 'F'; 1915 break; 1916 case REQ_OP_READ: 1917 rwbs[i++] = 'R'; 1918 break; 1919 default: 1920 rwbs[i++] = 'N'; 1921 } 1922 1923 if (op & REQ_FUA) 1924 rwbs[i++] = 'F'; 1925 if (op & REQ_RAHEAD) 1926 rwbs[i++] = 'A'; 1927 if (op & REQ_SYNC) 1928 rwbs[i++] = 'S'; 1929 if (op & REQ_META) 1930 rwbs[i++] = 'M'; 1931 1932 rwbs[i] = '\0'; 1933 } 1934 EXPORT_SYMBOL_GPL(blk_fill_rwbs); 1935 1936 #endif /* CONFIG_EVENT_TRACING */ 1937 1938