#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( int,		errors			)
		__array( char,		rwbs,	6		)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after a pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operation request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
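
/*
 * Illustrative sketch, not part of this header: a kernel module can
 * attach a probe to one of the request events above. The probe
 * signature mirrors TP_PROTO. Note that the exact register_trace_*()
 * signature varies across kernel versions (newer kernels add an extra
 * data argument), so check this against your tree.
 *
 *	static void my_rq_complete_probe(struct request_queue *q,
 *					 struct request *rq)
 *	{
 *		pr_info("complete %llu + %u errors=%d\n",
 *			(unsigned long long)blk_rq_pos(rq),
 *			blk_rq_sectors(rq), rq->errors);
 *	}
 *
 *	register_trace_block_rq_complete(my_rq_complete_probe);
 *	...
 *	unregister_trace_block_rq_complete(my_rq_complete_probe);
 */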

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( unsigned int,	bytes			)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
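
/*
 * With the TP_printk format above, a bounce event renders in the trace
 * buffer roughly as follows (hypothetical values):
 *
 *	block_bio_bounce: 8,0 W 123456 + 8 [dd]
 *
 * i.e. major,minor of the device, the flags string filled in by
 * blk_fill_rwbs(), the starting sector, the number of 512-byte
 * sectors, and the command name of the current task.
 */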

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		__entry->error		= error;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);
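
/*
 * Illustrative sketch, not part of this header: a probe attached to
 * any of the block_bio events sees the same (q, bio) pair captured
 * above and can read the fields directly (hypothetical probe name):
 *
 *	static void my_bio_queue_probe(struct request_queue *q,
 *				       struct bio *bio)
 *	{
 *		pr_info("bio queued at %llu + %u\n",
 *			(unsigned long long)bio->bi_sector,
 *			bio->bi_size >> 9);
 *	}
 */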

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio ? bio->bi_sector : 0;
		__entry->nr_sector	= bio ? bio->bi_size >> 9 : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq	= q->rq.count[READ] + q->rq.count[WRITE];
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug_timer - timed release of operation requests in queue to device driver
 * @q: request queue to unplug
 *
 * Unplug the request queue @q because a timer expired and allow block
 * operation requests to be sent to the device driver.
 */
DEFINE_EVENT(block_unplug, block_unplug_timer,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
);

/**
 * block_unplug_io - release of operation requests in request queue
 * @q: request queue to unplug
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug_io,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
);
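
/*
 * For both unplug events the nr_rq value printed above is the sum of
 * the queued read and write requests at unplug time, so a rendered
 * line looks roughly like (hypothetical values):
 *
 *	block_unplug_io: [kblockd/0] 4
 */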

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( sector_t,	new_sector		)
		__array( char,		rwbs,	6		)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);
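
/*
 * A remap line pairs the new and old locations; with the format above
 * it renders roughly as (hypothetical values):
 *
 *	block_bio_remap: 8,0 R 9412 + 8 <- (253,0) 1234
 *
 * reading as: now device 8,0 sector 9412, remapped from device 253,0
 * sector 1234.
 */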

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
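
/*
 * Usage note (a sketch of the standard trace-header convention, not
 * text from this file): exactly one .c file defines CREATE_TRACE_POINTS
 * before including this header, so that <trace/define_trace.h>
 * instantiates the events; every other user simply includes the header
 * and calls the generated trace_*() hooks, e.g.:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/block.h>
 *
 *	trace_block_plug(q);
 *	trace_block_getrq(q, bio, rw);
 */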