#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( int,		errors			)
		__array( char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operation request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
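
/*
 * A hedged usage sketch, not part of the upstream header: kernel code
 * can attach a probe to one of the events above via the
 * register_trace_* helpers that the tracepoint machinery generates
 * from TP_PROTO.  On kernels of this vintage the probe takes a leading
 * private-data pointer; "my_rq_complete_probe" is a hypothetical name.
 *
 *	static void my_rq_complete_probe(void *ignore,
 *					 struct request_queue *q,
 *					 struct request *rq)
 *	{
 *		pr_info("rq complete: %u sectors\n", blk_rq_sectors(rq));
 *	}
 *
 *	ret = register_trace_block_rq_complete(my_rq_complete_probe, NULL);
 *	...
 *	unregister_trace_block_rq_complete(my_rq_complete_probe, NULL);
 */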

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( unsigned int,	bytes			)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
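
/*
 * A hedged userspace sketch (assumes debugfs is mounted at
 * /sys/kernel/debug): the request events above can be enabled and read
 * through the ftrace interface without writing any kernel code.  Each
 * emitted line follows the event's TP_printk() format.
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/block/block_rq_issue/enable
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *	# echo 0 > /sys/kernel/debug/tracing/events/block/block_rq_issue/enable
 */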

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		__entry->error		= error;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);
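
/*
 * Illustrative only: given the TP_printk() above, a completed 4 KiB
 * read at sector 1000 on disk 8:0 with no error would render roughly
 * as
 *
 *	8,0 R 1000 + 8 [0]
 *
 * "+ 8" is the length in 512-byte sectors.  The flag string is built
 * by blk_fill_rwbs(); its letters include R (read), W (write),
 * D (discard) and N (none), plus modifiers such as F (flush/FUA),
 * A (readahead), S (sync) and M (metadata).
 */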

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
);

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio ? bio->bi_sector : 0;
		__entry->nr_sector	= bio ? bio->bi_size >> 9 : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);
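
/*
 * A hedged kernel-side sketch of what drives the plug/unplug events:
 * submitters batch IO with an on-stack blk_plug, and the block layer
 * emits block_plug as requests start collecting on the plugged queue
 * and block_unplug when the batch is flushed (explicitly, or
 * implicitly from schedule()).  "bio" here is assumed to be a fully
 * prepared struct bio.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(READ, bio);		// may fire block_plug
 *	blk_finish_plug(&plug);		// flush; fires block_unplug
 */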

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as the operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( sector_t,	new_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);
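
/*
 * Illustrative only: stacking drivers such as device-mapper are the
 * usual source of remap events.  A write remapped from a dm device
 * (253:0) onto an underlying disk (8:0) might render roughly as
 *
 *	8,0 W 9000 + 8 <- (253,0) 1000
 *
 * i.e. the new (device, sector) location first, then the original.
 */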

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
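
/*
 * A hedged note on userspace consumption: besides the ftrace files,
 * these tracepoints are visible to perf as "block:<event>", e.g.
 *
 *	# perf record -e block:block_rq_issue -a sleep 1
 *	# perf script
 */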