#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( size_t,	size			)
	),

	TP_fast_assign(
		__entry->dev		= bh->b_bdev->bd_dev;
		__entry->sector		= bh->b_blocknr;
		__entry->size		= bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( int,		errors			)
		__array( char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after a pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
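
/*
 * For orientation: given the block_rq_with_error TP_printk format above
 * ("%d,%d %s (%s) %llu + %u [%d]"), a rendered block_rq_requeue line
 * would look roughly like the following (values hypothetical):
 *
 *	8,0 W () 7700232 + 8 [0]
 *
 * i.e. major,minor of the device, the rwbs flags string, the dumped
 * command, start sector + sector count, and the rq->errors value.
 */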

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operation request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		 unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( int,		errors			)
		__array( char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( unsigned int,	bytes			)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
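
/*
 * Note on the arithmetic in block_rq_complete above: the completed byte
 * count is converted to 512-byte sectors with nr_bytes >> 9 (e.g.
 * nr_bytes == 4096 records nr_sector == 8), so a partially completed
 * request traces only the portion that actually finished.
 */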

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
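
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): any
 * event in this file can be enabled and read from userspace through the
 * generic trace event interface, e.g.
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/block/block_rq_issue/enable
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * The rendered lines follow the TP_printk formats defined here.
 */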

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= error;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);
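
/*
 * In-kernel consumers can attach probes to these events. A minimal
 * sketch (probe name hypothetical) of hooking block_bio_complete, in
 * the style of kernel/trace/blktrace.c:
 *
 *	static void my_bio_complete_probe(void *ignore,
 *					  struct request_queue *q,
 *					  struct bio *bio, int error)
 *	{
 *		if (error)
 *			pr_info("bio error %d at sector %llu\n", error,
 *				(unsigned long long)bio->bi_iter.bi_sector);
 *	}
 *
 *	register_trace_block_bio_complete(my_bio_complete_probe, NULL);
 */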

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q,
 * the process needs to wait for a request struct to become available.
 * This tracepoint event is generated each time the process goes to
 * sleep waiting for a request struct to become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);
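
/*
 * Note that the block_get_rq class tolerates a NULL @bio: every
 * bio-derived field then falls back to 0. A (hypothetical) rendered
 * line for a request allocated without an attached bio would read
 * along the lines of "0,0 N 0 + 0 [comm]", with the rwbs string built
 * from flags of 0 and a byte count of 0.
 */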

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);
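
/*
 * The plug/unplug events above correspond to the blk_start_plug() /
 * blk_finish_plug() API. A minimal sketch of the submitter-side pattern
 * (illustrative; the tracepoints themselves fire from inside the block
 * core as requests queue up under the plug and are later flushed):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(WRITE, bio);		// block_plug as requests queue up
 *	blk_finish_plug(&plug);		// block_unplug, explicit == true
 */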

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( sector_t,	new_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__field( unsigned int,	nr_bios		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		__entry->nr_bios	= blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>