xref: /openbmc/linux/include/trace/events/block.h (revision aac5987a)
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM block
3 
4 #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_BLOCK_H
6 
7 #include <linux/blktrace_api.h>
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/tracepoint.h>
11 
12 #define RWBS_LEN	8
13 
/*
 * Event class shared by the buffer_head tracepoints below: records the
 * buffer's device, its block number (reported as "sector") and its size.
 */
14 DECLARE_EVENT_CLASS(block_buffer,
15 
16 	TP_PROTO(struct buffer_head *bh),
17 
18 	TP_ARGS(bh),
19 
20 	TP_STRUCT__entry(
21 		__field(  dev_t,	dev			)
22 		__field(  sector_t,	sector			)
23 		__field(  size_t,	size			)
24 	),
25 
26 	TP_fast_assign(
27 		__entry->dev		= bh->b_bdev->bd_dev;
28 		__entry->sector		= bh->b_blocknr;
29 		__entry->size		= bh->b_size;
30 	),
31 
32 	TP_printk("%d,%d sector=%llu size=%zu",
33 		MAJOR(__entry->dev), MINOR(__entry->dev),
34 		(unsigned long long)__entry->sector, __entry->size
35 	)
36 );
37 
38 /**
39  * block_touch_buffer - mark a buffer accessed
40  * @bh: buffer_head being touched
41  *
42  * Called from touch_buffer().  Reuses the block_buffer event class,
43  * so it records the buffer's dev, block number and size.
43  */
44 DEFINE_EVENT(block_buffer, block_touch_buffer,
45 
46 	TP_PROTO(struct buffer_head *bh),
47 
48 	TP_ARGS(bh)
49 );
50 
51 /**
52  * block_dirty_buffer - mark a buffer dirty
53  * @bh: buffer_head being dirtied
54  *
55  * Called from mark_buffer_dirty().  Reuses the block_buffer event class,
56  * so it records the buffer's dev, block number and size.
56  */
57 DEFINE_EVENT(block_buffer, block_dirty_buffer,
58 
59 	TP_PROTO(struct buffer_head *bh),
60 
61 	TP_ARGS(bh)
62 );
63 
/*
 * Event class for request tracepoints that report an error status
 * (abort/requeue): dev, start sector, sector count and rq->errors.
 */
64 DECLARE_EVENT_CLASS(block_rq_with_error,
65 
66 	TP_PROTO(struct request_queue *q, struct request *rq),
67 
68 	TP_ARGS(q, rq),
69 
70 	TP_STRUCT__entry(
71 		__field(  dev_t,	dev			)
72 		__field(  sector_t,	sector			)
73 		__field(  unsigned int,	nr_sector		)
74 		__field(  int,		errors			)
75 		__array(  char,		rwbs,	RWBS_LEN	)
76 		__dynamic_array( char,	cmd,	1		)
77 	),
78 
79 	TP_fast_assign(
		/* dev is 0 when the request has no gendisk attached yet */
80 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
81 		__entry->sector    = blk_rq_trace_sector(rq);
82 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
83 		__entry->errors    = rq->errors;
84 
85 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		/* cmd is kept for ABI compatibility but always empty */
86 		__get_str(cmd)[0] = '\0';
87 	),
88 
89 	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
90 		  MAJOR(__entry->dev), MINOR(__entry->dev),
91 		  __entry->rwbs, __get_str(cmd),
92 		  (unsigned long long)__entry->sector,
93 		  __entry->nr_sector, __entry->errors)
94 );
95 
96 /**
97  * block_rq_abort - abort block operation request
98  * @q: queue containing the block operation request
99  * @rq: block IO operation request
100  *
101  * Called immediately after pending block IO operation request @rq in
102  * queue @q is aborted. The fields in the operation request @rq
103  * can be examined to determine which device and sectors the pending
104  * operation would access.  Reuses the block_rq_with_error event class.
105  */
106 DEFINE_EVENT(block_rq_with_error, block_rq_abort,
107 
108 	TP_PROTO(struct request_queue *q, struct request *rq),
109 
110 	TP_ARGS(q, rq)
111 );
112 
113 /**
114  * block_rq_requeue - place block IO request back on a queue
115  * @q: queue holding operation
116  * @rq: block IO operation request
117  *
118  * The block operation request @rq is being placed back into queue
119  * @q.  For some reason the request was not completed and needs to be
120  * put back in the queue.  Reuses the block_rq_with_error event class.
121  */
122 DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
123 
124 	TP_PROTO(struct request_queue *q, struct request *rq),
125 
126 	TP_ARGS(q, rq)
127 );
128 
129 /**
130  * block_rq_complete - block IO operation completed by device driver
131  * @q: queue containing the block operation request
132  * @rq: block operations request
133  * @nr_bytes: number of completed bytes
134  *
135  * The block_rq_complete tracepoint event indicates that some portion
136  * of operation request has been completed by the device driver.  If
137  * the @rq->bio is %NULL, then there is absolutely no additional work to
138  * do for the request. If @rq->bio is non-NULL then there is
139  * additional work required to complete the request.
140  */
141 TRACE_EVENT(block_rq_complete,
142 
143 	TP_PROTO(struct request_queue *q, struct request *rq,
144 		 unsigned int nr_bytes),
145 
146 	TP_ARGS(q, rq, nr_bytes),
147 
148 	TP_STRUCT__entry(
149 		__field(  dev_t,	dev			)
150 		__field(  sector_t,	sector			)
151 		__field(  unsigned int,	nr_sector		)
152 		__field(  int,		errors			)
153 		__array(  char,		rwbs,	RWBS_LEN	)
154 		__dynamic_array( char,	cmd,	1		)
155 	),
156 
157 	TP_fast_assign(
		/* dev is 0 when the request has no gendisk attached */
158 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
159 		__entry->sector    = blk_rq_pos(rq);
		/* convert the completed byte count to 512-byte sectors */
160 		__entry->nr_sector = nr_bytes >> 9;
161 		__entry->errors    = rq->errors;
162 
163 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
		/* cmd is kept for ABI compatibility but always empty */
164 		__get_str(cmd)[0] = '\0';
165 	),
166 
167 	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
168 		  MAJOR(__entry->dev), MINOR(__entry->dev),
169 		  __entry->rwbs, __get_str(cmd),
170 		  (unsigned long long)__entry->sector,
171 		  __entry->nr_sector, __entry->errors)
172 );
173 
/*
 * Event class for request insert/issue tracepoints: dev, start sector,
 * sector count, payload byte count and the issuing task's comm.
 */
174 DECLARE_EVENT_CLASS(block_rq,
175 
176 	TP_PROTO(struct request_queue *q, struct request *rq),
177 
178 	TP_ARGS(q, rq),
179 
180 	TP_STRUCT__entry(
181 		__field(  dev_t,	dev			)
182 		__field(  sector_t,	sector			)
183 		__field(  unsigned int,	nr_sector		)
184 		__field(  unsigned int,	bytes			)
185 		__array(  char,		rwbs,	RWBS_LEN	)
186 		__array(  char,         comm,   TASK_COMM_LEN   )
187 		__dynamic_array( char,	cmd,	1		)
188 	),
189 
190 	TP_fast_assign(
		/* dev is 0 when the request has no gendisk attached yet */
191 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
192 		__entry->sector    = blk_rq_trace_sector(rq);
193 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
194 		__entry->bytes     = blk_rq_bytes(rq);
195 
196 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		/* cmd is kept for ABI compatibility but always empty */
197 		__get_str(cmd)[0] = '\0';
198 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
199 	),
200 
201 	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
202 		  MAJOR(__entry->dev), MINOR(__entry->dev),
203 		  __entry->rwbs, __entry->bytes, __get_str(cmd),
204 		  (unsigned long long)__entry->sector,
205 		  __entry->nr_sector, __entry->comm)
206 );
207 
208 /**
209  * block_rq_insert - insert block operation request into queue
210  * @q: target queue
211  * @rq: block IO operation request
212  *
213  * Called immediately before block operation request @rq is inserted
214  * into queue @q.  The fields in the operation request @rq struct can
215  * be examined to determine which device and sectors the pending
216  * operation would access.  Reuses the block_rq event class.
217  */
218 DEFINE_EVENT(block_rq, block_rq_insert,
219 
220 	TP_PROTO(struct request_queue *q, struct request *rq),
221 
222 	TP_ARGS(q, rq)
223 );
224 
225 /**
226  * block_rq_issue - issue pending block IO request operation to device driver
227  * @q: queue holding operation
228  * @rq: block IO operation request
229  *
230  * Called when block operation request @rq from queue @q is sent to a
231  * device driver for processing.  Reuses the block_rq event class.
232  */
233 DEFINE_EVENT(block_rq, block_rq_issue,
234 
235 	TP_PROTO(struct request_queue *q, struct request *rq),
236 
237 	TP_ARGS(q, rq)
238 );
239 
240 /**
241  * block_bio_bounce - used bounce buffer when processing block operation
242  * @q: queue holding the block operation
243  * @bio: block operation
244  *
245  * A bounce buffer was used to handle the block operation @bio in @q.
246  * This occurs when hardware limitations prevent a direct transfer of
247  * data between the @bio data memory area and the IO device.  Use of a
248  * bounce buffer requires extra copying of data and decreases
249  * performance.
250  */
251 TRACE_EVENT(block_bio_bounce,
252 
253 	TP_PROTO(struct request_queue *q, struct bio *bio),
254 
255 	TP_ARGS(q, bio),
256 
257 	TP_STRUCT__entry(
258 		__field( dev_t,		dev			)
259 		__field( sector_t,	sector			)
260 		__field( unsigned int,	nr_sector		)
261 		__array( char,		rwbs,	RWBS_LEN	)
262 		__array( char,		comm,	TASK_COMM_LEN	)
263 	),
264 
265 	TP_fast_assign(
		/* bi_bdev may not be set yet; report dev 0 in that case */
266 		__entry->dev		= bio->bi_bdev ?
267 					  bio->bi_bdev->bd_dev : 0;
268 		__entry->sector		= bio->bi_iter.bi_sector;
269 		__entry->nr_sector	= bio_sectors(bio);
270 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
271 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
272 	),
273 
274 	TP_printk("%d,%d %s %llu + %u [%s]",
275 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
276 		  (unsigned long long)__entry->sector,
277 		  __entry->nr_sector, __entry->comm)
278 );
279 
280 /**
281  * block_bio_complete - completed all work on the block operation
282  * @q: queue holding the block operation
283  * @bio: block operation completed
284  * @error: io error value
285  *
286  * This tracepoint indicates there is no further work to do on this
287  * block IO operation @bio.
288  */
289 TRACE_EVENT(block_bio_complete,
290 
291 	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
292 
293 	TP_ARGS(q, bio, error),
294 
295 	TP_STRUCT__entry(
296 		__field( dev_t,		dev		)
297 		__field( sector_t,	sector		)
298 		__field( unsigned,	nr_sector	)
299 		__field( int,		error		)
300 		__array( char,		rwbs,	RWBS_LEN)
301 	),
302 
303 	TP_fast_assign(
		/* unlike block_bio_bounce, bi_bdev is dereferenced unguarded here */
304 		__entry->dev		= bio->bi_bdev->bd_dev;
305 		__entry->sector		= bio->bi_iter.bi_sector;
306 		__entry->nr_sector	= bio_sectors(bio);
307 		__entry->error		= error;
308 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
309 	),
310 
311 	TP_printk("%d,%d %s %llu + %u [%d]",
312 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
313 		  (unsigned long long)__entry->sector,
314 		  __entry->nr_sector, __entry->error)
315 );
316 
/*
 * Event class for bio back/front merge tracepoints: dev, start sector,
 * sector count and the merging task's comm.  @rq is received but not
 * recorded in the trace entry.
 */
317 DECLARE_EVENT_CLASS(block_bio_merge,
318 
319 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
320 
321 	TP_ARGS(q, rq, bio),
322 
323 	TP_STRUCT__entry(
324 		__field( dev_t,		dev			)
325 		__field( sector_t,	sector			)
326 		__field( unsigned int,	nr_sector		)
327 		__array( char,		rwbs,	RWBS_LEN	)
328 		__array( char,		comm,	TASK_COMM_LEN	)
329 	),
330 
331 	TP_fast_assign(
332 		__entry->dev		= bio->bi_bdev->bd_dev;
333 		__entry->sector		= bio->bi_iter.bi_sector;
334 		__entry->nr_sector	= bio_sectors(bio);
335 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
336 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
337 	),
338 
339 	TP_printk("%d,%d %s %llu + %u [%s]",
340 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
341 		  (unsigned long long)__entry->sector,
342 		  __entry->nr_sector, __entry->comm)
343 );
344 
345 /**
346  * block_bio_backmerge - merging block operation to the end of an existing operation
347  * @q: queue holding operation
348  * @rq: request bio is being merged into
349  * @bio: new block operation to merge
350  *
351  * Merging block request @bio to the end of an existing block request
352  * in queue @q.  Reuses the block_bio_merge event class.
353  */
354 DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
355 
356 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
357 
358 	TP_ARGS(q, rq, bio)
359 );
360 
361 /**
362  * block_bio_frontmerge - merging block operation to the beginning of an existing operation
363  * @q: queue holding operation
364  * @rq: request bio is being merged into
365  * @bio: new block operation to merge
366  *
367  * Merging block IO operation @bio to the beginning of an existing block
368  * operation in queue @q.  Reuses the block_bio_merge event class.
369  */
370 DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
371 
372 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
373 
374 	TP_ARGS(q, rq, bio)
375 );
376 
377 /**
378  * block_bio_queue - putting new block IO operation in queue
379  * @q: queue holding operation
380  * @bio: new block operation
381  *
382  * About to place the block IO operation @bio into queue @q.
383  */
384 TRACE_EVENT(block_bio_queue,
385 
386 	TP_PROTO(struct request_queue *q, struct bio *bio),
387 
388 	TP_ARGS(q, bio),
389 
390 	TP_STRUCT__entry(
391 		__field( dev_t,		dev			)
392 		__field( sector_t,	sector			)
393 		__field( unsigned int,	nr_sector		)
394 		__array( char,		rwbs,	RWBS_LEN	)
395 		__array( char,		comm,	TASK_COMM_LEN	)
396 	),
397 
398 	TP_fast_assign(
399 		__entry->dev		= bio->bi_bdev->bd_dev;
400 		__entry->sector		= bio->bi_iter.bi_sector;
401 		__entry->nr_sector	= bio_sectors(bio);
402 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
403 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
404 	),
405 
406 	TP_printk("%d,%d %s %llu + %u [%s]",
407 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
408 		  (unsigned long long)__entry->sector,
409 		  __entry->nr_sector, __entry->comm)
410 );
411 
/*
 * Event class for request-allocation tracepoints (getrq/sleeprq).
 * @bio may be NULL, so every field falls back to 0 in that case.
 */
412 DECLARE_EVENT_CLASS(block_get_rq,
413 
414 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
415 
416 	TP_ARGS(q, bio, rw),
417 
418 	TP_STRUCT__entry(
419 		__field( dev_t,		dev			)
420 		__field( sector_t,	sector			)
421 		__field( unsigned int,	nr_sector		)
422 		__array( char,		rwbs,	RWBS_LEN	)
423 		__array( char,		comm,	TASK_COMM_LEN	)
424 	),
425 
426 	TP_fast_assign(
427 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
428 		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
429 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		/*
		 * NOTE(review): the other callers in this file pass a byte
		 * count (bi_size / blk_rq_bytes) as the last argument to
		 * blk_fill_rwbs(), but here it is a sector count — confirm
		 * whether this is intentional.
		 */
430 		blk_fill_rwbs(__entry->rwbs,
431 			      bio ? bio->bi_opf : 0, __entry->nr_sector);
432 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
433 	),
434 
435 	TP_printk("%d,%d %s %llu + %u [%s]",
436 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
437 		  (unsigned long long)__entry->sector,
438 		  __entry->nr_sector, __entry->comm)
439 );
440 
441 /**
442  * block_getrq - get a free request entry in queue for block IO operations
443  * @q: queue for operations
444  * @bio: pending block IO operation
445  * @rw: low bit indicates a read (%0) or a write (%1)
446  *
447  * A request struct for queue @q has been allocated to handle the
448  * block IO operation @bio.  Reuses the block_get_rq event class.
449  */
450 DEFINE_EVENT(block_get_rq, block_getrq,
451 
452 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
453 
454 	TP_ARGS(q, bio, rw)
455 );
456 
457 /**
458  * block_sleeprq - waiting to get a free request entry in queue for block IO operation
459  * @q: queue for operation
460  * @bio: pending block IO operation
461  * @rw: low bit indicates a read (%0) or a write (%1)
462  *
463  * In the case where a request struct cannot be provided for queue @q
464  * the process needs to wait for a request struct to become
465  * available.  This tracepoint event is generated each time the
466  * process goes to sleep waiting for a request struct to become
467  * available.  Reuses the block_get_rq event class.
467  */
468 DEFINE_EVENT(block_get_rq, block_sleeprq,
469 
470 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
471 
472 	TP_ARGS(q, bio, rw)
473 );
474 
475 /**
476  * block_plug - keep operations requests in request queue
477  * @q: request queue to plug
478  *
479  * Plug the request queue @q.  Do not allow block operation requests
480  * to be sent to the device driver. Instead, accumulate requests in
481  * the queue to improve throughput performance of the block device.
482  */
483 TRACE_EVENT(block_plug,
484 
485 	TP_PROTO(struct request_queue *q),
486 
487 	TP_ARGS(q),
488 
489 	TP_STRUCT__entry(
490 		__array( char,		comm,	TASK_COMM_LEN	)
491 	),
492 
493 	TP_fast_assign(
		/* only the plugging task's comm is recorded; @q is unused */
494 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
495 	),
496 
497 	TP_printk("[%s]", __entry->comm)
498 );
499 
/*
 * Event class for unplug tracepoints: records the queue depth at
 * unplug time and the task's comm.  @q and @explicit are received
 * but not recorded in the trace entry.
 */
500 DECLARE_EVENT_CLASS(block_unplug,
501 
502 	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
503 
504 	TP_ARGS(q, depth, explicit),
505 
506 	TP_STRUCT__entry(
507 		__field( int,		nr_rq			)
508 		__array( char,		comm,	TASK_COMM_LEN	)
509 	),
510 
511 	TP_fast_assign(
512 		__entry->nr_rq = depth;
513 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
514 	),
515 
516 	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
517 );
518 
519 /**
520  * block_unplug - release of operations requests in request queue
521  * @q: request queue to unplug
522  * @depth: number of requests just added to the queue
523  * @explicit: whether this was an explicit unplug, or one from schedule()
524  *
525  * Unplug request queue @q because device driver is scheduled to work
526  * on elements in the request queue.  Reuses the block_unplug event class.
527  */
528 DEFINE_EVENT(block_unplug, block_unplug,
529 
530 	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
531 
532 	TP_ARGS(q, depth, explicit)
533 );
534 
535 /**
536  * block_split - split a single bio struct into two bio structs
537  * @q: queue containing the bio
538  * @bio: block operation being split
539  * @new_sector: The starting sector for the new bio
540  *
541  * The bio request @bio in request queue @q needs to be split into two
542  * bio requests. The newly created @bio request starts at
543  * @new_sector. This split may be required due to hardware limitation
544  * such as operation crossing device boundaries in a RAID system.
545  */
546 TRACE_EVENT(block_split,
547 
548 	TP_PROTO(struct request_queue *q, struct bio *bio,
549 		 unsigned int new_sector),
550 
551 	TP_ARGS(q, bio, new_sector),
552 
553 	TP_STRUCT__entry(
554 		__field( dev_t,		dev				)
555 		__field( sector_t,	sector				)
556 		__field( sector_t,	new_sector			)
557 		__array( char,		rwbs,		RWBS_LEN	)
558 		__array( char,		comm,		TASK_COMM_LEN	)
559 	),
560 
561 	TP_fast_assign(
562 		__entry->dev		= bio->bi_bdev->bd_dev;
563 		__entry->sector		= bio->bi_iter.bi_sector;
564 		__entry->new_sector	= new_sector;
565 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
566 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
567 	),
568 
569 	TP_printk("%d,%d %s %llu / %llu [%s]",
570 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
571 		  (unsigned long long)__entry->sector,
572 		  (unsigned long long)__entry->new_sector,
573 		  __entry->comm)
574 );
575 
576 /**
577  * block_bio_remap - map request for a logical device to the raw device
578  * @q: queue holding the operation
579  * @bio: revised operation
580  * @dev: device for the operation
581  * @from: original sector for the operation
582  *
583  * An operation for a logical device has been mapped to the
584  * raw block device.  @bio holds the remapped location; @dev/@from
585  * record the original (old) device and sector.
585  */
586 TRACE_EVENT(block_bio_remap,
587 
588 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
589 		 sector_t from),
590 
591 	TP_ARGS(q, bio, dev, from),
592 
593 	TP_STRUCT__entry(
594 		__field( dev_t,		dev		)
595 		__field( sector_t,	sector		)
596 		__field( unsigned int,	nr_sector	)
597 		__field( dev_t,		old_dev		)
598 		__field( sector_t,	old_sector	)
599 		__array( char,		rwbs,	RWBS_LEN)
600 	),
601 
602 	TP_fast_assign(
603 		__entry->dev		= bio->bi_bdev->bd_dev;
604 		__entry->sector		= bio->bi_iter.bi_sector;
605 		__entry->nr_sector	= bio_sectors(bio);
606 		__entry->old_dev	= dev;
607 		__entry->old_sector	= from;
608 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
609 	),
610 
611 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
612 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
613 		  (unsigned long long)__entry->sector,
614 		  __entry->nr_sector,
615 		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
616 		  (unsigned long long)__entry->old_sector)
617 );
618 
619 /**
620  * block_rq_remap - map request for a block operation request
621  * @q: queue holding the operation
622  * @rq: block IO operation request
623  * @dev: device for the operation
624  * @from: original sector for the operation
625  *
626  * The block operation request @rq in @q has been remapped.  The block
627  * operation request @rq holds the current information and @from hold
628  * the original sector.
629  */
630 TRACE_EVENT(block_rq_remap,
631 
632 	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
633 		 sector_t from),
634 
635 	TP_ARGS(q, rq, dev, from),
636 
637 	TP_STRUCT__entry(
638 		__field( dev_t,		dev		)
639 		__field( sector_t,	sector		)
640 		__field( unsigned int,	nr_sector	)
641 		__field( dev_t,		old_dev		)
642 		__field( sector_t,	old_sector	)
643 		__field( unsigned int,	nr_bios		)
644 		__array( char,		rwbs,	RWBS_LEN)
645 	),
646 
647 	TP_fast_assign(
		/* unlike the block_rq classes, rq->rq_disk is not NULL-checked here */
648 		__entry->dev		= disk_devt(rq->rq_disk);
649 		__entry->sector		= blk_rq_pos(rq);
650 		__entry->nr_sector	= blk_rq_sectors(rq);
651 		__entry->old_dev	= dev;
652 		__entry->old_sector	= from;
653 		__entry->nr_bios	= blk_rq_count_bios(rq);
654 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
655 	),
656 
657 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
658 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
659 		  (unsigned long long)__entry->sector,
660 		  __entry->nr_sector,
661 		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
662 		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
663 );
664 
665 #endif /* _TRACE_BLOCK_H */
666 
667 /* This part must be outside protection */
668 #include <trace/define_trace.h>
669 
670