xref: /openbmc/linux/include/trace/events/writeback.h (revision e4bc13ad)
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM writeback
3 
4 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_WRITEBACK_H
6 
7 #include <linux/tracepoint.h>
8 #include <linux/backing-dev.h>
9 #include <linux/writeback.h>
10 
/*
 * Decode an inode->i_state bitmask into a human-readable "A|B|C" string
 * for trace output, using the I_* flag names from the VFS.
 * (Comments must stay outside the macro body: a comment before the
 * trailing backslash would break the line continuation.)
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
25 
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * Table of wb_reason enum values and the strings reported in trace
 * output.  EMe() marks the last entry so the second expansion below can
 * omit the trailing comma.
 */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

/* First expansion: emit a TRACE_DEFINE_ENUM() for every reason. */
WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
52 
/* Forward declaration; only pointers to it are used in this header. */
struct wb_writeback_work;
54 
55 TRACE_EVENT(writeback_dirty_page,
56 
57 	TP_PROTO(struct page *page, struct address_space *mapping),
58 
59 	TP_ARGS(page, mapping),
60 
61 	TP_STRUCT__entry (
62 		__array(char, name, 32)
63 		__field(unsigned long, ino)
64 		__field(pgoff_t, index)
65 	),
66 
67 	TP_fast_assign(
68 		strncpy(__entry->name,
69 			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
70 		__entry->ino = mapping ? mapping->host->i_ino : 0;
71 		__entry->index = page->index;
72 	),
73 
74 	TP_printk("bdi %s: ino=%lu index=%lu",
75 		__entry->name,
76 		__entry->ino,
77 		__entry->index
78 	)
79 );
80 
81 DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
82 
83 	TP_PROTO(struct inode *inode, int flags),
84 
85 	TP_ARGS(inode, flags),
86 
87 	TP_STRUCT__entry (
88 		__array(char, name, 32)
89 		__field(unsigned long, ino)
90 		__field(unsigned long, state)
91 		__field(unsigned long, flags)
92 	),
93 
94 	TP_fast_assign(
95 		struct backing_dev_info *bdi = inode_to_bdi(inode);
96 
97 		/* may be called for files on pseudo FSes w/ unregistered bdi */
98 		strncpy(__entry->name,
99 			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
100 		__entry->ino		= inode->i_ino;
101 		__entry->state		= inode->i_state;
102 		__entry->flags		= flags;
103 	),
104 
105 	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
106 		__entry->name,
107 		__entry->ino,
108 		show_inode_state(__entry->state),
109 		show_inode_state(__entry->flags)
110 	)
111 );
112 
/* Three trace points sharing writeback_dirty_inode_template. */

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
133 
134 DECLARE_EVENT_CLASS(writeback_write_inode_template,
135 
136 	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
137 
138 	TP_ARGS(inode, wbc),
139 
140 	TP_STRUCT__entry (
141 		__array(char, name, 32)
142 		__field(unsigned long, ino)
143 		__field(int, sync_mode)
144 	),
145 
146 	TP_fast_assign(
147 		strncpy(__entry->name,
148 			dev_name(inode_to_bdi(inode)->dev), 32);
149 		__entry->ino		= inode->i_ino;
150 		__entry->sync_mode	= wbc->sync_mode;
151 	),
152 
153 	TP_printk("bdi %s: ino=%lu sync_mode=%d",
154 		__entry->name,
155 		__entry->ino,
156 		__entry->sync_mode
157 	)
158 );
159 
/* Two trace points sharing writeback_write_inode_template. */

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
173 
174 DECLARE_EVENT_CLASS(writeback_work_class,
175 	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
176 	TP_ARGS(bdi, work),
177 	TP_STRUCT__entry(
178 		__array(char, name, 32)
179 		__field(long, nr_pages)
180 		__field(dev_t, sb_dev)
181 		__field(int, sync_mode)
182 		__field(int, for_kupdate)
183 		__field(int, range_cyclic)
184 		__field(int, for_background)
185 		__field(int, reason)
186 	),
187 	TP_fast_assign(
188 		strncpy(__entry->name,
189 			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
190 		__entry->nr_pages = work->nr_pages;
191 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
192 		__entry->sync_mode = work->sync_mode;
193 		__entry->for_kupdate = work->for_kupdate;
194 		__entry->range_cyclic = work->range_cyclic;
195 		__entry->for_background	= work->for_background;
196 		__entry->reason = work->reason;
197 	),
198 	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
199 		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
200 		  __entry->name,
201 		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
202 		  __entry->nr_pages,
203 		  __entry->sync_mode,
204 		  __entry->for_kupdate,
205 		  __entry->range_cyclic,
206 		  __entry->for_background,
207 		  __print_symbolic(__entry->reason, WB_WORK_REASON)
208 	)
209 );
/* Stamp out one writeback_work_class event per processing stage. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
219 
/* Reports the number of pages written by a writeback pass. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
231 
232 DECLARE_EVENT_CLASS(writeback_class,
233 	TP_PROTO(struct backing_dev_info *bdi),
234 	TP_ARGS(bdi),
235 	TP_STRUCT__entry(
236 		__array(char, name, 32)
237 	),
238 	TP_fast_assign(
239 		strncpy(__entry->name, dev_name(bdi->dev), 32);
240 	),
241 	TP_printk("bdi %s",
242 		  __entry->name
243 	)
244 );
/* Stamp out bdi-only events from writeback_class. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
253 
254 DECLARE_EVENT_CLASS(wbc_class,
255 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
256 	TP_ARGS(wbc, bdi),
257 	TP_STRUCT__entry(
258 		__array(char, name, 32)
259 		__field(long, nr_to_write)
260 		__field(long, pages_skipped)
261 		__field(int, sync_mode)
262 		__field(int, for_kupdate)
263 		__field(int, for_background)
264 		__field(int, for_reclaim)
265 		__field(int, range_cyclic)
266 		__field(long, range_start)
267 		__field(long, range_end)
268 	),
269 
270 	TP_fast_assign(
271 		strncpy(__entry->name, dev_name(bdi->dev), 32);
272 		__entry->nr_to_write	= wbc->nr_to_write;
273 		__entry->pages_skipped	= wbc->pages_skipped;
274 		__entry->sync_mode	= wbc->sync_mode;
275 		__entry->for_kupdate	= wbc->for_kupdate;
276 		__entry->for_background	= wbc->for_background;
277 		__entry->for_reclaim	= wbc->for_reclaim;
278 		__entry->range_cyclic	= wbc->range_cyclic;
279 		__entry->range_start	= (long)wbc->range_start;
280 		__entry->range_end	= (long)wbc->range_end;
281 	),
282 
283 	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
284 		"bgrd=%d reclm=%d cyclic=%d "
285 		"start=0x%lx end=0x%lx",
286 		__entry->name,
287 		__entry->nr_to_write,
288 		__entry->pages_skipped,
289 		__entry->sync_mode,
290 		__entry->for_kupdate,
291 		__entry->for_background,
292 		__entry->for_reclaim,
293 		__entry->range_cyclic,
294 		__entry->range_start,
295 		__entry->range_end)
296 )
297 
/* Stamp out writeback_control events from wbc_class. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
303 
304 TRACE_EVENT(writeback_queue_io,
305 	TP_PROTO(struct bdi_writeback *wb,
306 		 struct wb_writeback_work *work,
307 		 int moved),
308 	TP_ARGS(wb, work, moved),
309 	TP_STRUCT__entry(
310 		__array(char,		name, 32)
311 		__field(unsigned long,	older)
312 		__field(long,		age)
313 		__field(int,		moved)
314 		__field(int,		reason)
315 	),
316 	TP_fast_assign(
317 		unsigned long *older_than_this = work->older_than_this;
318 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
319 		__entry->older	= older_than_this ?  *older_than_this : 0;
320 		__entry->age	= older_than_this ?
321 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
322 		__entry->moved	= moved;
323 		__entry->reason	= work->reason;
324 	),
325 	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
326 		__entry->name,
327 		__entry->older,	/* older_than_this in jiffies */
328 		__entry->age,	/* older_than_this in relative milliseconds */
329 		__entry->moved,
330 		__print_symbolic(__entry->reason, WB_WORK_REASON)
331 	)
332 );
333 
/*
 * Snapshot of global dirty-page accounting: the nr_* counters are
 * sampled from global_page_state(); background/dirty thresholds come
 * from the caller and dirty_limit from global_wb_domain.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
		__entry->nr_written	= global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
379 
/* Scale a value counted in pages (or pages/s) to KB (or KB/s). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))
381 
/*
 * Snapshot of the per-bdi dirty ratelimit computation; all bandwidths
 * and ratelimits are reported in KB/s via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw	= KBps(bdi->wb.write_bandwidth);
		__entry->avg_write_bw	= KBps(bdi->wb.avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(bdi->wb.balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);
424 
/*
 * Per-task dirty-throttling snapshot.  setpoint is halfway between the
 * global dirty limit and the freerun point ((thresh + bg_thresh) / 2);
 * bdi_setpoint scales that by the bdi's share of the threshold.  All
 * time fields (period/pause/paused/think) are converted from jiffies
 * to milliseconds; ratelimits are reported in KB/s via KBps().
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		/* "+ 1" avoids division by zero when thresh == 0 */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	  )
);
506 
507 TRACE_EVENT(writeback_sb_inodes_requeue,
508 
509 	TP_PROTO(struct inode *inode),
510 	TP_ARGS(inode),
511 
512 	TP_STRUCT__entry(
513 		__array(char, name, 32)
514 		__field(unsigned long, ino)
515 		__field(unsigned long, state)
516 		__field(unsigned long, dirtied_when)
517 	),
518 
519 	TP_fast_assign(
520 		strncpy(__entry->name,
521 		        dev_name(inode_to_bdi(inode)->dev), 32);
522 		__entry->ino		= inode->i_ino;
523 		__entry->state		= inode->i_state;
524 		__entry->dirtied_when	= inode->dirtied_when;
525 	),
526 
527 	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
528 		  __entry->name,
529 		  __entry->ino,
530 		  show_inode_state(__entry->state),
531 		  __entry->dirtied_when,
532 		  (jiffies - __entry->dirtied_when) / HZ
533 	)
534 );
535 
/*
 * Template for congestion-wait events: the requested timeout and the
 * actual delay experienced, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
556 
/* Two trace points sharing writeback_congest_waited_template. */

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
570 
571 DECLARE_EVENT_CLASS(writeback_single_inode_template,
572 
573 	TP_PROTO(struct inode *inode,
574 		 struct writeback_control *wbc,
575 		 unsigned long nr_to_write
576 	),
577 
578 	TP_ARGS(inode, wbc, nr_to_write),
579 
580 	TP_STRUCT__entry(
581 		__array(char, name, 32)
582 		__field(unsigned long, ino)
583 		__field(unsigned long, state)
584 		__field(unsigned long, dirtied_when)
585 		__field(unsigned long, writeback_index)
586 		__field(long, nr_to_write)
587 		__field(unsigned long, wrote)
588 	),
589 
590 	TP_fast_assign(
591 		strncpy(__entry->name,
592 			dev_name(inode_to_bdi(inode)->dev), 32);
593 		__entry->ino		= inode->i_ino;
594 		__entry->state		= inode->i_state;
595 		__entry->dirtied_when	= inode->dirtied_when;
596 		__entry->writeback_index = inode->i_mapping->writeback_index;
597 		__entry->nr_to_write	= nr_to_write;
598 		__entry->wrote		= nr_to_write - wbc->nr_to_write;
599 	),
600 
601 	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
602 		  "index=%lu to_write=%ld wrote=%lu",
603 		  __entry->name,
604 		  __entry->ino,
605 		  show_inode_state(__entry->state),
606 		  __entry->dirtied_when,
607 		  (jiffies - __entry->dirtied_when) / HZ,
608 		  __entry->writeback_index,
609 		  __entry->nr_to_write,
610 		  __entry->wrote
611 	)
612 );
613 
/* Two trace points sharing writeback_single_inode_template. */

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
627 
/*
 * Template for lazytime events; identifies the inode by (dev, ino)
 * rather than by bdi name, and also records mode and dirtied_when.
 */
DECLARE_EVENT_CLASS(writeback_lazytime_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
654 
/* Three trace points sharing writeback_lazytime_template. */

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
673 
674 #endif /* _TRACE_WRITEBACK_H */
675 
676 /* This part must be outside protection */
677 #include <trace/define_trace.h>
678