/* xref: /openbmc/linux/include/trace/events/writeback.h (revision 0ae45f63) */
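/*
 * Tracepoints for the writeback subsystem.  The events defined here cover
 * page/inode dirtying, the flusher work items, per-bdi dirty throttling in
 * balance_dirty_pages() and the lazytime handling of timestamp-only updates.
 *
 * Example usage (a sketch, assuming tracefs/debugfs is mounted at the
 * usual location; newer kernels may expose the same tree under
 * /sys/kernel/tracing instead):
 *
 *	# enable every writeback event and read the live trace
 *	echo 1 > /sys/kernel/debug/tracing/events/writeback/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 *	# or enable a single event, e.g. writeback_dirty_page only
 *	echo 1 > /sys/kernel/debug/tracing/events/writeback/writeback_dirty_page/enable
 */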
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

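/*
 * show_inode_state() renders an inode->i_state (or dirtying flags) value as
 * a "|"-separated list of symbolic names in the trace output, e.g.
 * "I_DIRTY_SYNC|I_DIRTY_PAGES".
 */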
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

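/*
 * WB_WORK_REASON maps the wb_reason value carried in a wb_writeback_work to
 * human readable strings for __print_symbolic() in the work and queue_io
 * events below.
 */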
#define WB_WORK_REASON							\
		{WB_REASON_BACKGROUND,		"background"},		\
		{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"},	\
		{WB_REASON_SYNC,		"sync"},		\
		{WB_REASON_PERIODIC,		"periodic"},		\
		{WB_REASON_LAPTOP_TIMER,	"laptop_timer"},	\
		{WB_REASON_FREE_MORE_MEM,	"free_more_memory"},	\
		{WB_REASON_FS_FREE_SPACE,	"fs_free_space"},	\
		{WB_REASON_FORKER_THREAD,	"forker_thread"}

struct wb_writeback_work;

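/*
 * writeback_dirty_page: a page has been dirtied.  Records the backing
 * device name, the owning inode number and the page index; pages with no
 * address_space mapping are reported as "(unknown)" with ino=0.
 */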
TRACE_EVENT(writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);

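/*
 * Common template for the inode-dirtying events below
 * (writeback_mark_inode_dirty, writeback_dirty_inode_start and
 * writeback_dirty_inode): logs the inode's current i_state together with
 * the I_DIRTY_* flags being applied, both decoded via show_inode_state().
 */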
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

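/*
 * Template for writeback_write_inode_start/writeback_write_inode: emitted
 * when an inode is handed to ->write_inode(), recording the target inode
 * and the wbc->sync_mode in effect (WB_SYNC_NONE or WB_SYNC_ALL).
 */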
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d",
		__entry->name,
		__entry->ino,
		__entry->sync_mode
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

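/*
 * writeback_work_class: one event per stage in the life of a
 * wb_writeback_work item (queue, exec, start, written, wait), dumping the
 * work parameters and the reason the work was issued.
 */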
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
	TP_ARGS(bdi, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
	),
	TP_fast_assign(
		struct device *dev = bdi->dev;
		if (!dev)
			dev = default_backing_dev_info.dev;
		strncpy(__entry->name, dev_name(dev), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

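/* Total number of pages written by one run of the per-bdi flusher work. */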
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

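/*
 * writeback_class: minimal events that only identify the backing device,
 * used for bdi registration/unregistration and for the "nothing to do" and
 * background-wakeup paths.
 */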
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		  __entry->name
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);

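/*
 * wbc_class: snapshot of a struct writeback_control as seen by the page
 * writeback path; currently only wbc_writepage is defined on it.
 */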
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
	),

	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

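/*
 * writeback_queue_io: reports how many dirty inodes were moved onto the
 * bdi's b_io list for this work item, together with the expiry cutoff
 * (older_than_this) in jiffies and as an age in milliseconds.
 */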
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);

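/*
 * global_dirty_state: system-wide dirty page accounting sampled by the
 * dirty throttling code, alongside the background/dirty thresholds and the
 * current global dirty limit.
 */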
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
		__entry->nr_written	= global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

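/* Convert a page count (or pages/second rate) to KB (or KB/s). */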
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

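/*
 * bdi_dirty_ratelimit: per-bdi dirty ratelimit update.  All values are
 * reported in KB/s via the KBps() helper above.
 */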
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw	= KBps(bdi->write_bandwidth);
		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					  KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);

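/*
 * balance_dirty_pages: per-bdi throttling decision for the current task.
 * Rates are in KB/s; paused/pause/period/think are in milliseconds.
 */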
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit		= global_dirty_limit;
		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	  )
);

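/*
 * writeback_sb_inodes_requeue: an inode was skipped and put back on the
 * dirty lists instead of being written (typically because it is already
 * under writeback); age is the time since it was dirtied, in seconds.
 */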
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		strncpy(__entry->name,
		        dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ
	)
);

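/*
 * Template for writeback_congestion_wait/writeback_wait_iff_congested:
 * compares the requested congestion timeout with the time actually spent
 * waiting, both in microseconds.
 */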
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

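/*
 * Template for writeback_single_inode_start/writeback_single_inode: wraps
 * the writeback of a single inode, recording its state, age, the page
 * budget it was given (to_write) and how much of that budget was used
 * (wrote).
 */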
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

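/*
 * Lazytime events: inodes dirtied only by timestamp updates (I_DIRTY_TIME)
 * are identified by device and inode number rather than bdi name.
 */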
DECLARE_EVENT_CLASS(writeback_lazytime_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>