/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

struct wb_writeback_work;

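/*
 * Per-page writeback events: record the backing device name, the owning
 * inode number and the page index.
 */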
DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

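/*
 * Inode dirtying events: record the inode's current state flags and the
 * dirty flags being applied.
 */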
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

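/*
 * Helpers that resolve the cgroup inode number of a bdi_writeback's memcg
 * css.  They fall back to 1 (the root cgroup) when cgroup writeback is
 * disabled or when the writeback_control has no wb attached.
 */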
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */

#ifdef CONFIG_CGROUP_WRITEBACK
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct page *page, struct bdi_writeback *wb),

	TP_ARGS(page, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = page_mapping(page);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif

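/*
 * Per-inode ->write_inode() events: record the sync mode requested by the
 * writeback_control and the issuing cgroup.
 */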
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

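/*
 * Writeback work item lifecycle (queue, exec, start, written, wait):
 * records the parameters of the wb_writeback_work being processed.
 */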
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

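/*
 * Snapshot of a writeback_control as pages are written: captures the write
 * budget, skip count, mode flags and the byte range being written.
 */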
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

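/*
 * Emitted when expired dirty inodes are queued for writeback: records the
 * expiration cutoff (older_than_this, in jiffies and as an age in ms) and
 * how many inodes were moved onto the I/O list.
 */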
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);

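/*
 * Global dirty page accounting at the time the dirty thresholds are
 * evaluated.
 */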
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

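/* convert a value counted in pages (or pages/s) to KB (or KB/s) */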
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);

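/*
 * Emitted from balance_dirty_pages(): limits and setpoints are in pages,
 * ratelimits in KB/s, and paused/pause/period/think in milliseconds.
 */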
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

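/*
 * Single-inode writeback events: record how many pages were requested and
 * how many were actually written for this inode.
 */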
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

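/*
 * Generic per-inode events keyed by device and inode number: used for the
 * lazytime events below and for inode writeback list tracking.
 */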
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>