xref: /openbmc/linux/include/trace/events/writeback.h (revision 3a8e9ac8)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM writeback
4 
5 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_WRITEBACK_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/backing-dev.h>
10 #include <linux/writeback.h>
11 
/*
 * show_inode_state - render an inode state bitmask as "A|B|..." in trace
 * output via __print_flags().  NOTE(review): keep this table in sync with
 * the I_* flag definitions in include/linux/fs.h.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
26 
/* enums need to be exported to user space */
#undef EM
#undef EMe
/*
 * First expansion of the WB_WORK_REASON list below: each entry emits a
 * TRACE_DEFINE_ENUM() so userspace trace tools can resolve the numeric
 * WB_REASON_* values.  EMe() marks the final entry of the list.
 */
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/* Canonical list of writeback work reasons and their printed names. */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

/* Opaque here; defined in fs/fs-writeback.c. */
struct wb_writeback_work;
55 
56 DECLARE_EVENT_CLASS(writeback_page_template,
57 
58 	TP_PROTO(struct page *page, struct address_space *mapping),
59 
60 	TP_ARGS(page, mapping),
61 
62 	TP_STRUCT__entry (
63 		__array(char, name, 32)
64 		__field(unsigned long, ino)
65 		__field(pgoff_t, index)
66 	),
67 
68 	TP_fast_assign(
69 		strncpy(__entry->name,
70 			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
71 		__entry->ino = mapping ? mapping->host->i_ino : 0;
72 		__entry->index = page->index;
73 	),
74 
75 	TP_printk("bdi %s: ino=%lu index=%lu",
76 		__entry->name,
77 		__entry->ino,
78 		__entry->index
79 	)
80 );
81 
/* Instances of writeback_page_template; fields/format come from the class. */
DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
95 
96 DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
97 
98 	TP_PROTO(struct inode *inode, int flags),
99 
100 	TP_ARGS(inode, flags),
101 
102 	TP_STRUCT__entry (
103 		__array(char, name, 32)
104 		__field(unsigned long, ino)
105 		__field(unsigned long, state)
106 		__field(unsigned long, flags)
107 	),
108 
109 	TP_fast_assign(
110 		struct backing_dev_info *bdi = inode_to_bdi(inode);
111 
112 		/* may be called for files on pseudo FSes w/ unregistered bdi */
113 		strncpy(__entry->name,
114 			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
115 		__entry->ino		= inode->i_ino;
116 		__entry->state		= inode->i_state;
117 		__entry->flags		= flags;
118 	),
119 
120 	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
121 		__entry->name,
122 		__entry->ino,
123 		show_inode_state(__entry->state),
124 		show_inode_state(__entry->flags)
125 	)
126 );
127 
/* Instances of writeback_dirty_inode_template; see the class for fields. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
148 
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/*
 * Resolve a bdi_writeback to the inode number of its memcg's cgroup
 * kernfs node; used to fill the cgroup_ino fields of the events below.
 * NOTE(review): kernfs inode numbers may exceed 32 bits on some configs;
 * the unsigned int return would truncate them — confirm kn->id layout.
 */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return wb->memcg_css->cgroup->kn->id.ino;
}

/* Same lookup via a writeback_control; -1U when no wb is attached. */
static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return wbc->wb ? __trace_wb_assign_cgroup(wbc->wb) : -1U;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback there is no cgroup to report: always -1U. */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return -1U;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
178 
179 #ifdef CONFIG_CGROUP_WRITEBACK
180 TRACE_EVENT(inode_foreign_history,
181 
182 	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
183 		 unsigned int history),
184 
185 	TP_ARGS(inode, wbc, history),
186 
187 	TP_STRUCT__entry(
188 		__array(char,		name, 32)
189 		__field(unsigned long,	ino)
190 		__field(unsigned int,	cgroup_ino)
191 		__field(unsigned int,	history)
192 	),
193 
194 	TP_fast_assign(
195 		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
196 		__entry->ino		= inode->i_ino;
197 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
198 		__entry->history	= history;
199 	),
200 
201 	TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x",
202 		__entry->name,
203 		__entry->ino,
204 		__entry->cgroup_ino,
205 		__entry->history
206 	)
207 );
208 
209 TRACE_EVENT(inode_switch_wbs,
210 
211 	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
212 		 struct bdi_writeback *new_wb),
213 
214 	TP_ARGS(inode, old_wb, new_wb),
215 
216 	TP_STRUCT__entry(
217 		__array(char,		name, 32)
218 		__field(unsigned long,	ino)
219 		__field(unsigned int,	old_cgroup_ino)
220 		__field(unsigned int,	new_cgroup_ino)
221 	),
222 
223 	TP_fast_assign(
224 		strncpy(__entry->name,	dev_name(old_wb->bdi->dev), 32);
225 		__entry->ino		= inode->i_ino;
226 		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
227 		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
228 	),
229 
230 	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u",
231 		__entry->name,
232 		__entry->ino,
233 		__entry->old_cgroup_ino,
234 		__entry->new_cgroup_ino
235 	)
236 );
237 
238 TRACE_EVENT(track_foreign_dirty,
239 
240 	TP_PROTO(struct page *page, struct bdi_writeback *wb),
241 
242 	TP_ARGS(page, wb),
243 
244 	TP_STRUCT__entry(
245 		__array(char,		name, 32)
246 		__field(u64,		bdi_id)
247 		__field(unsigned long,	ino)
248 		__field(unsigned int,	memcg_id)
249 		__field(unsigned int,	cgroup_ino)
250 		__field(unsigned int,	page_cgroup_ino)
251 	),
252 
253 	TP_fast_assign(
254 		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
255 		__entry->bdi_id		= wb->bdi->id;
256 		__entry->ino		= page->mapping->host->i_ino;
257 		__entry->memcg_id	= wb->memcg_css->id;
258 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
259 		__entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino;
260 	),
261 
262 	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u",
263 		__entry->name,
264 		__entry->bdi_id,
265 		__entry->ino,
266 		__entry->memcg_id,
267 		__entry->cgroup_ino,
268 		__entry->page_cgroup_ino
269 	)
270 );
271 
272 TRACE_EVENT(flush_foreign,
273 
274 	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
275 		 unsigned int frn_memcg_id),
276 
277 	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
278 
279 	TP_STRUCT__entry(
280 		__array(char,		name, 32)
281 		__field(unsigned int,	cgroup_ino)
282 		__field(unsigned int,	frn_bdi_id)
283 		__field(unsigned int,	frn_memcg_id)
284 	),
285 
286 	TP_fast_assign(
287 		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
288 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
289 		__entry->frn_bdi_id	= frn_bdi_id;
290 		__entry->frn_memcg_id	= frn_memcg_id;
291 	),
292 
293 	TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u",
294 		__entry->name,
295 		__entry->cgroup_ino,
296 		__entry->frn_bdi_id,
297 		__entry->frn_memcg_id
298 	)
299 );
300 #endif
301 
302 DECLARE_EVENT_CLASS(writeback_write_inode_template,
303 
304 	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
305 
306 	TP_ARGS(inode, wbc),
307 
308 	TP_STRUCT__entry (
309 		__array(char, name, 32)
310 		__field(unsigned long, ino)
311 		__field(int, sync_mode)
312 		__field(unsigned int, cgroup_ino)
313 	),
314 
315 	TP_fast_assign(
316 		strncpy(__entry->name,
317 			dev_name(inode_to_bdi(inode)->dev), 32);
318 		__entry->ino		= inode->i_ino;
319 		__entry->sync_mode	= wbc->sync_mode;
320 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
321 	),
322 
323 	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
324 		__entry->name,
325 		__entry->ino,
326 		__entry->sync_mode,
327 		__entry->cgroup_ino
328 	)
329 );
330 
/* Instances of writeback_write_inode_template; see the class for fields. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
344 
345 DECLARE_EVENT_CLASS(writeback_work_class,
346 	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
347 	TP_ARGS(wb, work),
348 	TP_STRUCT__entry(
349 		__array(char, name, 32)
350 		__field(long, nr_pages)
351 		__field(dev_t, sb_dev)
352 		__field(int, sync_mode)
353 		__field(int, for_kupdate)
354 		__field(int, range_cyclic)
355 		__field(int, for_background)
356 		__field(int, reason)
357 		__field(unsigned int, cgroup_ino)
358 	),
359 	TP_fast_assign(
360 		strncpy(__entry->name,
361 			wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
362 		__entry->nr_pages = work->nr_pages;
363 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
364 		__entry->sync_mode = work->sync_mode;
365 		__entry->for_kupdate = work->for_kupdate;
366 		__entry->range_cyclic = work->range_cyclic;
367 		__entry->for_background	= work->for_background;
368 		__entry->reason = work->reason;
369 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
370 	),
371 	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
372 		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
373 		  __entry->name,
374 		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
375 		  __entry->nr_pages,
376 		  __entry->sync_mode,
377 		  __entry->for_kupdate,
378 		  __entry->range_cyclic,
379 		  __entry->for_background,
380 		  __print_symbolic(__entry->reason, WB_WORK_REASON),
381 		  __entry->cgroup_ino
382 	)
383 );
384 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
385 DEFINE_EVENT(writeback_work_class, name, \
386 	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
387 	TP_ARGS(wb, work))
388 DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
389 DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
390 DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
391 DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
392 DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
393 
/* Records the page count reported by a completed writeback pass. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
405 
406 DECLARE_EVENT_CLASS(writeback_class,
407 	TP_PROTO(struct bdi_writeback *wb),
408 	TP_ARGS(wb),
409 	TP_STRUCT__entry(
410 		__array(char, name, 32)
411 		__field(unsigned int, cgroup_ino)
412 	),
413 	TP_fast_assign(
414 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
415 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
416 	),
417 	TP_printk("bdi %s: cgroup_ino=%u",
418 		  __entry->name,
419 		  __entry->cgroup_ino
420 	)
421 );
422 #define DEFINE_WRITEBACK_EVENT(name) \
423 DEFINE_EVENT(writeback_class, name, \
424 	TP_PROTO(struct bdi_writeback *wb), \
425 	TP_ARGS(wb))
426 
427 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
428 
429 TRACE_EVENT(writeback_bdi_register,
430 	TP_PROTO(struct backing_dev_info *bdi),
431 	TP_ARGS(bdi),
432 	TP_STRUCT__entry(
433 		__array(char, name, 32)
434 	),
435 	TP_fast_assign(
436 		strncpy(__entry->name, dev_name(bdi->dev), 32);
437 	),
438 	TP_printk("bdi %s",
439 		__entry->name
440 	)
441 );
442 
443 DECLARE_EVENT_CLASS(wbc_class,
444 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
445 	TP_ARGS(wbc, bdi),
446 	TP_STRUCT__entry(
447 		__array(char, name, 32)
448 		__field(long, nr_to_write)
449 		__field(long, pages_skipped)
450 		__field(int, sync_mode)
451 		__field(int, for_kupdate)
452 		__field(int, for_background)
453 		__field(int, for_reclaim)
454 		__field(int, range_cyclic)
455 		__field(long, range_start)
456 		__field(long, range_end)
457 		__field(unsigned int, cgroup_ino)
458 	),
459 
460 	TP_fast_assign(
461 		strncpy(__entry->name, dev_name(bdi->dev), 32);
462 		__entry->nr_to_write	= wbc->nr_to_write;
463 		__entry->pages_skipped	= wbc->pages_skipped;
464 		__entry->sync_mode	= wbc->sync_mode;
465 		__entry->for_kupdate	= wbc->for_kupdate;
466 		__entry->for_background	= wbc->for_background;
467 		__entry->for_reclaim	= wbc->for_reclaim;
468 		__entry->range_cyclic	= wbc->range_cyclic;
469 		__entry->range_start	= (long)wbc->range_start;
470 		__entry->range_end	= (long)wbc->range_end;
471 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
472 	),
473 
474 	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
475 		"bgrd=%d reclm=%d cyclic=%d "
476 		"start=0x%lx end=0x%lx cgroup_ino=%u",
477 		__entry->name,
478 		__entry->nr_to_write,
479 		__entry->pages_skipped,
480 		__entry->sync_mode,
481 		__entry->for_kupdate,
482 		__entry->for_background,
483 		__entry->for_reclaim,
484 		__entry->range_cyclic,
485 		__entry->range_start,
486 		__entry->range_end,
487 		__entry->cgroup_ino
488 	)
489 )
490 
491 #define DEFINE_WBC_EVENT(name) \
492 DEFINE_EVENT(wbc_class, name, \
493 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
494 	TP_ARGS(wbc, bdi))
495 DEFINE_WBC_EVENT(wbc_writepage);
496 
497 TRACE_EVENT(writeback_queue_io,
498 	TP_PROTO(struct bdi_writeback *wb,
499 		 struct wb_writeback_work *work,
500 		 int moved),
501 	TP_ARGS(wb, work, moved),
502 	TP_STRUCT__entry(
503 		__array(char,		name, 32)
504 		__field(unsigned long,	older)
505 		__field(long,		age)
506 		__field(int,		moved)
507 		__field(int,		reason)
508 		__field(unsigned int,	cgroup_ino)
509 	),
510 	TP_fast_assign(
511 		unsigned long *older_than_this = work->older_than_this;
512 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
513 		__entry->older	= older_than_this ?  *older_than_this : 0;
514 		__entry->age	= older_than_this ?
515 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
516 		__entry->moved	= moved;
517 		__entry->reason	= work->reason;
518 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
519 	),
520 	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
521 		__entry->name,
522 		__entry->older,	/* older_than_this in jiffies */
523 		__entry->age,	/* older_than_this in relative milliseconds */
524 		__entry->moved,
525 		__print_symbolic(__entry->reason, WB_WORK_REASON),
526 		__entry->cgroup_ino
527 	)
528 );
529 
/*
 * Snapshot of global dirty-page accounting: the node page-state counters
 * (dirty/writeback/unstable-NFS/dirtied/written), the caller-supplied
 * thresholds, and the current global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
575 
576 #define KBps(x)			((x) << (PAGE_SHIFT - 10))
577 
578 TRACE_EVENT(bdi_dirty_ratelimit,
579 
580 	TP_PROTO(struct bdi_writeback *wb,
581 		 unsigned long dirty_rate,
582 		 unsigned long task_ratelimit),
583 
584 	TP_ARGS(wb, dirty_rate, task_ratelimit),
585 
586 	TP_STRUCT__entry(
587 		__array(char,		bdi, 32)
588 		__field(unsigned long,	write_bw)
589 		__field(unsigned long,	avg_write_bw)
590 		__field(unsigned long,	dirty_rate)
591 		__field(unsigned long,	dirty_ratelimit)
592 		__field(unsigned long,	task_ratelimit)
593 		__field(unsigned long,	balanced_dirty_ratelimit)
594 		__field(unsigned int,	cgroup_ino)
595 	),
596 
597 	TP_fast_assign(
598 		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
599 		__entry->write_bw	= KBps(wb->write_bandwidth);
600 		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
601 		__entry->dirty_rate	= KBps(dirty_rate);
602 		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
603 		__entry->task_ratelimit	= KBps(task_ratelimit);
604 		__entry->balanced_dirty_ratelimit =
605 					KBps(wb->balanced_dirty_ratelimit);
606 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
607 	),
608 
609 	TP_printk("bdi %s: "
610 		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
611 		  "dirty_ratelimit=%lu task_ratelimit=%lu "
612 		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
613 		  __entry->bdi,
614 		  __entry->write_bw,		/* write bandwidth */
615 		  __entry->avg_write_bw,	/* avg write bandwidth */
616 		  __entry->dirty_rate,		/* bdi dirty rate */
617 		  __entry->dirty_ratelimit,	/* base ratelimit */
618 		  __entry->task_ratelimit, /* ratelimit with position control */
619 		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
620 		  __entry->cgroup_ino
621 	)
622 );
623 
624 TRACE_EVENT(balance_dirty_pages,
625 
626 	TP_PROTO(struct bdi_writeback *wb,
627 		 unsigned long thresh,
628 		 unsigned long bg_thresh,
629 		 unsigned long dirty,
630 		 unsigned long bdi_thresh,
631 		 unsigned long bdi_dirty,
632 		 unsigned long dirty_ratelimit,
633 		 unsigned long task_ratelimit,
634 		 unsigned long dirtied,
635 		 unsigned long period,
636 		 long pause,
637 		 unsigned long start_time),
638 
639 	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
640 		dirty_ratelimit, task_ratelimit,
641 		dirtied, period, pause, start_time),
642 
643 	TP_STRUCT__entry(
644 		__array(	 char,	bdi, 32)
645 		__field(unsigned long,	limit)
646 		__field(unsigned long,	setpoint)
647 		__field(unsigned long,	dirty)
648 		__field(unsigned long,	bdi_setpoint)
649 		__field(unsigned long,	bdi_dirty)
650 		__field(unsigned long,	dirty_ratelimit)
651 		__field(unsigned long,	task_ratelimit)
652 		__field(unsigned int,	dirtied)
653 		__field(unsigned int,	dirtied_pause)
654 		__field(unsigned long,	paused)
655 		__field(	 long,	pause)
656 		__field(unsigned long,	period)
657 		__field(	 long,	think)
658 		__field(unsigned int,	cgroup_ino)
659 	),
660 
661 	TP_fast_assign(
662 		unsigned long freerun = (thresh + bg_thresh) / 2;
663 		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
664 
665 		__entry->limit		= global_wb_domain.dirty_limit;
666 		__entry->setpoint	= (global_wb_domain.dirty_limit +
667 						freerun) / 2;
668 		__entry->dirty		= dirty;
669 		__entry->bdi_setpoint	= __entry->setpoint *
670 						bdi_thresh / (thresh + 1);
671 		__entry->bdi_dirty	= bdi_dirty;
672 		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
673 		__entry->task_ratelimit	= KBps(task_ratelimit);
674 		__entry->dirtied	= dirtied;
675 		__entry->dirtied_pause	= current->nr_dirtied_pause;
676 		__entry->think		= current->dirty_paused_when == 0 ? 0 :
677 			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
678 		__entry->period		= period * 1000 / HZ;
679 		__entry->pause		= pause * 1000 / HZ;
680 		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
681 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
682 	),
683 
684 
685 	TP_printk("bdi %s: "
686 		  "limit=%lu setpoint=%lu dirty=%lu "
687 		  "bdi_setpoint=%lu bdi_dirty=%lu "
688 		  "dirty_ratelimit=%lu task_ratelimit=%lu "
689 		  "dirtied=%u dirtied_pause=%u "
690 		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
691 		  __entry->bdi,
692 		  __entry->limit,
693 		  __entry->setpoint,
694 		  __entry->dirty,
695 		  __entry->bdi_setpoint,
696 		  __entry->bdi_dirty,
697 		  __entry->dirty_ratelimit,
698 		  __entry->task_ratelimit,
699 		  __entry->dirtied,
700 		  __entry->dirtied_pause,
701 		  __entry->paused,	/* ms */
702 		  __entry->pause,	/* ms */
703 		  __entry->period,	/* ms */
704 		  __entry->think,	/* ms */
705 		  __entry->cgroup_ino
706 	  )
707 );
708 
709 TRACE_EVENT(writeback_sb_inodes_requeue,
710 
711 	TP_PROTO(struct inode *inode),
712 	TP_ARGS(inode),
713 
714 	TP_STRUCT__entry(
715 		__array(char, name, 32)
716 		__field(unsigned long, ino)
717 		__field(unsigned long, state)
718 		__field(unsigned long, dirtied_when)
719 		__field(unsigned int, cgroup_ino)
720 	),
721 
722 	TP_fast_assign(
723 		strncpy(__entry->name,
724 		        dev_name(inode_to_bdi(inode)->dev), 32);
725 		__entry->ino		= inode->i_ino;
726 		__entry->state		= inode->i_state;
727 		__entry->dirtied_when	= inode->dirtied_when;
728 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
729 	),
730 
731 	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
732 		  __entry->name,
733 		  __entry->ino,
734 		  show_inode_state(__entry->state),
735 		  __entry->dirtied_when,
736 		  (jiffies - __entry->dirtied_when) / HZ,
737 		  __entry->cgroup_ino
738 	)
739 );
740 
/*
 * Template for congestion-wait events: the requested timeout and the
 * actual delay incurred, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
761 
/* Instances of writeback_congest_waited_template. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
775 
776 DECLARE_EVENT_CLASS(writeback_single_inode_template,
777 
778 	TP_PROTO(struct inode *inode,
779 		 struct writeback_control *wbc,
780 		 unsigned long nr_to_write
781 	),
782 
783 	TP_ARGS(inode, wbc, nr_to_write),
784 
785 	TP_STRUCT__entry(
786 		__array(char, name, 32)
787 		__field(unsigned long, ino)
788 		__field(unsigned long, state)
789 		__field(unsigned long, dirtied_when)
790 		__field(unsigned long, writeback_index)
791 		__field(long, nr_to_write)
792 		__field(unsigned long, wrote)
793 		__field(unsigned int, cgroup_ino)
794 	),
795 
796 	TP_fast_assign(
797 		strncpy(__entry->name,
798 			dev_name(inode_to_bdi(inode)->dev), 32);
799 		__entry->ino		= inode->i_ino;
800 		__entry->state		= inode->i_state;
801 		__entry->dirtied_when	= inode->dirtied_when;
802 		__entry->writeback_index = inode->i_mapping->writeback_index;
803 		__entry->nr_to_write	= nr_to_write;
804 		__entry->wrote		= nr_to_write - wbc->nr_to_write;
805 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
806 	),
807 
808 	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
809 		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
810 		  __entry->name,
811 		  __entry->ino,
812 		  show_inode_state(__entry->state),
813 		  __entry->dirtied_when,
814 		  (jiffies - __entry->dirtied_when) / HZ,
815 		  __entry->writeback_index,
816 		  __entry->nr_to_write,
817 		  __entry->wrote,
818 		  __entry->cgroup_ino
819 	)
820 );
821 
/* Instances of writeback_single_inode_template. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
835 
/*
 * Template for inode lifecycle/list events keyed by superblock device:
 * records s_dev, inode number, i_state, i_mode and dirtied_when.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
862 
/* Instances of writeback_inode_template; see the class for fields. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
895 
896 #endif /* _TRACE_WRITEBACK_H */
897 
898 /* This part must be outside protection */
899 #include <trace/define_trace.h>
900