xref: /openbmc/linux/include/trace/events/writeback.h (revision 0feacaa2)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM writeback
4 
5 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_WRITEBACK_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/backing-dev.h>
10 #include <linux/writeback.h>
11 
/*
 * Decode an inode->i_state bitmask into a "|"-separated list of flag
 * names (e.g. "I_DIRTY_SYNC|I_SYNC") for trace output.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
26 
/* enums need to be exported to user space */
#undef EM
#undef EMe
/* First pass: emit TRACE_DEFINE_ENUM() for each value so user space
 * tooling can resolve the enum names. */
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/* Map each wb_reason enum value to the string printed in trace output. */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
53 
54 struct wb_writeback_work;
55 
56 DECLARE_EVENT_CLASS(writeback_page_template,
57 
58 	TP_PROTO(struct page *page, struct address_space *mapping),
59 
60 	TP_ARGS(page, mapping),
61 
62 	TP_STRUCT__entry (
63 		__array(char, name, 32)
64 		__field(unsigned long, ino)
65 		__field(pgoff_t, index)
66 	),
67 
68 	TP_fast_assign(
69 		strncpy(__entry->name,
70 			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
71 		__entry->ino = mapping ? mapping->host->i_ino : 0;
72 		__entry->index = page->index;
73 	),
74 
75 	TP_printk("bdi %s: ino=%lu index=%lu",
76 		__entry->name,
77 		__entry->ino,
78 		__entry->index
79 	)
80 );
81 
/* A page was dirtied against its address_space. */
DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

/* About to wait for in-flight writeback on a page to complete. */
DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
95 
96 DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
97 
98 	TP_PROTO(struct inode *inode, int flags),
99 
100 	TP_ARGS(inode, flags),
101 
102 	TP_STRUCT__entry (
103 		__array(char, name, 32)
104 		__field(unsigned long, ino)
105 		__field(unsigned long, state)
106 		__field(unsigned long, flags)
107 	),
108 
109 	TP_fast_assign(
110 		struct backing_dev_info *bdi = inode_to_bdi(inode);
111 
112 		/* may be called for files on pseudo FSes w/ unregistered bdi */
113 		strncpy(__entry->name,
114 			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
115 		__entry->ino		= inode->i_ino;
116 		__entry->state		= inode->i_state;
117 		__entry->flags		= flags;
118 	),
119 
120 	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
121 		__entry->name,
122 		__entry->ino,
123 		show_inode_state(__entry->state),
124 		show_inode_state(__entry->flags)
125 	)
126 );
127 
/* Entry of __mark_inode_dirty(). */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

/* An inode is about to be marked dirty (before fs callbacks run). */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

/* An inode was marked dirty. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
148 
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/*
 * Resolve the kernfs inode number of the cgroup that owns @wb; used to
 * correlate writeback events with cgroups.  Only defined while the
 * tracepoints are instantiated (CREATE_TRACE_POINTS).
 */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return wb->memcg_css->cgroup->kn->id.ino;
}

/* Same, going through a writeback_control; -1U when no wb is attached. */
static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return -1U;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

/* Stubs when cgroup writeback is disabled: always report -1U. */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return -1U;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
178 
179 #ifdef CONFIG_CGROUP_WRITEBACK
180 TRACE_EVENT(inode_foreign_history,
181 
182 	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
183 		 unsigned int history),
184 
185 	TP_ARGS(inode, wbc, history),
186 
187 	TP_STRUCT__entry(
188 		__array(char,		name, 32)
189 		__field(unsigned long,	ino)
190 		__field(unsigned int,	cgroup_ino)
191 		__field(unsigned int,	history)
192 	),
193 
194 	TP_fast_assign(
195 		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
196 		__entry->ino		= inode->i_ino;
197 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
198 		__entry->history	= history;
199 	),
200 
201 	TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x",
202 		__entry->name,
203 		__entry->ino,
204 		__entry->cgroup_ino,
205 		__entry->history
206 	)
207 );
208 
209 TRACE_EVENT(inode_switch_wbs,
210 
211 	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
212 		 struct bdi_writeback *new_wb),
213 
214 	TP_ARGS(inode, old_wb, new_wb),
215 
216 	TP_STRUCT__entry(
217 		__array(char,		name, 32)
218 		__field(unsigned long,	ino)
219 		__field(unsigned int,	old_cgroup_ino)
220 		__field(unsigned int,	new_cgroup_ino)
221 	),
222 
223 	TP_fast_assign(
224 		strncpy(__entry->name,	dev_name(old_wb->bdi->dev), 32);
225 		__entry->ino		= inode->i_ino;
226 		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
227 		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
228 	),
229 
230 	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u",
231 		__entry->name,
232 		__entry->ino,
233 		__entry->old_cgroup_ino,
234 		__entry->new_cgroup_ino
235 	)
236 );
237 
238 TRACE_EVENT(track_foreign_dirty,
239 
240 	TP_PROTO(struct page *page, struct bdi_writeback *wb),
241 
242 	TP_ARGS(page, wb),
243 
244 	TP_STRUCT__entry(
245 		__array(char,		name, 32)
246 		__field(u64,		bdi_id)
247 		__field(unsigned long,	ino)
248 		__field(unsigned int,	memcg_id)
249 		__field(unsigned int,	cgroup_ino)
250 		__field(unsigned int,	page_cgroup_ino)
251 	),
252 
253 	TP_fast_assign(
254 		struct address_space *mapping = page_mapping(page);
255 		struct inode *inode = mapping ? mapping->host : NULL;
256 
257 		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
258 		__entry->bdi_id		= wb->bdi->id;
259 		__entry->ino		= inode ? inode->i_ino : 0;
260 		__entry->memcg_id	= wb->memcg_css->id;
261 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
262 		__entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino;
263 	),
264 
265 	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u",
266 		__entry->name,
267 		__entry->bdi_id,
268 		__entry->ino,
269 		__entry->memcg_id,
270 		__entry->cgroup_ino,
271 		__entry->page_cgroup_ino
272 	)
273 );
274 
275 TRACE_EVENT(flush_foreign,
276 
277 	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
278 		 unsigned int frn_memcg_id),
279 
280 	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
281 
282 	TP_STRUCT__entry(
283 		__array(char,		name, 32)
284 		__field(unsigned int,	cgroup_ino)
285 		__field(unsigned int,	frn_bdi_id)
286 		__field(unsigned int,	frn_memcg_id)
287 	),
288 
289 	TP_fast_assign(
290 		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
291 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
292 		__entry->frn_bdi_id	= frn_bdi_id;
293 		__entry->frn_memcg_id	= frn_memcg_id;
294 	),
295 
296 	TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u",
297 		__entry->name,
298 		__entry->cgroup_ino,
299 		__entry->frn_bdi_id,
300 		__entry->frn_memcg_id
301 	)
302 );
303 #endif
304 
305 DECLARE_EVENT_CLASS(writeback_write_inode_template,
306 
307 	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
308 
309 	TP_ARGS(inode, wbc),
310 
311 	TP_STRUCT__entry (
312 		__array(char, name, 32)
313 		__field(unsigned long, ino)
314 		__field(int, sync_mode)
315 		__field(unsigned int, cgroup_ino)
316 	),
317 
318 	TP_fast_assign(
319 		strncpy(__entry->name,
320 			dev_name(inode_to_bdi(inode)->dev), 32);
321 		__entry->ino		= inode->i_ino;
322 		__entry->sync_mode	= wbc->sync_mode;
323 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
324 	),
325 
326 	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
327 		__entry->name,
328 		__entry->ino,
329 		__entry->sync_mode,
330 		__entry->cgroup_ino
331 	)
332 );
333 
/* ->write_inode() is about to be called for the inode. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

/* ->write_inode() has been called for the inode. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
347 
348 DECLARE_EVENT_CLASS(writeback_work_class,
349 	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
350 	TP_ARGS(wb, work),
351 	TP_STRUCT__entry(
352 		__array(char, name, 32)
353 		__field(long, nr_pages)
354 		__field(dev_t, sb_dev)
355 		__field(int, sync_mode)
356 		__field(int, for_kupdate)
357 		__field(int, range_cyclic)
358 		__field(int, for_background)
359 		__field(int, reason)
360 		__field(unsigned int, cgroup_ino)
361 	),
362 	TP_fast_assign(
363 		strncpy(__entry->name,
364 			wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
365 		__entry->nr_pages = work->nr_pages;
366 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
367 		__entry->sync_mode = work->sync_mode;
368 		__entry->for_kupdate = work->for_kupdate;
369 		__entry->range_cyclic = work->range_cyclic;
370 		__entry->for_background	= work->for_background;
371 		__entry->reason = work->reason;
372 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
373 	),
374 	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
375 		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
376 		  __entry->name,
377 		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
378 		  __entry->nr_pages,
379 		  __entry->sync_mode,
380 		  __entry->for_kupdate,
381 		  __entry->range_cyclic,
382 		  __entry->for_background,
383 		  __print_symbolic(__entry->reason, WB_WORK_REASON),
384 		  __entry->cgroup_ino
385 	)
386 );
/* Instantiate one event of writeback_work_class per lifecycle stage. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
396 
/* Number of pages written by one pass of the writeback worker. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
408 
409 DECLARE_EVENT_CLASS(writeback_class,
410 	TP_PROTO(struct bdi_writeback *wb),
411 	TP_ARGS(wb),
412 	TP_STRUCT__entry(
413 		__array(char, name, 32)
414 		__field(unsigned int, cgroup_ino)
415 	),
416 	TP_fast_assign(
417 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
418 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
419 	),
420 	TP_printk("bdi %s: cgroup_ino=%u",
421 		  __entry->name,
422 		  __entry->cgroup_ino
423 	)
424 );
425 #define DEFINE_WRITEBACK_EVENT(name) \
426 DEFINE_EVENT(writeback_class, name, \
427 	TP_PROTO(struct bdi_writeback *wb), \
428 	TP_ARGS(wb))
429 
430 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
431 
432 TRACE_EVENT(writeback_bdi_register,
433 	TP_PROTO(struct backing_dev_info *bdi),
434 	TP_ARGS(bdi),
435 	TP_STRUCT__entry(
436 		__array(char, name, 32)
437 	),
438 	TP_fast_assign(
439 		strncpy(__entry->name, dev_name(bdi->dev), 32);
440 	),
441 	TP_printk("bdi %s",
442 		__entry->name
443 	)
444 );
445 
446 DECLARE_EVENT_CLASS(wbc_class,
447 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
448 	TP_ARGS(wbc, bdi),
449 	TP_STRUCT__entry(
450 		__array(char, name, 32)
451 		__field(long, nr_to_write)
452 		__field(long, pages_skipped)
453 		__field(int, sync_mode)
454 		__field(int, for_kupdate)
455 		__field(int, for_background)
456 		__field(int, for_reclaim)
457 		__field(int, range_cyclic)
458 		__field(long, range_start)
459 		__field(long, range_end)
460 		__field(unsigned int, cgroup_ino)
461 	),
462 
463 	TP_fast_assign(
464 		strncpy(__entry->name, dev_name(bdi->dev), 32);
465 		__entry->nr_to_write	= wbc->nr_to_write;
466 		__entry->pages_skipped	= wbc->pages_skipped;
467 		__entry->sync_mode	= wbc->sync_mode;
468 		__entry->for_kupdate	= wbc->for_kupdate;
469 		__entry->for_background	= wbc->for_background;
470 		__entry->for_reclaim	= wbc->for_reclaim;
471 		__entry->range_cyclic	= wbc->range_cyclic;
472 		__entry->range_start	= (long)wbc->range_start;
473 		__entry->range_end	= (long)wbc->range_end;
474 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
475 	),
476 
477 	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
478 		"bgrd=%d reclm=%d cyclic=%d "
479 		"start=0x%lx end=0x%lx cgroup_ino=%u",
480 		__entry->name,
481 		__entry->nr_to_write,
482 		__entry->pages_skipped,
483 		__entry->sync_mode,
484 		__entry->for_kupdate,
485 		__entry->for_background,
486 		__entry->for_reclaim,
487 		__entry->range_cyclic,
488 		__entry->range_start,
489 		__entry->range_end,
490 		__entry->cgroup_ino
491 	)
492 )
493 
494 #define DEFINE_WBC_EVENT(name) \
495 DEFINE_EVENT(wbc_class, name, \
496 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
497 	TP_ARGS(wbc, bdi))
498 DEFINE_WBC_EVENT(wbc_writepage);
499 
500 TRACE_EVENT(writeback_queue_io,
501 	TP_PROTO(struct bdi_writeback *wb,
502 		 struct wb_writeback_work *work,
503 		 int moved),
504 	TP_ARGS(wb, work, moved),
505 	TP_STRUCT__entry(
506 		__array(char,		name, 32)
507 		__field(unsigned long,	older)
508 		__field(long,		age)
509 		__field(int,		moved)
510 		__field(int,		reason)
511 		__field(unsigned int,	cgroup_ino)
512 	),
513 	TP_fast_assign(
514 		unsigned long *older_than_this = work->older_than_this;
515 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
516 		__entry->older	= older_than_this ?  *older_than_this : 0;
517 		__entry->age	= older_than_this ?
518 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
519 		__entry->moved	= moved;
520 		__entry->reason	= work->reason;
521 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
522 	),
523 	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
524 		__entry->name,
525 		__entry->older,	/* older_than_this in jiffies */
526 		__entry->age,	/* older_than_this in relative milliseconds */
527 		__entry->moved,
528 		__print_symbolic(__entry->reason, WB_WORK_REASON),
529 		__entry->cgroup_ino
530 	)
531 );
532 
/*
 * Snapshot of global dirty-page accounting: node-level page counters
 * plus the thresholds computed by balance_dirty_pages().
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
578 
579 #define KBps(x)			((x) << (PAGE_SHIFT - 10))
580 
581 TRACE_EVENT(bdi_dirty_ratelimit,
582 
583 	TP_PROTO(struct bdi_writeback *wb,
584 		 unsigned long dirty_rate,
585 		 unsigned long task_ratelimit),
586 
587 	TP_ARGS(wb, dirty_rate, task_ratelimit),
588 
589 	TP_STRUCT__entry(
590 		__array(char,		bdi, 32)
591 		__field(unsigned long,	write_bw)
592 		__field(unsigned long,	avg_write_bw)
593 		__field(unsigned long,	dirty_rate)
594 		__field(unsigned long,	dirty_ratelimit)
595 		__field(unsigned long,	task_ratelimit)
596 		__field(unsigned long,	balanced_dirty_ratelimit)
597 		__field(unsigned int,	cgroup_ino)
598 	),
599 
600 	TP_fast_assign(
601 		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
602 		__entry->write_bw	= KBps(wb->write_bandwidth);
603 		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
604 		__entry->dirty_rate	= KBps(dirty_rate);
605 		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
606 		__entry->task_ratelimit	= KBps(task_ratelimit);
607 		__entry->balanced_dirty_ratelimit =
608 					KBps(wb->balanced_dirty_ratelimit);
609 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
610 	),
611 
612 	TP_printk("bdi %s: "
613 		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
614 		  "dirty_ratelimit=%lu task_ratelimit=%lu "
615 		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
616 		  __entry->bdi,
617 		  __entry->write_bw,		/* write bandwidth */
618 		  __entry->avg_write_bw,	/* avg write bandwidth */
619 		  __entry->dirty_rate,		/* bdi dirty rate */
620 		  __entry->dirty_ratelimit,	/* base ratelimit */
621 		  __entry->task_ratelimit, /* ratelimit with position control */
622 		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
623 		  __entry->cgroup_ino
624 	)
625 );
626 
627 TRACE_EVENT(balance_dirty_pages,
628 
629 	TP_PROTO(struct bdi_writeback *wb,
630 		 unsigned long thresh,
631 		 unsigned long bg_thresh,
632 		 unsigned long dirty,
633 		 unsigned long bdi_thresh,
634 		 unsigned long bdi_dirty,
635 		 unsigned long dirty_ratelimit,
636 		 unsigned long task_ratelimit,
637 		 unsigned long dirtied,
638 		 unsigned long period,
639 		 long pause,
640 		 unsigned long start_time),
641 
642 	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
643 		dirty_ratelimit, task_ratelimit,
644 		dirtied, period, pause, start_time),
645 
646 	TP_STRUCT__entry(
647 		__array(	 char,	bdi, 32)
648 		__field(unsigned long,	limit)
649 		__field(unsigned long,	setpoint)
650 		__field(unsigned long,	dirty)
651 		__field(unsigned long,	bdi_setpoint)
652 		__field(unsigned long,	bdi_dirty)
653 		__field(unsigned long,	dirty_ratelimit)
654 		__field(unsigned long,	task_ratelimit)
655 		__field(unsigned int,	dirtied)
656 		__field(unsigned int,	dirtied_pause)
657 		__field(unsigned long,	paused)
658 		__field(	 long,	pause)
659 		__field(unsigned long,	period)
660 		__field(	 long,	think)
661 		__field(unsigned int,	cgroup_ino)
662 	),
663 
664 	TP_fast_assign(
665 		unsigned long freerun = (thresh + bg_thresh) / 2;
666 		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
667 
668 		__entry->limit		= global_wb_domain.dirty_limit;
669 		__entry->setpoint	= (global_wb_domain.dirty_limit +
670 						freerun) / 2;
671 		__entry->dirty		= dirty;
672 		__entry->bdi_setpoint	= __entry->setpoint *
673 						bdi_thresh / (thresh + 1);
674 		__entry->bdi_dirty	= bdi_dirty;
675 		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
676 		__entry->task_ratelimit	= KBps(task_ratelimit);
677 		__entry->dirtied	= dirtied;
678 		__entry->dirtied_pause	= current->nr_dirtied_pause;
679 		__entry->think		= current->dirty_paused_when == 0 ? 0 :
680 			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
681 		__entry->period		= period * 1000 / HZ;
682 		__entry->pause		= pause * 1000 / HZ;
683 		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
684 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
685 	),
686 
687 
688 	TP_printk("bdi %s: "
689 		  "limit=%lu setpoint=%lu dirty=%lu "
690 		  "bdi_setpoint=%lu bdi_dirty=%lu "
691 		  "dirty_ratelimit=%lu task_ratelimit=%lu "
692 		  "dirtied=%u dirtied_pause=%u "
693 		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
694 		  __entry->bdi,
695 		  __entry->limit,
696 		  __entry->setpoint,
697 		  __entry->dirty,
698 		  __entry->bdi_setpoint,
699 		  __entry->bdi_dirty,
700 		  __entry->dirty_ratelimit,
701 		  __entry->task_ratelimit,
702 		  __entry->dirtied,
703 		  __entry->dirtied_pause,
704 		  __entry->paused,	/* ms */
705 		  __entry->pause,	/* ms */
706 		  __entry->period,	/* ms */
707 		  __entry->think,	/* ms */
708 		  __entry->cgroup_ino
709 	  )
710 );
711 
712 TRACE_EVENT(writeback_sb_inodes_requeue,
713 
714 	TP_PROTO(struct inode *inode),
715 	TP_ARGS(inode),
716 
717 	TP_STRUCT__entry(
718 		__array(char, name, 32)
719 		__field(unsigned long, ino)
720 		__field(unsigned long, state)
721 		__field(unsigned long, dirtied_when)
722 		__field(unsigned int, cgroup_ino)
723 	),
724 
725 	TP_fast_assign(
726 		strncpy(__entry->name,
727 		        dev_name(inode_to_bdi(inode)->dev), 32);
728 		__entry->ino		= inode->i_ino;
729 		__entry->state		= inode->i_state;
730 		__entry->dirtied_when	= inode->dirtied_when;
731 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
732 	),
733 
734 	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
735 		  __entry->name,
736 		  __entry->ino,
737 		  show_inode_state(__entry->state),
738 		  __entry->dirtied_when,
739 		  (jiffies - __entry->dirtied_when) / HZ,
740 		  __entry->cgroup_ino
741 	)
742 );
743 
/*
 * Congestion-wait events: how long the caller was willing to wait
 * (usec_timeout) and how long it actually slept (usec_delayed).
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

/* Unconditional congestion_wait() sleep. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

/* wait_iff_congested(): sleeps only if the bdi is actually congested. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
778 
779 DECLARE_EVENT_CLASS(writeback_single_inode_template,
780 
781 	TP_PROTO(struct inode *inode,
782 		 struct writeback_control *wbc,
783 		 unsigned long nr_to_write
784 	),
785 
786 	TP_ARGS(inode, wbc, nr_to_write),
787 
788 	TP_STRUCT__entry(
789 		__array(char, name, 32)
790 		__field(unsigned long, ino)
791 		__field(unsigned long, state)
792 		__field(unsigned long, dirtied_when)
793 		__field(unsigned long, writeback_index)
794 		__field(long, nr_to_write)
795 		__field(unsigned long, wrote)
796 		__field(unsigned int, cgroup_ino)
797 	),
798 
799 	TP_fast_assign(
800 		strncpy(__entry->name,
801 			dev_name(inode_to_bdi(inode)->dev), 32);
802 		__entry->ino		= inode->i_ino;
803 		__entry->state		= inode->i_state;
804 		__entry->dirtied_when	= inode->dirtied_when;
805 		__entry->writeback_index = inode->i_mapping->writeback_index;
806 		__entry->nr_to_write	= nr_to_write;
807 		__entry->wrote		= nr_to_write - wbc->nr_to_write;
808 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
809 	),
810 
811 	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
812 		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
813 		  __entry->name,
814 		  __entry->ino,
815 		  show_inode_state(__entry->state),
816 		  __entry->dirtied_when,
817 		  (jiffies - __entry->dirtied_when) / HZ,
818 		  __entry->writeback_index,
819 		  __entry->nr_to_write,
820 		  __entry->wrote,
821 		  __entry->cgroup_ino
822 	)
823 );
824 
/* Entry of __writeback_single_inode(). */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

/* Completion of __writeback_single_inode(). */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
838 
/*
 * Generic inode events keyed by device number rather than bdi name:
 * record dev, inode number, i_state, mode and dirtied_when.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

/* Lazytime timestamp update is being written back. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Lazytime writeback triggered from iput(). */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* A dirtied inode was enqueued on a writeback list. */
DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
898 
899 #endif /* _TRACE_WRITEBACK_H */
900 
901 /* This part must be outside protection */
902 #include <trace/define_trace.h>
903