xref: /openbmc/linux/include/trace/events/writeback.h (revision e7253313)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM writeback
4 
5 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_WRITEBACK_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/backing-dev.h>
10 #include <linux/writeback.h>
11 
/*
 * Render an inode ->i_state bitmask as a "|"-separated list of flag names
 * for trace output (e.g. "I_DIRTY_SYNC|I_SYNC"); unknown bits are printed
 * in hex by __print_flags().
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
26 
/* enums need to be exported to user space */
#undef EM
#undef EMe
/*
 * First pass: expand each WB_WORK_REASON entry to TRACE_DEFINE_ENUM() so
 * userspace trace tools can resolve the enum values by name.  EMe marks
 * the last list entry (here identical to EM; it matters for the second
 * expansion below, where the last entry must not carry a trailing comma).
 */
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

/* Opaque here; the tracepoints only dereference it in fs/fs-writeback.c. */
struct wb_writeback_work;
55 
/*
 * Template for per-page writeback events: records the backing device
 * name, the owning inode number and the page index.
 */
DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/* mapping may be NULL; fall back to "(unknown)" / ino 0 */
		strscpy_pad(__entry->name,
			    mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
			    32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
96 
/*
 * Template for inode-dirtying events: records the bdi name, inode number,
 * the inode's current ->i_state and the dirty flags being applied.  Both
 * state and flags are printed with show_inode_state() since flags is a
 * subset of the same I_* bit namespace.
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name,
			    bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
149 
/*
 * Helpers mapping a bdi_writeback / writeback_control to the inode number
 * of the owning cgroup, for the cgroup_ino trace fields.  Only defined
 * under CREATE_TRACE_POINTS so they exist exactly once, in the .c file
 * that instantiates the tracepoints, and don't leak into other includers
 * of this header.
 */
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	/* wbc->wb may be unset — NOTE(review): presumably non-cgroup
	 * writeback; 1 appears to stand for the root cgroup inode. */
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback everything is attributed to inode 1. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
179 
180 #ifdef CONFIG_CGROUP_WRITEBACK
181 TRACE_EVENT(inode_foreign_history,
182 
183 	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
184 		 unsigned int history),
185 
186 	TP_ARGS(inode, wbc, history),
187 
188 	TP_STRUCT__entry(
189 		__array(char,		name, 32)
190 		__field(ino_t,		ino)
191 		__field(ino_t,		cgroup_ino)
192 		__field(unsigned int,	history)
193 	),
194 
195 	TP_fast_assign(
196 		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
197 		__entry->ino		= inode->i_ino;
198 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
199 		__entry->history	= history;
200 	),
201 
202 	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
203 		__entry->name,
204 		(unsigned long)__entry->ino,
205 		(unsigned long)__entry->cgroup_ino,
206 		__entry->history
207 	)
208 );
209 
210 TRACE_EVENT(inode_switch_wbs,
211 
212 	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
213 		 struct bdi_writeback *new_wb),
214 
215 	TP_ARGS(inode, old_wb, new_wb),
216 
217 	TP_STRUCT__entry(
218 		__array(char,		name, 32)
219 		__field(ino_t,		ino)
220 		__field(ino_t,		old_cgroup_ino)
221 		__field(ino_t,		new_cgroup_ino)
222 	),
223 
224 	TP_fast_assign(
225 		strncpy(__entry->name,	dev_name(old_wb->bdi->dev), 32);
226 		__entry->ino		= inode->i_ino;
227 		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
228 		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
229 	),
230 
231 	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
232 		__entry->name,
233 		(unsigned long)__entry->ino,
234 		(unsigned long)__entry->old_cgroup_ino,
235 		(unsigned long)__entry->new_cgroup_ino
236 	)
237 );
238 
239 TRACE_EVENT(track_foreign_dirty,
240 
241 	TP_PROTO(struct page *page, struct bdi_writeback *wb),
242 
243 	TP_ARGS(page, wb),
244 
245 	TP_STRUCT__entry(
246 		__array(char,		name, 32)
247 		__field(u64,		bdi_id)
248 		__field(ino_t,		ino)
249 		__field(unsigned int,	memcg_id)
250 		__field(ino_t,		cgroup_ino)
251 		__field(ino_t,		page_cgroup_ino)
252 	),
253 
254 	TP_fast_assign(
255 		struct address_space *mapping = page_mapping(page);
256 		struct inode *inode = mapping ? mapping->host : NULL;
257 
258 		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
259 		__entry->bdi_id		= wb->bdi->id;
260 		__entry->ino		= inode ? inode->i_ino : 0;
261 		__entry->memcg_id	= wb->memcg_css->id;
262 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
263 		__entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
264 	),
265 
266 	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
267 		__entry->name,
268 		__entry->bdi_id,
269 		(unsigned long)__entry->ino,
270 		__entry->memcg_id,
271 		(unsigned long)__entry->cgroup_ino,
272 		(unsigned long)__entry->page_cgroup_ino
273 	)
274 );
275 
276 TRACE_EVENT(flush_foreign,
277 
278 	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
279 		 unsigned int frn_memcg_id),
280 
281 	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
282 
283 	TP_STRUCT__entry(
284 		__array(char,		name, 32)
285 		__field(ino_t,		cgroup_ino)
286 		__field(unsigned int,	frn_bdi_id)
287 		__field(unsigned int,	frn_memcg_id)
288 	),
289 
290 	TP_fast_assign(
291 		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
292 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
293 		__entry->frn_bdi_id	= frn_bdi_id;
294 		__entry->frn_memcg_id	= frn_memcg_id;
295 	),
296 
297 	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
298 		__entry->name,
299 		(unsigned long)__entry->cgroup_ino,
300 		__entry->frn_bdi_id,
301 		__entry->frn_memcg_id
302 	)
303 );
304 #endif
305 
/*
 * Template for ->write_inode() events: records bdi name, inode number,
 * the wbc sync mode and the owning cgroup inode.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
348 
/*
 * Template for wb_writeback_work lifecycle events: snapshots the work
 * item's parameters plus the wb's bdi name and cgroup inode.  The reason
 * field is rendered symbolically via the WB_WORK_REASON table above.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		/* bdi may not be registered yet (wb->bdi->dev == NULL) */
		strscpy_pad(__entry->name,
			    wb->bdi->dev ? dev_name(wb->bdi->dev) :
			    "(unknown)", 32);
		__entry->nr_pages = work->nr_pages;
		/* work->sb is NULL for bdi-wide (not per-sb) writeback */
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
/* Stamp out one event per stage of a writeback work item's life. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
398 
/* Records the page count reported after a writeback pass. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
410 
/*
 * Minimal per-wb template: just the bdi name and owning cgroup inode.
 */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);
433 
/* Records the device name of a backing_dev_info at registration. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
447 
/*
 * Template snapshotting a writeback_control: counters, mode flags, the
 * byte range being written, and the owning cgroup inode.
 */
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		/* range_start/end are loff_t in wbc; truncated to long here */
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
501 
/*
 * Records dirty inodes being moved to the b_io queue: the cutoff time,
 * its age relative to now, how many inodes moved, and the work reason.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
		/* older_than_this may be NULL: report older=0, age=-1 */
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);
534 
/*
 * Snapshot of the system-wide dirty-page accounting: the global node
 * page-state counters plus the thresholds passed in by the caller and
 * the current global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
580 
/* Convert a page count to KB: pages << (PAGE_SHIFT - 10). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Records the per-wb dirty-throttling rate computation: measured and
 * averaged write bandwidth, the observed dirty rate, and the various
 * ratelimits, all converted to KB/s via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
628 
/*
 * Records one balance_dirty_pages() throttling decision: global and
 * per-bdi setpoints, dirty counts, ratelimits, and the pause/period
 * timing (all time fields converted from jiffies to milliseconds).
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		/* freerun = midpoint of background and dirty thresholds */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		/* global setpoint: midpoint of limit and freerun */
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		/* scale the global setpoint by this bdi's share of thresh;
		 * +1 avoids division by zero */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* "think time" since the task last finished pausing;
		 * 0 if it has never paused */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);
713 
/*
 * Records an inode being requeued during per-sb writeback: bdi name,
 * inode number/state, when it was dirtied, and the owning cgroup inode.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  /* age in seconds, derived at print time */
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);
745 
/*
 * Template for congestion-wait events: the requested timeout and the
 * actual delay experienced, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
780 
/*
 * Template for single-inode writeback events: inode identity and state,
 * the writeback cursor, the requested page budget and how much of it was
 * consumed (nr_to_write - wbc->nr_to_write remaining).
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		/* pages written = original budget minus what remains */
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
840 
/*
 * Template for inode-only events: identifies the inode by device and
 * inode number and snapshots its state, mode and dirtied timestamp.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
900 
901 #endif /* _TRACE_WRITEBACK_H */
902 
903 /* This part must be outside protection */
904 #include <trace/define_trace.h>
905