xref: /openbmc/linux/include/trace/events/writeback.h (revision bef7a78d)
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
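
/*
 * Illustrative expansion, not generated output: with this second set of
 * definitions, WB_WORK_REASON now produces the enum/string table
 *
 *	{ WB_REASON_BACKGROUND,		"background" },
 *	{ WB_REASON_VMSCAN,		"vmscan" },
 *	...
 *	{ WB_REASON_FORKER_THREAD,	"forker_thread" }
 *
 * which is exactly what __print_symbolic(__entry->reason, WB_WORK_REASON)
 * consumes below, while the first pass above emitted one TRACE_DEFINE_ENUM()
 * per value so user space tooling can resolve the enum names.
 */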

struct wb_writeback_work;

DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
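
/*
 * Illustrative only: each DEFINE_EVENT() above creates a tracepoint that
 * callers fire as trace_writeback_dirty_page(page, mapping) or
 * trace_wait_on_page_writeback(page, mapping).  Given the class's
 * TP_printk() format, a record renders roughly as
 *
 *	writeback_dirty_page: bdi 8:16: ino=1234 index=7
 *
 * where "8:16" and the numbers are made-up sample values.
 */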

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
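
/*
 * Usage sketch, not part of this header: like any tracepoint, these events
 * can be toggled from user space through tracefs, e.g.
 *
 *	# echo 1 > /sys/kernel/tracing/events/writeback/writeback_dirty_inode/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * (the tracefs mount may also live under /sys/kernel/debug/tracing).
 */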

#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
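
/*
 * The two helpers above are only compiled in the translation unit that
 * defines CREATE_TRACE_POINTS, i.e. the one in which the TP_fast_assign()
 * bodies that call them are actually expanded; all other inclusions of this
 * header see only the event declarations.
 */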

#ifdef CONFIG_CGROUP_WRITEBACK
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct page *page, struct bdi_writeback *wb),

	TP_ARGS(page, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = page_mapping(page);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(page_memcg(page)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif	/* CONFIG_CGROUP_WRITEBACK */

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
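
/*
 * Each DEFINE_WRITEBACK_WORK_EVENT() above stamps out a tracepoint sharing
 * writeback_work_class, fired as trace_writeback_queue(wb, work),
 * trace_writeback_exec(wb, work) and so on; they differ only in name and in
 * where the writeback code invokes them.
 */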

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= dirtied_before;
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

#define KBps(x)			((x) << (PAGE_SHIFT - 10))
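
/*
 * KBps() converts a page count (or pages-per-second rate) into KiB: a page is
 * 1 << PAGE_SHIFT bytes, so shifting left by (PAGE_SHIFT - 10) multiplies by
 * PAGE_SIZE / 1024.  With 4K pages that is simply x << 2.
 */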

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);
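
/*
 * Worked example with made-up numbers: for thresh=800, bg_thresh=400 and
 * global_wb_domain.dirty_limit=1000 pages, freerun = (800 + 400) / 2 = 600,
 * so the traced setpoint is (1000 + 600) / 2 = 800 pages; bdi_setpoint then
 * scales that by this bdi's share, bdi_thresh / (thresh + 1).
 */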

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
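
/*
 * General TRACE_EVENT convention, noted here for readers of this header: the
 * include above only expands the events into code in the single .c file that
 * defines CREATE_TRACE_POINTS before including trace/events/writeback.h;
 * everywhere else this header provides declarations only.
 */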