/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

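/*
 * bcache_request: shared layout for request-level events. Records the
 * device the bio was issued to, the bio's position and size in blktrace
 * style, and the originating bcache device and backing sector.
 */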
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(sector_t,	orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		/* undo the default 16-sector data offset on the backing device */
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

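/*
 * bkey: shared layout for events about a single bcache key; decodes the
 * inode:offset, extent size and dirty bit from the bkey.
 */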
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

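/*
 * btree_node: shared layout for events that identify a btree node by the
 * cache bucket holding it.
 */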
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

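/*
 * Each DEFINE_EVENT() instantiates one of the classes above and generates
 * a trace_<name>() helper, e.g. trace_bcache_request_start(d, bio), which
 * the corresponding bcache code is expected to call.
 */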
DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

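/*
 * bcache_bio: shared layout for events carrying only a bio: device,
 * sector, size and rwbs flags, matching the generic block trace format.
 */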
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

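/* Reads/writes that bypass the cache, due to sequential I/O or congestion */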
DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

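/*
 * bcache_read: one event per read, recording whether it hit the cache
 * and whether it bypassed the cache entirely.
 */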
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

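/*
 * bcache_write: one event per write, keyed by cache set UUID and inode,
 * recording the writeback and bypass decisions.
 */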
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu  %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

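/*
 * cache_set: shared layout for events that identify a cache set by its
 * 16-byte UUID.
 */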
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

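/* A journal write, with the number of keys in the journal entry */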
TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u32,		nr_keys			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		__entry->nr_keys	= keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

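/*
 * bcache_btree_write: records the node's bucket, how many blocks of it
 * were already written, and the number of keys in the set being written.
 */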
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned int,	block			)
		__field(unsigned int,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		__entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

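/* Garbage collection: node coalescing, GC passes, and key copying */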
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned int nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned int,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

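/*
 * bcache_btree_insert_key: a key insertion into a btree node, recording
 * the node's bucket and level, the decoded key, and the operation and
 * status codes.
 */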
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned int op,
		 unsigned int status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

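/*
 * btree_split: shared layout for node split and compaction events;
 * records the node's bucket and the number of keys involved.
 */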
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned int keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned int,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned int keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned int keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

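/* A key scan: how many keys were found in the given inode:offset range */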
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned int nr_found,
		 unsigned int start_inode, u64 start_offset,
		 unsigned int end_inode, u64 end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(u32,	nr_found			)
		__field(u32,	start_inode			)
		__field(u64,	start_offset			)
		__field(u32,	end_inode			)
		__field(u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

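/*
 * bcache_invalidate: a bucket being invalidated for reuse, reporting the
 * sectors of cached data dropped and the bucket's offset on the cache
 * device.
 */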
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned int,	sectors			)
		__field(dev_t,		dev			)
		__field(u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

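/* Bucket allocation, and allocation failure per reserve */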
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned int reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	free			)
		__field(unsigned int,	free_inc		)
		__field(unsigned int,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>