/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/io_uring.h>

struct io_wq_work;
/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing io_uring creation and provides a pointer to the context,
 * which can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d, sq size %d, cq size %d, flags 0x%x",
			  __entry->ctx, __entry->fd, __entry->sq_entries,
			  __entry->cq_entries, __entry->flags)
);
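
/*
 * Usage sketch (not part of the upstream header): TRACE_EVENT() generates a
 * trace_io_uring_create() helper with the TP_PROTO() signature above, which
 * ring-setup code can invoke once the context is ready, e.g.:
 *
 *	trace_io_uring_create(fd, ctx, p->sq_entries, p->cq_entries, p->flags);
 *
 * where "p" is assumed to be the io_uring_params passed to io_uring_setup(2).
 */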

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @ret:	return code
 *
 * Allows tracing fixed files/buffers that can be registered to avoid the
 * overhead of taking references to them for every operation. This event,
 * together with io_uring_file_get, can provide a full picture of how much
 * overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
			 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  unsigned,	opcode	)
		__field(  unsigned,	nr_files)
		__field(  unsigned,	nr_bufs	)
		__field(  long,		ret	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
			  "ret %ld",
			  __entry->ctx, __entry->opcode, __entry->nr_files,
			  __entry->nr_bufs, __entry->ret)
);
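
/*
 * A minimal sketch of consuming this event from userspace via tracefs (the
 * mount point may vary; the output values are illustrative):
 *
 *	# echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_register/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *	... io_uring_register: ring 00000000e1c712a0, opcode 2, nr_user_files 0, nr_user_bufs 4, ret 0
 */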

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @req:	pointer to a submitted request
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can help
 * figure out whether it makes sense to use fixed files, or to check that
 * fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(struct io_kiocb *req, int fd),

	TP_ARGS(req, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u64,		user_data	)
		__field(  int,		fd		)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->fd		= fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		__entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);
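
/*
 * Hypothetical call site sketch: the tracepoint is meant to be hit just
 * before a request resolves its file descriptor; comparing its frequency
 * against io_uring_register shows how much fixed files would save:
 *
 *	trace_io_uring_file_get(req, fd);
 */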

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @req:	pointer to a submitted request
 * @rw:		type of workqueue, hashed or normal
 *
 * Allows tracing asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(struct io_kiocb *req, int rw),

	TP_ARGS(req, rw),

	TP_STRUCT__entry (
		__field(  void *,			ctx		)
		__field(  void *,			req		)
		__field(  u64,				user_data	)
		__field(  u8,				opcode		)
		__field(  unsigned int,			flags		)
		__field(  struct io_wq_work *,		work		)
		__field(  int,				rw		)

		__string( op_str, io_uring_get_opcode(req->opcode)	)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->flags		= req->flags;
		__entry->opcode		= req->opcode;
		__entry->work		= &req->work;
		__entry->rw		= rw;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);
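
/*
 * Assumed call site sketch: when a request is punted to io-wq, the caller
 * can derive @rw from the hashed state of the work item, e.g.:
 *
 *	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 *
 * io_wq_is_hashed() is the io-wq helper reporting whether the work item is
 * hashed (serialized) rather than freely parallel.
 */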

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @req:	pointer to a deferred request
 *
 * Allows tracking deferred requests, to get insight into which requests are
 * not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  void *,		req	)
		__field(  unsigned long long,	data	)
		__field(  u8,			opcode	)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx	= req->ctx;
		__entry->req	= req;
		__entry->data	= req->cqe.user_data;
		__entry->opcode	= req->opcode;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
		__entry->ctx, __entry->req, __entry->data,
		__get_str(op_str))
);
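
/*
 * Illustrative trace_pipe output for this event, as laid out by the
 * TP_printk() above (pointer and user_data values are made up):
 *
 *	io_uring_defer: ring 00000000f4e3a1b2, request 000000001c9d2e77, user_data 0x7, opcode WRITEV
 */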

/**
 * io_uring_link - called before an io_uring request is added to the
 *		   link_list of another request
 *
 * @req:	pointer to a linked request
 * @target_req:	pointer to the previous request that will contain @req
 *
 * Allows tracking linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),

	TP_ARGS(req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
			  __entry->ctx, __entry->req, __entry->target_req)
);
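
/*
 * Assumed call site sketch: emitted while an SQE chain built with
 * IOSQE_IO_LINK is being assembled, passing the request @req is chained to:
 *
 *	trace_io_uring_link(req, head);
 *
 * "head" is a hypothetical name for the previous request in the chain.
 */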

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimum number of events to wait for
 *
 * Allows tracking waiting for a CQE, so that we can e.g. troubleshoot
 * situations where an application waits for an event that never comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
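
/*
 * Assumed call site sketch: emitted on entry to CQE waiting, e.g. for an
 * io_uring_enter(2) call with IORING_ENTER_GETEVENTS set:
 *
 *	trace_io_uring_cqring_wait(ctx, min_events);
 */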

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	the request whose links were cancelled
 * @link:	the cancelled link
 *
 * Allows tracking cancellation of linked requests, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  void *,		link		)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->link		= link;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str), __entry->link)
);
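
/*
 * Illustrative output (all values made up): one line per dependent request
 * cancelled because the head of its link chain failed:
 *
 *	io_uring_fail_link: ring 00000000be12cd04, request 00000000aa31f2c8, user_data 0x2, opcode READV, link 000000005d7e90b1
 */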

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a submitted request
 * @user_data:		user data associated with the request
 * @res:		result of the request
 * @cflags:		completion flags
 * @extra1:		extra 64-bit data for CQE32
 * @extra2:		extra 64-bit data for CQE32
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
		 u64 extra1, u64 extra2),

	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u64,		user_data	)
		__field(  int,		res		)
		__field(  unsigned,	cflags		)
		__field(  u64,		extra1		)
		__field(  u64,		extra2		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->extra1		= extra1;
		__entry->extra2		= extra2;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
		  "extra1 %llu extra2 %llu",
		__entry->ctx, __entry->req,
		__entry->user_data,
		__entry->res, __entry->cflags,
		(unsigned long long) __entry->extra1,
		(unsigned long long) __entry->extra2)
);
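
/*
 * Illustrative output (values made up). extra1/extra2 only carry data when
 * the ring was created with IORING_SETUP_CQE32:
 *
 *	io_uring_complete: ring 00000000d3f0a9c4, req 000000007b2e8d19, user_data 0x1, result 4096, cflags 0x0 extra1 0 extra2 0
 */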

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @req:		pointer to a submitted request
 * @force_nonblock:	whether the submission must not block
 *
 * Allows tracking SQE submission, to understand its source: the SQ thread or
 * an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(struct io_kiocb *req, bool force_nonblock),

	TP_ARGS(req, force_nonblock),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  u32,			flags		)
		__field(  bool,			force_nonblock	)
		__field(  bool,			sq_thread	)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->flags		= req->flags;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= req->ctx->flags & IORING_SETUP_SQPOLL;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __get_str(op_str),
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);
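
/*
 * Illustrative output (values made up): sq_thread mirrors
 * IORING_SETUP_SQPOLL, as computed in TP_fast_assign() above:
 *
 *	io_uring_submit_sqe: ring 00000000a1b2c3d4, req 00000000e5f60718, user_data 0x3, opcode NOP, flags 0x0, non block 1, sq_thread 0
 */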

/**
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @req:	pointer to the armed request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 *
 * Allows tracking which fds are being waited on and what the events of
 * interest are.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(struct io_kiocb *req, int mask, int events),

	TP_ARGS(req, mask, events),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  int,			mask		)
		__field(  int,			events		)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->mask		= mask;
		__entry->events		= events;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask, __entry->events)
);
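
/*
 * Assumed call site sketch: emitted after poll arming succeeds, passing the
 * already-signalled mask and the events being waited for:
 *
 *	trace_io_uring_poll_arm(req, mask, events);
 */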

/**
 * io_uring_task_add - called after adding a task
 *
 * @req:	pointer to a request
 * @mask:	request poll events mask
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(struct io_kiocb *req, int mask),

	TP_ARGS(req, mask),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  int,			mask		)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->mask		= mask;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->mask)
);

/**
 * io_uring_req_failed - called when an SQE errors during submission
 *
 * @sqe:	pointer to the io_uring_sqe that failed
 * @req:	pointer to the request
 * @error:	the error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),

	TP_ARGS(sqe, req, error),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  u8,			flags		)
		__field(  u8,			ioprio		)
		__field( u64,			off		)
		__field( u64,			addr		)
		__field( u32,			len		)
		__field( u32,			op_flags	)
		__field( u16,			buf_index	)
		__field( u16,			personality	)
		__field( u32,			file_index	)
		__field( u64,			pad1		)
		__field( u64,			addr3		)
		__field( int,			error		)

		__string( op_str, io_uring_get_opcode(sqe->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= sqe->user_data;
		__entry->opcode		= sqe->opcode;
		__entry->flags		= sqe->flags;
		__entry->ioprio		= sqe->ioprio;
		__entry->off		= sqe->off;
		__entry->addr		= sqe->addr;
		__entry->len		= sqe->len;
		__entry->op_flags	= sqe->poll32_events;
		__entry->buf_index	= sqe->buf_index;
		__entry->personality	= sqe->personality;
		__entry->file_index	= sqe->file_index;
		__entry->pad1		= sqe->__pad2[0];
		__entry->addr3		= sqe->addr3;
		__entry->error		= error;

		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx, addr3=0x%llx, "
		  "error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long) __entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long) __entry->pad1,
		  (unsigned long long) __entry->addr3, __entry->error)
);
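
/*
 * A minimal sketch of isolating one failure mode from userspace, using the
 * standard tracefs event filter syntax (mount point may vary); this catches
 * only -EINVAL (-22) rejections:
 *
 *	# echo 'error == -22' > /sys/kernel/tracing/events/io_uring/io_uring_req_failed/filter
 *	# echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_req_failed/enable
 */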

/**
 * io_uring_cqe_overflow - a CQE overflowed
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @res:	CQE result
 * @cflags:	CQE flags
 * @ocqe:	pointer to the overflow CQE (if available)
 */
TRACE_EVENT(io_uring_cqe_overflow,

	TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
		 void *ocqe),

	TP_ARGS(ctx, user_data, res, cflags, ocqe),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  unsigned long long,	user_data	)
		__field(  s32,			res		)
		__field(  u32,			cflags		)
		__field(  void *,		ocqe		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->ocqe		= ocqe;
	),

	TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
		  "overflow_cqe %p",
		  __entry->ctx, __entry->user_data, __entry->res,
		  __entry->cflags, __entry->ocqe)
);

/**
 * io_uring_task_work_run - called when task work is run
 *
 * @tctx:	pointer to an io_uring_task
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 */
TRACE_EVENT(io_uring_task_work_run,

	TP_PROTO(void *tctx, unsigned int count, unsigned int loops),

	TP_ARGS(tctx, count, loops),

	TP_STRUCT__entry (
		__field(  void *,		tctx		)
		__field(  unsigned int,		count		)
		__field(  unsigned int,		loops		)
	),

	TP_fast_assign(
		__entry->tctx		= tctx;
		__entry->count		= count;
		__entry->loops		= loops;
	),

	TP_printk("tctx %p, count %u, loops %u",
		 __entry->tctx, __entry->count, __entry->loops)
);

/**
 * io_uring_short_write - called when a write request completes with fewer
 *			  bytes than requested
 *
 * @ctx:	pointer to a ring context structure
 * @fpos:	file position the write was issued at
 * @wanted:	number of bytes the request asked for
 * @got:	number of bytes actually written
 */
TRACE_EVENT(io_uring_short_write,

	TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),

	TP_ARGS(ctx, fpos, wanted, got),

	TP_STRUCT__entry(
		__field(void *,	ctx)
		__field(u64,	fpos)
		__field(u64,	wanted)
		__field(u64,	got)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fpos	= fpos;
		__entry->wanted	= wanted;
		__entry->got	= got;
	),

	TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
			  __entry->ctx, __entry->fpos,
			  __entry->wanted, __entry->got)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>