/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/io_uring.h>

struct io_wq_work;

/**
 * io_uring_create - called after a new io_uring context has been prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, as provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation, and provides the context pointer that
 * can be used later to correlate events on the same ring.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
			  __entry->ctx, __entry->fd, __entry->sq_entries,
			  __entry->cq_entries, __entry->flags)
);
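
/*
 * Each TRACE_EVENT() in this file generates a trace_<event>() helper (e.g.
 * trace_io_uring_create()) that the io_uring core calls at the corresponding
 * point, plus a tracefs event description. As an illustrative sketch only
 * (argument sources here are assumptions, not a copy of the real call site),
 * ring setup code would invoke it roughly like:
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 *
 * with ret being the installed file descriptor and p pointing to the
 * io_uring_params passed to io_uring_setup(2).
 */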

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 * 			registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	registration opcode, describing which resource was registered
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @ret:	return code
 *
 * Allows tracing of fixed files/buffers, which can be registered to avoid the
 * overhead of taking references to them on every operation. This event,
 * together with io_uring_file_get, can provide a full picture of how much
 * overhead can be saved by using fixed resources.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
			 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  unsigned,	opcode	)
		__field(  unsigned,	nr_files)
		__field(  unsigned,	nr_bufs	)
		__field(  long,		ret	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
			  "ret %ld",
			  __entry->ctx, __entry->opcode, __entry->nr_files,
			  __entry->nr_bufs, __entry->ret)
);
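
/*
 * Usage note (illustration, not part of the definitions in this header): like
 * any other tracepoints, these events can be enabled at runtime via tracefs,
 * e.g. by writing 1 to
 * /sys/kernel/tracing/events/io_uring/io_uring_register/enable (or to the
 * enable file in events/io_uring/ to turn on the whole group) and then
 * reading /sys/kernel/tracing/trace_pipe. Watching io_uring_register together
 * with io_uring_file_get shows how many per-operation file lookups are
 * avoided by registering fixed files.
 */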

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @req:	pointer to a submitted request
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can help
 * in deciding whether fixed files are worthwhile, or in checking that fixed
 * files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(struct io_kiocb *req, int fd),

	TP_ARGS(req, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u64,		user_data	)
		__field(  int,		fd		)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->fd		= fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		__entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @req:	pointer to a submitted request
 * @rw:		type of workqueue, hashed or normal
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(struct io_kiocb *req, int rw),

	TP_ARGS(req, rw),

	TP_STRUCT__entry (
		__field(  void *,			ctx		)
		__field(  void *,			req		)
		__field(  u64,				user_data	)
		__field(  u8,				opcode		)
		__field(  unsigned int,			flags		)
		__field(  struct io_wq_work *,		work		)
		__field(  int,				rw		)

		__string( op_str, io_uring_get_opcode(req->opcode)	)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->flags		= req->flags;
		__entry->opcode		= req->opcode;
		__entry->work		= &req->work;
		__entry->rw		= rw;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);
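
/*
 * Illustrative sketch only (assumed call site, not copied from the io_uring
 * core): the rw argument is effectively a boolean saying whether the work
 * went to the hashed or the normal io-wq queue, so a caller would look
 * roughly like:
 *
 *	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 */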

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @req:	pointer to a deferred request
 *
 * Allows tracking of deferred requests, to get insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  void *,		req	)
		__field(  unsigned long long,	data	)
		__field(  u8,			opcode	)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx	= req->ctx;
		__entry->req	= req;
		__entry->data	= req->cqe.user_data;
		__entry->opcode	= req->opcode;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
		__entry->ctx, __entry->req, __entry->data,
		__get_str(op_str))
);

/**
 * io_uring_link - called before an io_uring request is added to the link_list
 * 		   of another request
 *
 * @req:	pointer to a linked request
 * @target_req:	pointer to the previous request that @req gets linked after
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),

	TP_ARGS(req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
			  __entry->ctx, __entry->req, __entry->target_req)
);

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking of CQE waiting, so that we can e.g. troubleshoot situations
 * where an application waits for an event that never arrives.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that some
 * work was cancelled, but also which request caused it.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  void *,		link		)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->link		= link;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str), __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a submitted request
 * @user_data:		user data associated with the request
 * @res:		result of the request
 * @cflags:		completion flags
 * @extra1:		extra 64-bit data for CQE32
 * @extra2:		extra 64-bit data for CQE32
 *
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
		 u64 extra1, u64 extra2),

	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u64,		user_data	)
		__field(  int,		res		)
		__field(  unsigned,	cflags		)
		__field(  u64,		extra1		)
		__field(  u64,		extra2		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->extra1		= extra1;
		__entry->extra2		= extra2;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
		  "extra1 %llu extra2 %llu ",
		__entry->ctx, __entry->req,
		__entry->user_data,
		__entry->res, __entry->cflags,
		(unsigned long long) __entry->extra1,
		(unsigned long long) __entry->extra2)
);
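
/*
 * Illustrative sketch only (assumed call site, not copied from the io_uring
 * core): io_uring_complete fires when a CQE is posted, so the arguments would
 * typically mirror the CQE being filled in, e.g.:
 *
 *	trace_io_uring_complete(ctx, req, cqe->user_data, cqe->res, cqe->flags,
 *				0, 0);
 *
 * with extra1/extra2 only being non-zero on IORING_SETUP_CQE32 rings, where
 * the big CQE carries two additional 64-bit values.
 */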

/**
 * io_uring_submit_req - called before submitting a request
 *
 * @req:	pointer to a submitted request
 *
 * Allows tracking of SQE submission, to understand whether it originated from
 * the SQ poll thread or an io_uring_enter() call.
 */
TRACE_EVENT(io_uring_submit_req,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  u32,			flags		)
		__field(  bool,			sq_thread	)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->flags		= req->flags;
		__entry->sq_thread	= req->ctx->flags & IORING_SETUP_SQPOLL;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
		  "sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __get_str(op_str),
		  __entry->flags, __entry->sq_thread)
);

/*
 * io_uring_poll_arm - called after successfully arming a poll wait
 *
 * @req:	pointer to the armed request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 *
 * Allows tracking which files are being waited on and which events are of
 * interest.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(struct io_kiocb *req, int mask, int events),

	TP_ARGS(req, mask, events),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  int,			mask		)
		__field(  int,			events		)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->mask		= mask;
		__entry->events		= events;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask, __entry->events)
);

/*
 * io_uring_task_add - called after adding a task
 *
 * @req:	pointer to a request
 * @mask:	request poll events mask
 *
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(struct io_kiocb *req, int mask),

	TP_ARGS(req, mask),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  int,			mask		)

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->mask		= mask;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->mask)
);

/*
 * io_uring_req_failed - called when an SQE is errored during submission
 *
 * @sqe:	pointer to the io_uring_sqe that failed
 * @req:	pointer to request
 * @error:	error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),

	TP_ARGS(sqe, req, error),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  u8,			flags		)
		__field(  u8,			ioprio		)
		__field( u64,			off		)
		__field( u64,			addr		)
		__field( u32,			len		)
		__field( u32,			op_flags	)
		__field( u16,			buf_index	)
		__field( u16,			personality	)
		__field( u32,			file_index	)
		__field( u64,			pad1		)
		__field( u64,			addr3		)
		__field( int,			error		)

		__string( op_str, io_uring_get_opcode(sqe->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= sqe->user_data;
		__entry->opcode		= sqe->opcode;
		__entry->flags		= sqe->flags;
		__entry->ioprio		= sqe->ioprio;
		__entry->off		= sqe->off;
		__entry->addr		= sqe->addr;
		__entry->len		= sqe->len;
		__entry->op_flags	= sqe->poll32_events;
		__entry->buf_index	= sqe->buf_index;
		__entry->personality	= sqe->personality;
		__entry->file_index	= sqe->file_index;
		__entry->pad1		= sqe->__pad2[0];
		__entry->addr3		= sqe->addr3;
		__entry->error		= error;

		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
		  "error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long) __entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long) __entry->pad1,
		  (unsigned long long) __entry->addr3, __entry->error)
);


/*
 * io_uring_cqe_overflow - a CQE overflowed
 *
 * @ctx:		pointer to a ring context structure
 * @user_data:		user data associated with the request
 * @res:		CQE result
 * @cflags:		CQE flags
 * @ocqe:		pointer to the overflow cqe (if available)
 *
 */
TRACE_EVENT(io_uring_cqe_overflow,

	TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
		 void *ocqe),

	TP_ARGS(ctx, user_data, res, cflags, ocqe),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  unsigned long long,	user_data	)
		__field(  s32,			res		)
		__field(  u32,			cflags		)
		__field(  void *,		ocqe		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->ocqe		= ocqe;
	),

	TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
		  "overflow_cqe %p",
		  __entry->ctx, __entry->user_data, __entry->res,
		  __entry->cflags, __entry->ocqe)
);

/*
 * io_uring_task_work_run - ran task work
 *
 * @tctx:	pointer to an io_uring_task
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 *
 */
TRACE_EVENT(io_uring_task_work_run,

	TP_PROTO(void *tctx, unsigned int count, unsigned int loops),

	TP_ARGS(tctx, count, loops),

	TP_STRUCT__entry (
		__field(  void *,		tctx		)
		__field(  unsigned int,		count		)
		__field(  unsigned int,		loops		)
	),

	TP_fast_assign(
		__entry->tctx		= tctx;
		__entry->count		= count;
		__entry->loops		= loops;
	),

	TP_printk("tctx %p, count %u, loops %u",
		 __entry->tctx, __entry->count, __entry->loops)
);

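/*
 * io_uring_short_write - called when a write ends up shorter than requested
 *
 * @ctx:	pointer to a ring context structure
 * @fpos:	file position the write started at
 * @wanted:	number of bytes the write asked for
 * @got:	number of bytes actually written
 */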
TRACE_EVENT(io_uring_short_write,

	TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),

	TP_ARGS(ctx, fpos, wanted, got),

	TP_STRUCT__entry(
		__field(void *,	ctx)
		__field(u64,	fpos)
		__field(u64,	wanted)
		__field(u64,	got)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fpos	= fpos;
		__entry->wanted	= wanted;
		__entry->got	= got;
	),

	TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
			  __entry->ctx, __entry->fpos,
			  __entry->wanted, __entry->got)
);

/*
 * io_uring_local_work_run - ran ring local task work
 *
 * @ctx:	pointer to an io_uring_ctx
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 *
 */
TRACE_EVENT(io_uring_local_work_run,

	TP_PROTO(void *ctx, int count, unsigned int loops),

	TP_ARGS(ctx, count, loops),

	TP_STRUCT__entry (
		__field(void *,		ctx	)
		__field(int,		count	)
		__field(unsigned int,	loops	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->count		= count;
		__entry->loops		= loops;
	),

	TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);

#endif /* _TRACE_IO_URING_H */

/*
 * This part must be outside the _TRACE_IO_URING_H protection above:
 * trace/define_trace.h re-includes this header (with TRACE_HEADER_MULTI_READ
 * defined) so that the compilation unit which defines CREATE_TRACE_POINTS
 * expands the TRACE_EVENT() entries into the actual tracepoint definitions
 * rather than just declarations.
 */
#include <trace/define_trace.h>