/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>

struct io_wq_work;

/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing io_uring creation and provides a pointer to the context,
 * which can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d, sq size %u, cq size %u, flags %u",
			  __entry->ctx, __entry->fd, __entry->sq_entries,
			  __entry->cq_entries, __entry->flags)
);
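
/*
 * Illustrative sketch, not part of the original header: TRACE_EVENT() above
 * generates a trace_io_uring_create() helper, which the ring setup path is
 * expected to call once the ring is ready. The variable names below are
 * hypothetical:
 *
 *	trace_io_uring_create(fd, ctx, sq_entries, cq_entries, flags);
 */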

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @eventfd:	whether an eventfd was registered
 * @ret:	return code
 *
 * Allows tracing fixed files/buffers/eventfds, which can be registered to
 * avoid the overhead of getting references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
			 unsigned nr_bufs, bool eventfd, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  unsigned,	opcode		)
		__field(  unsigned,	nr_files	)
		__field(  unsigned,	nr_bufs		)
		__field(  bool,		eventfd		)
		__field(  long,		ret		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->eventfd	= eventfd;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %u, nr_user_files %u, nr_user_bufs %u, "
			  "eventfd %d, ret %ld",
			  __entry->ctx, __entry->opcode, __entry->nr_files,
			  __entry->nr_bufs, __entry->eventfd, __entry->ret)
);

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx:	pointer to a ring context structure
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can help
 * figure out whether it makes sense to use fixed files, or to check that
 * fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, int fd),

	TP_ARGS(ctx, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  int,		fd	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fd	= fd;
	),

	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx:	pointer to a ring context structure
 * @rw:		type of workqueue, hashed or normal
 * @req:	pointer to a submitted request
 * @work:	pointer to a submitted io_wq_work
 * @flags:	request flags
 *
 * Allows tracing asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
			 unsigned int flags),

	TP_ARGS(ctx, rw, req, work, flags),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  int,			rw	)
		__field(  void *,		req	)
		__field(  struct io_wq_work *,	work	)
		__field(  unsigned int,		flags	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->rw	= rw;
		__entry->req	= req;
		__entry->work	= work;
		__entry->flags	= flags;
	),

	TP_printk("ring %p, request %p, flags %u, %s queue, work %p",
			  __entry->ctx, __entry->req, __entry->flags,
			  __entry->rw ? "hashed" : "normal", __entry->work)
);

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a deferred request
 * @user_data:	user data associated with the request
 *
 * Allows tracking deferred requests, to get insight into which requests are
 * not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data),

	TP_ARGS(ctx, req, user_data),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  void *,		req	)
		__field(  unsigned long long,	data	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->req	= req;
		__entry->data	= user_data;
	),

	TP_printk("ring %p, request %p, user_data %llu", __entry->ctx,
			  __entry->req, __entry->data)
);

/**
 * io_uring_link - called before the io_uring request is added into the
 *		   link_list of another request
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a linked request
 * @target_req:	pointer to the previous request, which will contain @req
 *
 * Allows tracking linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
			  __entry->ctx, __entry->req, __entry->target_req)
);

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking waits for CQEs, so that we can e.g. troubleshoot
 * situations where an application wants to wait for an event that never
 * comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
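
/*
 * Usage sketch (illustrative, shell): like any tracepoint, these events can
 * be enabled through tracefs once the kernel has them compiled in, e.g. to
 * watch for CQE waits:
 *
 *	# echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_cqring_wait/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 */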

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking the cancellation of linked requests, to see not only that
 * some work was cancelled, but also which request caused it.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *req, void *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(  void *,	req	)
		__field(  void *,	link	)
	),

	TP_fast_assign(
		__entry->req	= req;
		__entry->link	= link;
	),

	TP_printk("request %p, link %p", __entry->req, __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @res:	result of the request
 * @cflags:	completion flags
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, u64 user_data, long res, unsigned cflags),

	TP_ARGS(ctx, user_data, res, cflags),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u64,		user_data	)
		__field(  long,		res		)
		__field(  unsigned,	cflags		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
	),

	TP_printk("ring %p, user_data 0x%llx, result %ld, cflags 0x%x",
			  __entry->ctx, (unsigned long long)__entry->user_data,
			  __entry->res, __entry->cflags)
);

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx:		pointer to a ring context structure
 * @opcode:		opcode of request
 * @user_data:		user data associated with the request
 * @force_nonblock:	whether the submission must not block
 * @sq_thread:		true if the SQ thread has submitted this SQE
 *
 * Allows tracking SQE submission, to understand whether it originated from
 * the SQ thread or from an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
			 bool sq_thread),

	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  bool,		force_nonblock	)
		__field(  bool,		sq_thread	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= sq_thread;
	),

	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data,
			  __entry->force_nonblock, __entry->sq_thread)
);
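
/*
 * Illustrative note: io_uring_submit_sqe and io_uring_complete both carry
 * @user_data, so a trace consumer can pair a submission with its completion
 * by matching (ring, user_data). A hypothetical tracefs filter, assuming the
 * application tagged a request with 0xdeadbeef:
 *
 *	# echo 'user_data == 0xdeadbeef' > \
 *		/sys/kernel/tracing/events/io_uring/io_uring_complete/filter
 */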

/**
 * io_uring_poll_arm - called after arming a poll wait for a request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),

	TP_ARGS(ctx, opcode, user_data, mask, events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
		__field(  int,		events		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
		__entry->events		= events;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data,
			  __entry->mask, __entry->events)
);

/**
 * io_uring_poll_wake - called on waking up a poll request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	triggered events mask
 */
TRACE_EVENT(io_uring_poll_wake,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data,
			  __entry->mask)
);

/**
 * io_uring_task_add - called after adding a request to the target task's
 *		       work list
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	request poll events mask
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data,
			  __entry->mask)
);

/**
 * io_uring_task_run - called when a previously added request is run from
 *		       task context
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 */
TRACE_EVENT(io_uring_task_run,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data),

	TP_ARGS(ctx, opcode, user_data),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
	),

	TP_printk("ring %p, op %d, data 0x%llx",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
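
/*
 * Usage sketch (illustrative): exactly one compilation unit must define
 * CREATE_TRACE_POINTS before including this header, so that the tracepoint
 * bodies get emitted; every other user includes it plainly.
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/io_uring.h>
 */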