/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drm_drv.h>

#include "display/intel_crtc.h"
#include "display/intel_display_types.h"
#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_irq.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace

/* watermark/fifo updates */

TRACE_EVENT(intel_pipe_enable,
	TP_PROTO(struct intel_crtc *crtc),
	TP_ARGS(crtc),

	TP_STRUCT__entry(
		__array(u32, frame, 3)
		__array(u32, scanline, 3)
		__field(enum pipe, pipe)
	),
	TP_fast_assign(
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
		struct intel_crtc *it__;
		for_each_intel_crtc(&dev_priv->drm, it__) {
			__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
			__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
		}
		__entry->pipe = crtc->pipe;
	),

	TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
		  pipe_name(__entry->pipe),
		  __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
		  __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
		  __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);

TRACE_EVENT(intel_pipe_disable,
	TP_PROTO(struct intel_crtc *crtc),
	TP_ARGS(crtc),

	TP_STRUCT__entry(
		__array(u32, frame, 3)
		__array(u32, scanline, 3)
		__field(enum pipe, pipe)
	),

	TP_fast_assign(
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
		struct intel_crtc *it__;
		for_each_intel_crtc(&dev_priv->drm, it__) {
			__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
			__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
		}
		__entry->pipe = crtc->pipe;
	),

	TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
		  pipe_name(__entry->pipe),
		  __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
		  __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
		  __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);

TRACE_EVENT(intel_pipe_crc,
	TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
	TP_ARGS(crtc, crcs),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__array(u32, crcs, 5)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
	),

	TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
		  pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
		  __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
		  __entry->crcs[3], __entry->crcs[4])
);

TRACE_EVENT(intel_cpu_fifo_underrun,
	TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
	TP_ARGS(dev_priv, pipe),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
	),

	TP_fast_assign(
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		__entry->pipe = pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
	),

	TP_printk("pipe %c, frame=%u, scanline=%u",
		  pipe_name(__entry->pipe),
		  __entry->frame, __entry->scanline)
);

TRACE_EVENT(intel_pch_fifo_underrun,
	TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
	TP_ARGS(dev_priv, pch_transcoder),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
	),

	TP_fast_assign(
		enum pipe pipe = pch_transcoder;
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		__entry->pipe = pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
	),

	TP_printk("pch transcoder %c, frame=%u, scanline=%u",
		  pipe_name(__entry->pipe),
		  __entry->frame, __entry->scanline)
);

TRACE_EVENT(intel_memory_cxsr,
	TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
	TP_ARGS(dev_priv, old, new),

	TP_STRUCT__entry(
		__array(u32, frame, 3)
		__array(u32, scanline, 3)
		__field(bool, old)
		__field(bool, new)
	),

	TP_fast_assign(
		struct intel_crtc *crtc;
		for_each_intel_crtc(&dev_priv->drm, crtc) {
			__entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
			__entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
		}
		__entry->old = old;
		__entry->new = new;
	),

	TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
		  onoff(__entry->old), onoff(__entry->new),
		  __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
		  __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
		  __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);

TRACE_EVENT(g4x_wm,
	TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm),
	TP_ARGS(crtc, wm),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u16, primary)
		__field(u16, sprite)
		__field(u16, cursor)
		__field(u16, sr_plane)
		__field(u16, sr_cursor)
		__field(u16, sr_fbc)
		__field(u16, hpll_plane)
		__field(u16, hpll_cursor)
		__field(u16, hpll_fbc)
		__field(bool, cxsr)
		__field(bool, hpll)
		__field(bool, fbc)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
		__entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
		__entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
		__entry->sr_plane = wm->sr.plane;
		__entry->sr_cursor = wm->sr.cursor;
		__entry->sr_fbc = wm->sr.fbc;
		__entry->hpll_plane = wm->hpll.plane;
		__entry->hpll_cursor = wm->hpll.cursor;
		__entry->hpll_fbc = wm->hpll.fbc;
		__entry->cxsr = wm->cxsr;
		__entry->hpll = wm->hpll_en;
		__entry->fbc = wm->fbc_en;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
		  pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
		  __entry->primary, __entry->sprite, __entry->cursor,
		  yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
		  yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
		  yesno(__entry->fbc))
);

TRACE_EVENT(vlv_wm,
	TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
	TP_ARGS(crtc, wm),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u32, level)
		__field(u32, cxsr)
		__field(u32, primary)
		__field(u32, sprite0)
		__field(u32, sprite1)
		__field(u32, cursor)
		__field(u32, sr_plane)
		__field(u32, sr_cursor)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		__entry->level = wm->level;
		__entry->cxsr = wm->cxsr;
		__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
		__entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
		__entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
		__entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
		__entry->sr_plane = wm->sr.plane;
		__entry->sr_cursor = wm->sr.cursor;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline, __entry->level, __entry->cxsr,
		  __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
		  __entry->sr_plane, __entry->sr_cursor)
);

TRACE_EVENT(vlv_fifo_size,
	TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
	TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u32, sprite0_start)
		__field(u32, sprite1_start)
		__field(u32, fifo_size)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		__entry->sprite0_start = sprite0_start;
		__entry->sprite1_start = sprite1_start;
		__entry->fifo_size = fifo_size;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline, __entry->sprite0_start,
		  __entry->sprite1_start, __entry->fifo_size)
);

/* plane updates */

TRACE_EVENT(intel_update_plane,
	TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
	TP_ARGS(plane, crtc),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__array(int, src, 4)
		__array(int, dst, 4)
		__string(name, plane->name)
	),

	TP_fast_assign(
		__assign_str(name, plane->name);
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
		memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
	),

	TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
		  pipe_name(__entry->pipe), __get_str(name),
		  __entry->frame, __entry->scanline,
		  DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
		  DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);

TRACE_EVENT(intel_disable_plane,
	TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
	TP_ARGS(plane, crtc),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__string(name, plane->name)
	),

	TP_fast_assign(
		__assign_str(name, plane->name);
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
	),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", 339 pipe_name(__entry->pipe), __get_str(name), 340 __entry->frame, __entry->scanline) 341 ); 342 343 /* fbc */ 344 345 TRACE_EVENT(intel_fbc_activate, 346 TP_PROTO(struct intel_crtc *crtc), 347 TP_ARGS(crtc), 348 349 TP_STRUCT__entry( 350 __field(enum pipe, pipe) 351 __field(u32, frame) 352 __field(u32, scanline) 353 ), 354 355 TP_fast_assign( 356 __entry->pipe = crtc->pipe; 357 __entry->frame = intel_crtc_get_vblank_counter(crtc); 358 __entry->scanline = intel_get_crtc_scanline(crtc); 359 ), 360 361 TP_printk("pipe %c, frame=%u, scanline=%u", 362 pipe_name(__entry->pipe), __entry->frame, __entry->scanline) 363 ); 364 365 TRACE_EVENT(intel_fbc_deactivate, 366 TP_PROTO(struct intel_crtc *crtc), 367 TP_ARGS(crtc), 368 369 TP_STRUCT__entry( 370 __field(enum pipe, pipe) 371 __field(u32, frame) 372 __field(u32, scanline) 373 ), 374 375 TP_fast_assign( 376 __entry->pipe = crtc->pipe; 377 __entry->frame = intel_crtc_get_vblank_counter(crtc); 378 __entry->scanline = intel_get_crtc_scanline(crtc); 379 ), 380 381 TP_printk("pipe %c, frame=%u, scanline=%u", 382 pipe_name(__entry->pipe), __entry->frame, __entry->scanline) 383 ); 384 385 TRACE_EVENT(intel_fbc_nuke, 386 TP_PROTO(struct intel_crtc *crtc), 387 TP_ARGS(crtc), 388 389 TP_STRUCT__entry( 390 __field(enum pipe, pipe) 391 __field(u32, frame) 392 __field(u32, scanline) 393 ), 394 395 TP_fast_assign( 396 __entry->pipe = crtc->pipe; 397 __entry->frame = intel_crtc_get_vblank_counter(crtc); 398 __entry->scanline = intel_get_crtc_scanline(crtc); 399 ), 400 401 TP_printk("pipe %c, frame=%u, scanline=%u", 402 pipe_name(__entry->pipe), __entry->frame, __entry->scanline) 403 ); 404 405 /* pipe updates */ 406 407 TRACE_EVENT(intel_pipe_update_start, 408 TP_PROTO(struct intel_crtc *crtc), 409 TP_ARGS(crtc), 410 411 TP_STRUCT__entry( 412 __field(enum pipe, pipe) 413 __field(u32, frame) 414 __field(u32, scanline) 415 __field(u32, min) 416 __field(u32, max) 417 ), 418 419 TP_fast_assign( 420 __entry->pipe = crtc->pipe; 421 __entry->frame = intel_crtc_get_vblank_counter(crtc); 422 __entry->scanline = intel_get_crtc_scanline(crtc); 423 __entry->min = crtc->debug.min_vbl; 424 __entry->max = crtc->debug.max_vbl; 425 ), 426 427 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", 428 pipe_name(__entry->pipe), __entry->frame, 429 __entry->scanline, __entry->min, __entry->max) 430 ); 431 432 TRACE_EVENT(intel_pipe_update_vblank_evaded, 433 TP_PROTO(struct intel_crtc *crtc), 434 TP_ARGS(crtc), 435 436 TP_STRUCT__entry( 437 __field(enum pipe, pipe) 438 __field(u32, frame) 439 __field(u32, scanline) 440 __field(u32, min) 441 __field(u32, max) 442 ), 443 444 TP_fast_assign( 445 __entry->pipe = crtc->pipe; 446 __entry->frame = crtc->debug.start_vbl_count; 447 __entry->scanline = crtc->debug.scanline_start; 448 __entry->min = crtc->debug.min_vbl; 449 __entry->max = crtc->debug.max_vbl; 450 ), 451 452 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", 453 pipe_name(__entry->pipe), __entry->frame, 454 __entry->scanline, __entry->min, __entry->max) 455 ); 456 457 TRACE_EVENT(intel_pipe_update_end, 458 TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), 459 TP_ARGS(crtc, frame, scanline_end), 460 461 TP_STRUCT__entry( 462 __field(enum pipe, pipe) 463 __field(u32, frame) 464 __field(u32, scanline) 465 ), 466 467 TP_fast_assign( 468 __entry->pipe = crtc->pipe; 469 __entry->frame = frame; 470 __entry->scanline = scanline_end; 471 ), 472 473 TP_printk("pipe %c, frame=%u, scanline=%u", 474 
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline)
);

/* frontbuffer tracking */

TRACE_EVENT(intel_frontbuffer_invalidate,
	TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
	TP_ARGS(frontbuffer_bits, origin),

	TP_STRUCT__entry(
		__field(unsigned int, frontbuffer_bits)
		__field(unsigned int, origin)
	),

	TP_fast_assign(
		__entry->frontbuffer_bits = frontbuffer_bits;
		__entry->origin = origin;
	),

	TP_printk("frontbuffer_bits=0x%08x, origin=%u",
		  __entry->frontbuffer_bits, __entry->origin)
);

TRACE_EVENT(intel_frontbuffer_flush,
	TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
	TP_ARGS(frontbuffer_bits, origin),

	TP_STRUCT__entry(
		__field(unsigned int, frontbuffer_bits)
		__field(unsigned int, origin)
	),

	TP_fast_assign(
		__entry->frontbuffer_bits = frontbuffer_bits;
		__entry->origin = origin;
	),

	TP_printk("frontbuffer_bits=0x%08x, origin=%u",
		  __entry->frontbuffer_bits, __entry->origin)
);

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u64, size)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->size = obj->base.size;
	),

	TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_gem_shrink,
	TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
	TP_ARGS(i915, target, flags),

	TP_STRUCT__entry(
		__field(int, dev)
		__field(unsigned long, target)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->dev = i915->drm.primary->index;
		__entry->target = target;
		__entry->flags = flags;
	),

	TP_printk("dev=%d, target=%lu, flags=%x",
		  __entry->dev, __entry->target, __entry->flags)
);

TRACE_EVENT(i915_vma_bind,
	TP_PROTO(struct i915_vma *vma, unsigned flags),
	TP_ARGS(vma, flags),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u64, offset)
		__field(u64, size)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
		__entry->flags = flags;
	),

	TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
		  __entry->obj, __entry->offset, __entry->size,
		  __entry->flags & PIN_MAPPABLE ?
", mappable" : "", 578 __entry->vm) 579 ); 580 581 TRACE_EVENT(i915_vma_unbind, 582 TP_PROTO(struct i915_vma *vma), 583 TP_ARGS(vma), 584 585 TP_STRUCT__entry( 586 __field(struct drm_i915_gem_object *, obj) 587 __field(struct i915_address_space *, vm) 588 __field(u64, offset) 589 __field(u64, size) 590 ), 591 592 TP_fast_assign( 593 __entry->obj = vma->obj; 594 __entry->vm = vma->vm; 595 __entry->offset = vma->node.start; 596 __entry->size = vma->node.size; 597 ), 598 599 TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p", 600 __entry->obj, __entry->offset, __entry->size, __entry->vm) 601 ); 602 603 TRACE_EVENT(i915_gem_object_pwrite, 604 TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len), 605 TP_ARGS(obj, offset, len), 606 607 TP_STRUCT__entry( 608 __field(struct drm_i915_gem_object *, obj) 609 __field(u64, offset) 610 __field(u64, len) 611 ), 612 613 TP_fast_assign( 614 __entry->obj = obj; 615 __entry->offset = offset; 616 __entry->len = len; 617 ), 618 619 TP_printk("obj=%p, offset=0x%llx, len=0x%llx", 620 __entry->obj, __entry->offset, __entry->len) 621 ); 622 623 TRACE_EVENT(i915_gem_object_pread, 624 TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len), 625 TP_ARGS(obj, offset, len), 626 627 TP_STRUCT__entry( 628 __field(struct drm_i915_gem_object *, obj) 629 __field(u64, offset) 630 __field(u64, len) 631 ), 632 633 TP_fast_assign( 634 __entry->obj = obj; 635 __entry->offset = offset; 636 __entry->len = len; 637 ), 638 639 TP_printk("obj=%p, offset=0x%llx, len=0x%llx", 640 __entry->obj, __entry->offset, __entry->len) 641 ); 642 643 TRACE_EVENT(i915_gem_object_fault, 644 TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write), 645 TP_ARGS(obj, index, gtt, write), 646 647 TP_STRUCT__entry( 648 __field(struct drm_i915_gem_object *, obj) 649 __field(u64, index) 650 __field(bool, gtt) 651 __field(bool, write) 652 ), 653 654 TP_fast_assign( 655 __entry->obj = obj; 656 __entry->index = index; 657 __entry->gtt = gtt; 658 __entry->write = write; 659 ), 660 661 TP_printk("obj=%p, %s index=%llu %s", 662 __entry->obj, 663 __entry->gtt ? "GTT" : "CPU", 664 __entry->index, 665 __entry->write ? ", writable" : "") 666 ); 667 668 DECLARE_EVENT_CLASS(i915_gem_object, 669 TP_PROTO(struct drm_i915_gem_object *obj), 670 TP_ARGS(obj), 671 672 TP_STRUCT__entry( 673 __field(struct drm_i915_gem_object *, obj) 674 ), 675 676 TP_fast_assign( 677 __entry->obj = obj; 678 ), 679 680 TP_printk("obj=%p", __entry->obj) 681 ); 682 683 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, 684 TP_PROTO(struct drm_i915_gem_object *obj), 685 TP_ARGS(obj) 686 ); 687 688 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, 689 TP_PROTO(struct drm_i915_gem_object *obj), 690 TP_ARGS(obj) 691 ); 692 693 TRACE_EVENT(i915_gem_evict, 694 TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags), 695 TP_ARGS(vm, size, align, flags), 696 697 TP_STRUCT__entry( 698 __field(u32, dev) 699 __field(struct i915_address_space *, vm) 700 __field(u64, size) 701 __field(u64, align) 702 __field(unsigned int, flags) 703 ), 704 705 TP_fast_assign( 706 __entry->dev = vm->i915->drm.primary->index; 707 __entry->vm = vm; 708 __entry->size = size; 709 __entry->align = align; 710 __entry->flags = flags; 711 ), 712 713 TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s", 714 __entry->dev, __entry->vm, __entry->size, __entry->align, 715 __entry->flags & PIN_MAPPABLE ? 
", mappable" : "") 716 ); 717 718 TRACE_EVENT(i915_gem_evict_node, 719 TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags), 720 TP_ARGS(vm, node, flags), 721 722 TP_STRUCT__entry( 723 __field(u32, dev) 724 __field(struct i915_address_space *, vm) 725 __field(u64, start) 726 __field(u64, size) 727 __field(unsigned long, color) 728 __field(unsigned int, flags) 729 ), 730 731 TP_fast_assign( 732 __entry->dev = vm->i915->drm.primary->index; 733 __entry->vm = vm; 734 __entry->start = node->start; 735 __entry->size = node->size; 736 __entry->color = node->color; 737 __entry->flags = flags; 738 ), 739 740 TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x", 741 __entry->dev, __entry->vm, 742 __entry->start, __entry->size, 743 __entry->color, __entry->flags) 744 ); 745 746 TRACE_EVENT(i915_gem_evict_vm, 747 TP_PROTO(struct i915_address_space *vm), 748 TP_ARGS(vm), 749 750 TP_STRUCT__entry( 751 __field(u32, dev) 752 __field(struct i915_address_space *, vm) 753 ), 754 755 TP_fast_assign( 756 __entry->dev = vm->i915->drm.primary->index; 757 __entry->vm = vm; 758 ), 759 760 TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) 761 ); 762 763 TRACE_EVENT(i915_request_queue, 764 TP_PROTO(struct i915_request *rq, u32 flags), 765 TP_ARGS(rq, flags), 766 767 TP_STRUCT__entry( 768 __field(u32, dev) 769 __field(u64, ctx) 770 __field(u16, class) 771 __field(u16, instance) 772 __field(u32, seqno) 773 __field(u32, flags) 774 ), 775 776 TP_fast_assign( 777 __entry->dev = rq->engine->i915->drm.primary->index; 778 __entry->class = rq->engine->uabi_class; 779 __entry->instance = rq->engine->uabi_instance; 780 __entry->ctx = rq->fence.context; 781 __entry->seqno = rq->fence.seqno; 782 __entry->flags = flags; 783 ), 784 785 TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x", 786 __entry->dev, __entry->class, __entry->instance, 787 __entry->ctx, __entry->seqno, __entry->flags) 788 ); 789 790 DECLARE_EVENT_CLASS(i915_request, 791 TP_PROTO(struct i915_request *rq), 792 TP_ARGS(rq), 793 794 TP_STRUCT__entry( 795 __field(u32, dev) 796 __field(u64, ctx) 797 __field(u32, guc_id) 798 __field(u16, class) 799 __field(u16, instance) 800 __field(u32, seqno) 801 __field(u32, tail) 802 ), 803 804 TP_fast_assign( 805 __entry->dev = rq->engine->i915->drm.primary->index; 806 __entry->class = rq->engine->uabi_class; 807 __entry->instance = rq->engine->uabi_instance; 808 __entry->guc_id = rq->context->guc_id; 809 __entry->ctx = rq->fence.context; 810 __entry->seqno = rq->fence.seqno; 811 __entry->tail = rq->tail; 812 ), 813 814 TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u", 815 __entry->dev, __entry->class, __entry->instance, 816 __entry->guc_id, __entry->ctx, __entry->seqno, 817 __entry->tail) 818 ); 819 820 DEFINE_EVENT(i915_request, i915_request_add, 821 TP_PROTO(struct i915_request *rq), 822 TP_ARGS(rq) 823 ); 824 825 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) 826 DEFINE_EVENT(i915_request, i915_request_guc_submit, 827 TP_PROTO(struct i915_request *rq), 828 TP_ARGS(rq) 829 ); 830 831 DEFINE_EVENT(i915_request, i915_request_submit, 832 TP_PROTO(struct i915_request *rq), 833 TP_ARGS(rq) 834 ); 835 836 DEFINE_EVENT(i915_request, i915_request_execute, 837 TP_PROTO(struct i915_request *rq), 838 TP_ARGS(rq) 839 ); 840 841 TRACE_EVENT(i915_request_in, 842 TP_PROTO(struct i915_request *rq, unsigned int port), 843 TP_ARGS(rq, port), 844 845 TP_STRUCT__entry( 846 __field(u32, dev) 847 __field(u64, ctx) 848 __field(u16, class) 849 
		__field(u16, instance)
		__field(u32, seqno)
		__field(u32, port)
		__field(s32, prio)
	),

	TP_fast_assign(
		__entry->dev = rq->engine->i915->drm.primary->index;
		__entry->class = rq->engine->uabi_class;
		__entry->instance = rq->engine->uabi_instance;
		__entry->ctx = rq->fence.context;
		__entry->seqno = rq->fence.seqno;
		__entry->prio = rq->sched.attr.priority;
		__entry->port = port;
	),

	TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
		  __entry->dev, __entry->class, __entry->instance,
		  __entry->ctx, __entry->seqno,
		  __entry->prio, __entry->port)
);

TRACE_EVENT(i915_request_out,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u64, ctx)
		__field(u16, class)
		__field(u16, instance)
		__field(u32, seqno)
		__field(u32, completed)
	),

	TP_fast_assign(
		__entry->dev = rq->engine->i915->drm.primary->index;
		__entry->class = rq->engine->uabi_class;
		__entry->instance = rq->engine->uabi_instance;
		__entry->ctx = rq->fence.context;
		__entry->seqno = rq->fence.seqno;
		__entry->completed = i915_request_completed(rq);
	),

	TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
		  __entry->dev, __entry->class, __entry->instance,
		  __entry->ctx, __entry->seqno, __entry->completed)
);

DECLARE_EVENT_CLASS(intel_context,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce),

	TP_STRUCT__entry(
		__field(u32, guc_id)
		__field(int, pin_count)
		__field(u32, sched_state)
		__field(u32, guc_sched_state_no_lock)
		__field(u8, guc_prio)
	),

	TP_fast_assign(
		__entry->guc_id = ce->guc_id;
		__entry->pin_count = atomic_read(&ce->pin_count);
		__entry->sched_state = ce->guc_state.sched_state;
		__entry->guc_sched_state_no_lock =
			atomic_read(&ce->guc_sched_state_no_lock);
		__entry->guc_prio = ce->guc_prio;
	),

	TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u",
		  __entry->guc_id, __entry->pin_count,
		  __entry->sched_state,
		  __entry->guc_sched_state_no_lock,
		  __entry->guc_prio)
);

DEFINE_EVENT(intel_context, intel_context_set_prio,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_reset,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_ban,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_register,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister_done,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_enable,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_disable,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_done,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_create,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_fence_release,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_free,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_pin,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_unpin,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
trace_i915_request_guc_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_execute(struct i915_request *rq)
{
}

static inline void
trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}

static inline void
trace_i915_request_out(struct i915_request *rq)
{
}

static inline void
trace_intel_context_set_prio(struct intel_context *ce)
{
}

static inline void
trace_intel_context_reset(struct intel_context *ce)
{
}

static inline void
trace_intel_context_ban(struct intel_context *ce)
{
}

static inline void
trace_intel_context_register(struct intel_context *ce)
{
}

static inline void
trace_intel_context_deregister(struct intel_context *ce)
{
}

static inline void
trace_intel_context_deregister_done(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_enable(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_disable(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_done(struct intel_context *ce)
{
}

static inline void
trace_intel_context_create(struct intel_context *ce)
{
}

static inline void
trace_intel_context_fence_release(struct intel_context *ce)
{
}

static inline void
trace_intel_context_free(struct intel_context *ce)
{
}

static inline void
trace_intel_context_steal_guc_id(struct intel_context *ce)
{
}

static inline void
trace_intel_context_do_pin(struct intel_context *ce)
{
}

static inline void
trace_intel_context_do_unpin(struct intel_context *ce)
{
}
#endif
#endif

DEFINE_EVENT(i915_request, i915_request_retire,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

TRACE_EVENT(i915_request_wait_begin,
	TP_PROTO(struct i915_request *rq, unsigned int flags),
	TP_ARGS(rq, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u64, ctx)
		__field(u16, class)
		__field(u16, instance)
		__field(u32, seqno)
		__field(unsigned int, flags)
	),

	/* NB: the blocking information is racy since mutex_is_locked
	 * doesn't check that the current thread holds the lock. The only
	 * other option would be to pass the boolean information of whether
	 * or not the class was blocking down through the stack, which is
	 * less desirable.
	 */
	TP_fast_assign(
		__entry->dev = rq->engine->i915->drm.primary->index;
		__entry->class = rq->engine->uabi_class;
		__entry->instance = rq->engine->uabi_instance;
		__entry->ctx = rq->fence.context;
		__entry->seqno = rq->fence.seqno;
		__entry->flags = flags;
	),

	TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
		  __entry->dev, __entry->class, __entry->instance,
		  __entry->ctx, __entry->seqno,
		  __entry->flags)
);

DEFINE_EVENT(i915_request, i915_request_wait_end,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
	),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
	),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);

TRACE_EVENT(intel_gpu_freq_change,
	TP_PROTO(u32 freq),
	TP_ARGS(freq),

	TP_STRUCT__entry(
		__field(u32, freq)
	),

	TP_fast_assign(
		__entry->freq = freq;
	),

	TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->dev = vm->i915->drm.primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);
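
/*
 * Usage sketch: assuming tracefs is mounted at /sys/kernel/tracing (the
 * exact mount point may differ), the two events above can be enabled at
 * runtime and matched up by the printed vm pointer to spot leaked ppgtts
 * during testing:
 *
 *   echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_create/enable
 *   echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_release/enable
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * The same vm pointer appears in the i915_vma_bind and i915_vma_unbind
 * events, so it can also be used to correlate mappings with a given ppgtt.
 */
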
/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(struct i915_gem_context *, ctx)
		__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
		__entry->dev = ctx->i915->drm.primary->index;
		__entry->ctx = ctx;
		__entry->vm = rcu_access_pointer(ctx->vm);
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ctx, __entry->vm)
)

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#include <trace/define_trace.h>