/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915

#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drm_drv.h>

#include "gt/intel_engine.h"

#include "i915_drv.h"

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u64, size)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->size = obj->base.size;
	),

	TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_gem_shrink,
	TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
	TP_ARGS(i915, target, flags),

	TP_STRUCT__entry(
		__field(int, dev)
		__field(unsigned long, target)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->dev = i915->drm.primary->index;
		__entry->target = target;
		__entry->flags = flags;
	),

	TP_printk("dev=%d, target=%lu, flags=%x",
		  __entry->dev, __entry->target, __entry->flags)
);

TRACE_EVENT(i915_vma_bind,
	TP_PROTO(struct i915_vma *vma, unsigned flags),
	TP_ARGS(vma, flags),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u64, offset)
		__field(u64, size)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
		__entry->flags = flags;
	),

	TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
		  __entry->obj, __entry->offset, __entry->size,
		  __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		  __entry->vm)
);

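/*
 * Usage sketch (illustrative, not part of this file): each TRACE_EVENT()
 * above generates a trace_<name>() helper, so a hypothetical binding path
 * would emit the event as
 *
 *	trace_i915_vma_bind(vma, flags);
 *
 * and the rendered line then follows the TP_printk() format, e.g.
 * "obj=..., offset=0x... size=0x..., mappable vm=...".
 */
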
", mappable" : "", 81 __entry->vm) 82 ); 83 84 TRACE_EVENT(i915_vma_unbind, 85 TP_PROTO(struct i915_vma *vma), 86 TP_ARGS(vma), 87 88 TP_STRUCT__entry( 89 __field(struct drm_i915_gem_object *, obj) 90 __field(struct i915_address_space *, vm) 91 __field(u64, offset) 92 __field(u64, size) 93 ), 94 95 TP_fast_assign( 96 __entry->obj = vma->obj; 97 __entry->vm = vma->vm; 98 __entry->offset = vma->node.start; 99 __entry->size = vma->node.size; 100 ), 101 102 TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p", 103 __entry->obj, __entry->offset, __entry->size, __entry->vm) 104 ); 105 106 TRACE_EVENT(i915_gem_object_pwrite, 107 TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len), 108 TP_ARGS(obj, offset, len), 109 110 TP_STRUCT__entry( 111 __field(struct drm_i915_gem_object *, obj) 112 __field(u64, offset) 113 __field(u64, len) 114 ), 115 116 TP_fast_assign( 117 __entry->obj = obj; 118 __entry->offset = offset; 119 __entry->len = len; 120 ), 121 122 TP_printk("obj=%p, offset=0x%llx, len=0x%llx", 123 __entry->obj, __entry->offset, __entry->len) 124 ); 125 126 TRACE_EVENT(i915_gem_object_pread, 127 TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len), 128 TP_ARGS(obj, offset, len), 129 130 TP_STRUCT__entry( 131 __field(struct drm_i915_gem_object *, obj) 132 __field(u64, offset) 133 __field(u64, len) 134 ), 135 136 TP_fast_assign( 137 __entry->obj = obj; 138 __entry->offset = offset; 139 __entry->len = len; 140 ), 141 142 TP_printk("obj=%p, offset=0x%llx, len=0x%llx", 143 __entry->obj, __entry->offset, __entry->len) 144 ); 145 146 TRACE_EVENT(i915_gem_object_fault, 147 TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write), 148 TP_ARGS(obj, index, gtt, write), 149 150 TP_STRUCT__entry( 151 __field(struct drm_i915_gem_object *, obj) 152 __field(u64, index) 153 __field(bool, gtt) 154 __field(bool, write) 155 ), 156 157 TP_fast_assign( 158 __entry->obj = obj; 159 __entry->index = index; 160 __entry->gtt = gtt; 161 __entry->write = write; 162 ), 163 164 TP_printk("obj=%p, %s index=%llu %s", 165 __entry->obj, 166 __entry->gtt ? "GTT" : "CPU", 167 __entry->index, 168 __entry->write ? ", writable" : "") 169 ); 170 171 DECLARE_EVENT_CLASS(i915_gem_object, 172 TP_PROTO(struct drm_i915_gem_object *obj), 173 TP_ARGS(obj), 174 175 TP_STRUCT__entry( 176 __field(struct drm_i915_gem_object *, obj) 177 ), 178 179 TP_fast_assign( 180 __entry->obj = obj; 181 ), 182 183 TP_printk("obj=%p", __entry->obj) 184 ); 185 186 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, 187 TP_PROTO(struct drm_i915_gem_object *obj), 188 TP_ARGS(obj) 189 ); 190 191 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, 192 TP_PROTO(struct drm_i915_gem_object *obj), 193 TP_ARGS(obj) 194 ); 195 196 TRACE_EVENT(i915_gem_evict, 197 TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags), 198 TP_ARGS(vm, size, align, flags), 199 200 TP_STRUCT__entry( 201 __field(u32, dev) 202 __field(struct i915_address_space *, vm) 203 __field(u64, size) 204 __field(u64, align) 205 __field(unsigned int, flags) 206 ), 207 208 TP_fast_assign( 209 __entry->dev = vm->i915->drm.primary->index; 210 __entry->vm = vm; 211 __entry->size = size; 212 __entry->align = align; 213 __entry->flags = flags; 214 ), 215 216 TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s", 217 __entry->dev, __entry->vm, __entry->size, __entry->align, 218 __entry->flags & PIN_MAPPABLE ? 
", mappable" : "") 219 ); 220 221 TRACE_EVENT(i915_gem_evict_node, 222 TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags), 223 TP_ARGS(vm, node, flags), 224 225 TP_STRUCT__entry( 226 __field(u32, dev) 227 __field(struct i915_address_space *, vm) 228 __field(u64, start) 229 __field(u64, size) 230 __field(unsigned long, color) 231 __field(unsigned int, flags) 232 ), 233 234 TP_fast_assign( 235 __entry->dev = vm->i915->drm.primary->index; 236 __entry->vm = vm; 237 __entry->start = node->start; 238 __entry->size = node->size; 239 __entry->color = node->color; 240 __entry->flags = flags; 241 ), 242 243 TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x", 244 __entry->dev, __entry->vm, 245 __entry->start, __entry->size, 246 __entry->color, __entry->flags) 247 ); 248 249 TRACE_EVENT(i915_gem_evict_vm, 250 TP_PROTO(struct i915_address_space *vm), 251 TP_ARGS(vm), 252 253 TP_STRUCT__entry( 254 __field(u32, dev) 255 __field(struct i915_address_space *, vm) 256 ), 257 258 TP_fast_assign( 259 __entry->dev = vm->i915->drm.primary->index; 260 __entry->vm = vm; 261 ), 262 263 TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) 264 ); 265 266 TRACE_EVENT(i915_request_queue, 267 TP_PROTO(struct i915_request *rq, u32 flags), 268 TP_ARGS(rq, flags), 269 270 TP_STRUCT__entry( 271 __field(u32, dev) 272 __field(u64, ctx) 273 __field(u16, class) 274 __field(u16, instance) 275 __field(u32, seqno) 276 __field(u32, flags) 277 ), 278 279 TP_fast_assign( 280 __entry->dev = rq->i915->drm.primary->index; 281 __entry->class = rq->engine->uabi_class; 282 __entry->instance = rq->engine->uabi_instance; 283 __entry->ctx = rq->fence.context; 284 __entry->seqno = rq->fence.seqno; 285 __entry->flags = flags; 286 ), 287 288 TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x", 289 __entry->dev, __entry->class, __entry->instance, 290 __entry->ctx, __entry->seqno, __entry->flags) 291 ); 292 293 DECLARE_EVENT_CLASS(i915_request, 294 TP_PROTO(struct i915_request *rq), 295 TP_ARGS(rq), 296 297 TP_STRUCT__entry( 298 __field(u32, dev) 299 __field(u64, ctx) 300 __field(u16, class) 301 __field(u16, instance) 302 __field(u32, seqno) 303 __field(u32, tail) 304 ), 305 306 TP_fast_assign( 307 __entry->dev = rq->i915->drm.primary->index; 308 __entry->class = rq->engine->uabi_class; 309 __entry->instance = rq->engine->uabi_instance; 310 __entry->ctx = rq->fence.context; 311 __entry->seqno = rq->fence.seqno; 312 __entry->tail = rq->tail; 313 ), 314 315 TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u", 316 __entry->dev, __entry->class, __entry->instance, 317 __entry->ctx, __entry->seqno, __entry->tail) 318 ); 319 320 DEFINE_EVENT(i915_request, i915_request_add, 321 TP_PROTO(struct i915_request *rq), 322 TP_ARGS(rq) 323 ); 324 325 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) 326 DEFINE_EVENT(i915_request, i915_request_guc_submit, 327 TP_PROTO(struct i915_request *rq), 328 TP_ARGS(rq) 329 ); 330 331 DEFINE_EVENT(i915_request, i915_request_submit, 332 TP_PROTO(struct i915_request *rq), 333 TP_ARGS(rq) 334 ); 335 336 DEFINE_EVENT(i915_request, i915_request_execute, 337 TP_PROTO(struct i915_request *rq), 338 TP_ARGS(rq) 339 ); 340 341 TRACE_EVENT(i915_request_in, 342 TP_PROTO(struct i915_request *rq, unsigned int port), 343 TP_ARGS(rq, port), 344 345 TP_STRUCT__entry( 346 __field(u32, dev) 347 __field(u64, ctx) 348 __field(u16, class) 349 __field(u16, instance) 350 __field(u32, seqno) 351 __field(u32, port) 352 __field(s32, prio) 353 ), 354 355 TP_fast_assign( 
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
DEFINE_EVENT(i915_request, i915_request_guc_submit,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

DEFINE_EVENT(i915_request, i915_request_submit,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

DEFINE_EVENT(i915_request, i915_request_execute,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

TRACE_EVENT(i915_request_in,
	TP_PROTO(struct i915_request *rq, unsigned int port),
	TP_ARGS(rq, port),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u64, ctx)
		__field(u16, class)
		__field(u16, instance)
		__field(u32, seqno)
		__field(u32, port)
		__field(s32, prio)
	),

	TP_fast_assign(
		__entry->dev = rq->i915->drm.primary->index;
		__entry->class = rq->engine->uabi_class;
		__entry->instance = rq->engine->uabi_instance;
		__entry->ctx = rq->fence.context;
		__entry->seqno = rq->fence.seqno;
		__entry->prio = rq->sched.attr.priority;
		__entry->port = port;
	),

	TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
		  __entry->dev, __entry->class, __entry->instance,
		  __entry->ctx, __entry->seqno,
		  __entry->prio, __entry->port)
);

TRACE_EVENT(i915_request_out,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u64, ctx)
		__field(u16, class)
		__field(u16, instance)
		__field(u32, seqno)
		__field(u32, completed)
	),

	TP_fast_assign(
		__entry->dev = rq->i915->drm.primary->index;
		__entry->class = rq->engine->uabi_class;
		__entry->instance = rq->engine->uabi_instance;
		__entry->ctx = rq->fence.context;
		__entry->seqno = rq->fence.seqno;
		__entry->completed = i915_request_completed(rq);
	),

	TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
		  __entry->dev, __entry->class, __entry->instance,
		  __entry->ctx, __entry->seqno, __entry->completed)
);

DECLARE_EVENT_CLASS(intel_context,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce),

	TP_STRUCT__entry(
		__field(u32, guc_id)
		__field(int, pin_count)
		__field(u32, sched_state)
		__field(u8, guc_prio)
	),

	TP_fast_assign(
		__entry->guc_id = ce->guc_id.id;
		__entry->pin_count = atomic_read(&ce->pin_count);
		__entry->sched_state = ce->guc_state.sched_state;
		__entry->guc_prio = ce->guc_state.prio;
	),

	TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
		  __entry->guc_id, __entry->pin_count,
		  __entry->sched_state,
		  __entry->guc_prio)
);

DEFINE_EVENT(intel_context, intel_context_set_prio,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_reset,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_ban,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_register,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister_done,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_enable,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_disable,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_done,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_create,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_fence_release,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_free,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_pin,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_unpin,
	TP_PROTO(struct intel_context *ce),
	TP_ARGS(ce)
);

#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
trace_i915_request_guc_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_execute(struct i915_request *rq)
{
}

static inline void
trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}

static inline void
trace_i915_request_out(struct i915_request *rq)
{
}

static inline void
trace_intel_context_set_prio(struct intel_context *ce)
{
}

static inline void
trace_intel_context_reset(struct intel_context *ce)
{
}

static inline void
trace_intel_context_ban(struct intel_context *ce)
{
}

static inline void
trace_intel_context_register(struct intel_context *ce)
{
}

static inline void
trace_intel_context_deregister(struct intel_context *ce)
{
}

static inline void
trace_intel_context_deregister_done(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_enable(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_disable(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_done(struct intel_context *ce)
{
}

static inline void
trace_intel_context_create(struct intel_context *ce)
{
}

static inline void
trace_intel_context_fence_release(struct intel_context *ce)
{
}

static inline void
trace_intel_context_free(struct intel_context *ce)
{
}

static inline void
trace_intel_context_steal_guc_id(struct intel_context *ce)
{
}

static inline void
trace_intel_context_do_pin(struct intel_context *ce)
{
}

static inline void
trace_intel_context_do_unpin(struct intel_context *ce)
{
}
#endif
#endif

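/*
 * The stubs above keep call sites unconditional: a hypothetical caller
 * (not taken from this file) can always write
 *
 *	trace_i915_request_submit(rq);
 *
 * and the call compiles away entirely when
 * CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is disabled.
 */
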
DEFINE_EVENT(i915_request, i915_request_retire,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

TRACE_EVENT(i915_request_wait_begin,
	TP_PROTO(struct i915_request *rq, unsigned int flags),
	TP_ARGS(rq, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u64, ctx)
		__field(u16, class)
		__field(u16, instance)
		__field(u32, seqno)
		__field(unsigned int, flags)
	),

	/* NB: the blocking information is racy since mutex_is_locked
	 * doesn't check that the current thread holds the lock. The only
	 * other option would be to pass the boolean information of whether
	 * or not the class was blocking down through the stack which is
	 * less desirable.
	 */
	TP_fast_assign(
		__entry->dev = rq->i915->drm.primary->index;
		__entry->class = rq->engine->uabi_class;
		__entry->instance = rq->engine->uabi_instance;
		__entry->ctx = rq->fence.context;
		__entry->seqno = rq->fence.seqno;
		__entry->flags = flags;
	),

	TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
		  __entry->dev, __entry->class, __entry->instance,
		  __entry->ctx, __entry->seqno,
		  __entry->flags)
);

DEFINE_EVENT(i915_request, i915_request_wait_end,
	TP_PROTO(struct i915_request *rq),
	TP_ARGS(rq)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
	),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
	),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->dev = vm->i915->drm.primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

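/*
 * Leak-check sketch for the two ppgtt tracepoints above, assuming the
 * standard tracefs layout ("run-gpu-workload" is a hypothetical stand-in
 * for whatever is being tested):
 *
 *	# echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_create/enable
 *	# echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_release/enable
 *	# run-gpu-workload
 *	# cat /sys/kernel/tracing/trace
 *
 * Every vm pointer reported by i915_ppgtt_create should eventually be
 * matched by an i915_ppgtt_release carrying the same vm.
 */
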
/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(struct i915_gem_context *, ctx)
		__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
		__entry->dev = ctx->i915->drm.primary->index;
		__entry->ctx = ctx;
		__entry->vm = ctx->vm;
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ctx, __entry->vm)
)

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#define TRACE_INCLUDE_FILE i915_trace
#include <trace/define_trace.h>
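
/*
 * How this header is consumed (the standard kernel tracepoint pattern; a
 * sketch rather than a copy of the i915 build rules): exactly one .c file
 * defines CREATE_TRACE_POINTS before the include, so define_trace.h emits
 * the tracepoint definitions once:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "i915_trace.h"
 *
 * Every other user includes "i915_trace.h" normally and only picks up the
 * trace_*() declarations.
 */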