/*
 * i915 tracepoint definitions.
 *
 * This header is included multiple times by the tracing core (once for each
 * stage of tracepoint expansion), hence the TRACE_HEADER_MULTI_READ escape
 * in the include guard below. Event names, field layouts and TP_printk
 * format strings are consumed by userspace tools; treat them as ABI.
 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace

/* pipe updates */

/*
 * Emitted when an atomic-ish pipe update begins; records the current frame
 * counter and scanline along with the [min, max] scanline window in which
 * the update must not happen (the vblank evasion window).
 */
TRACE_EVENT(i915_pipe_update_start,
	    TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
	    TP_ARGS(crtc, min, max),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, min)
			     __field(u32, max)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   __entry->min = min;
			   __entry->max = max;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline, __entry->min, __entry->max)
);

/*
 * Emitted once the scanline has been confirmed to be outside the evasion
 * window; 'frame' is supplied by the caller rather than re-read from the
 * vblank counter.
 */
TRACE_EVENT(i915_pipe_update_vblank_evaded,
	    TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
	    TP_ARGS(crtc, min, max, frame),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, min)
			     __field(u32, max)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = frame;
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   __entry->min = min;
			   __entry->max = max;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline, __entry->min, __entry->max)
);

/* Emitted when the pipe update has completed. */
TRACE_EVENT(i915_pipe_update_end,
	    TP_PROTO(struct intel_crtc *crtc, u32 frame),
	    TP_ARGS(crtc, frame),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = frame;
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline)
);

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->base.size;
			   ),

	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

/* VMA bound into an address space; node.start/size describe its GTT slot. */
TRACE_EVENT(i915_vma_bind,
	    TP_PROTO(struct i915_vma *vma, unsigned flags),
	    TP_ARGS(vma, flags),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u32, size)
			     __field(unsigned, flags)
			     ),

	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   __entry->flags = flags;
			   ),

	    TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
		      __entry->obj, __entry->offset, __entry->size,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		      __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
	    TP_PROTO(struct i915_vma *vma),
	    TP_ARGS(vma),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   ),

	    TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

/*
 * Short tag for the VM kind in trace output: "G" for the global GTT,
 * "P" otherwise (presumably per-process ppgtt — the only other case here).
 */
#define VM_TO_TRACE_NAME(vm) \
	(i915_is_ggtt(vm) ? "G" : \
			    "P")

/* Event class for VA-range events: logs the vm and the [start, end] range. */
DECLARE_EVENT_CLASS(i915_va,
	TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
	TP_ARGS(vm, start, length, name),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u64, start)
		__field(u64, end)
		__string(name, name)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->start = start;
		/* end is inclusive, hence the -1 */
		__entry->end = start + length - 1;
		__assign_str(name, name);
	),

	TP_printk("vm=%p (%s), 0x%llx-0x%llx",
		  __entry->vm, __get_str(name),  __entry->start, __entry->end)
);

DEFINE_EVENT(i915_va, i915_va_alloc,
	     TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
	     TP_ARGS(vm, start, length, name)
);

/*
 * Event class for page-directory-entry events: records which PDE is touched
 * and the VA span it covers, computed from start and the PDE shift.
 */
DECLARE_EVENT_CLASS(i915_page_table_entry,
	TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
	TP_ARGS(vm, pde, start, pde_shift),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, pde)
		__field(u64, start)
		__field(u64, end)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->pde = pde;
		__entry->start = start;
		/* round up to the end of the PDE-sized region, inclusive */
		__entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
	),

	TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
		  __entry->vm, __entry->pde, __entry->start, __entry->end)
);

DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
	     TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
	     TP_ARGS(vm, pde, start, pde_shift)
);

/* Avoid extra math because we only support two sizes. The format is defined by
 * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \
	((((bits) == 1024) ? 288 : 144) + 1)

/*
 * Event class for PTE-range updates within one page table; dumps the
 * used_ptes bitmap as hex via the "%*pb" bitmap printk extension.
 */
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
	TP_PROTO(struct i915_address_space *vm, u32 pde,
		 struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
	TP_ARGS(vm, pde, pt, first, count, bits),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, pde)
		__field(u32, first)
		__field(u32, last)
		__dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->pde = pde;
		__entry->first = first;
		__entry->last = first + count - 1;
		scnprintf(__get_str(cur_ptes),
			  TRACE_PT_SIZE(bits),
			  "%*pb",
			  bits,
			  pt->used_ptes);
	),

	/* NOTE(review): prints last before first ("updating %u:%u") — looks
	 * like the arguments may be swapped; confirm against the intended
	 * output format before changing. */
	TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
		  __entry->vm, __entry->pde, __entry->last, __entry->first,
		  __get_str(cur_ptes))
);

DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
	TP_PROTO(struct i915_address_space *vm, u32 pde,
		 struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
	TP_ARGS(vm, pde, pt, first, count, bits)
);

/*
 * Domain transitions: old domains are packed into the high 16 bits, new
 * (current) domains into the low 16 bits of each field, and unpacked again
 * in TP_printk as "old=>new".
 */
TRACE_EVENT(i915_gem_object_change_domain,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
	    TP_ARGS(obj, old_read, old_write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, read_domains)
			     __field(u32, write_domain)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
			   ),

	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
		      __entry->obj,
		      __entry->read_domains >> 16,
		      __entry->read_domains & 0xffff,
		      __entry->write_domain >> 16,
		      __entry->write_domain & 0xffff)
);

TRACE_EVENT(i915_gem_object_pwrite,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

/* Page fault on an object mapping; 'gtt' selects GTT vs CPU mmap path. */
TRACE_EVENT(i915_gem_object_fault,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	    TP_ARGS(obj, index, gtt, write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, index)
			     __field(bool, gtt)
			     __field(bool, write)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->index = index;
			   __entry->gtt = gtt;
			   __entry->write = write;
			   ),

	    TP_printk("obj=%p, %s index=%u %s",
		      __entry->obj,
		      __entry->gtt ? "GTT" : "CPU",
		      __entry->index,
		      __entry->write ? ", writable" : "")
);

/* Event class for simple single-object events. */
DECLARE_EVENT_CLASS(i915_gem_object,
		    TP_PROTO(struct drm_i915_gem_object *obj),
		    TP_ARGS(obj),

		    TP_STRUCT__entry(
				     __field(struct drm_i915_gem_object *, obj)
				     ),

		    TP_fast_assign(
				   __entry->obj = obj;
				   ),

		    TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
	    TP_ARGS(dev, size, align, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, size)
			     __field(u32, align)
			     __field(unsigned, flags)
			    ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->flags = flags;
			  ),

	    TP_printk("dev=%d, size=%d, align=%d %s",
		      __entry->dev, __entry->size, __entry->align,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_everything,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			    ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			  ),

	    TP_printk("dev=%d", __entry->dev)
);

TRACE_EVENT(i915_gem_evict_vm,
	    TP_PROTO(struct i915_address_space *vm),
	    TP_ARGS(vm),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->dev->primary->index;
			   __entry->vm = vm;
			  ),

	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

/* Inter-ring synchronisation: 'to' waits for 'req' on ring 'from'. */
TRACE_EVENT(i915_gem_ring_sync_to,
	    TP_PROTO(struct intel_engine_cs *from,
		     struct intel_engine_cs *to,
		     struct drm_i915_gem_request *req),
	    TP_ARGS(from, to, req),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, sync_from)
			     __field(u32, sync_to)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = from->dev->primary->index;
			   __entry->sync_from = from->id;
			   __entry->sync_to = to->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   ),

	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
		      __entry->dev,
		      __entry->sync_from, __entry->sync_to,
		      __entry->seqno)
);

TRACE_EVENT(i915_gem_ring_dispatch,
	    TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
	    TP_ARGS(req, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   struct intel_engine_cs *ring =
						i915_gem_request_get_ring(req);
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   __entry->flags = flags;
			   /* keeps the irq reference so completion can be traced */
			   i915_trace_irq_get(ring, req);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_flush,
	    TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
	    TP_ARGS(ring, invalidate, flush),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, invalidate)
			     __field(u32, flush)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->invalidate = invalidate;
			   __entry->flush = flush;
			   ),

	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		      __entry->dev, __entry->ring,
		      __entry->invalidate, __entry->flush)
);

/* Event class for request lifecycle events (add/retire/complete/...). */
DECLARE_EVENT_CLASS(i915_gem_request,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, uniq)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   struct intel_engine_cs *ring =
						i915_gem_request_get_ring(req);
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->uniq = req ? req->uniq : 0;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   ),

	    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
		      __entry->dev, __entry->ring, __entry->uniq,
		      __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

/* Fired from the notify path; reads the current HW seqno off the ring. */
TRACE_EVENT(i915_gem_request_notify,
	    TP_PROTO(struct intel_engine_cs *ring),
	    TP_ARGS(ring),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = ring->get_seqno(ring, false);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_wait_begin,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, uniq)
			     __field(u32, seqno)
			     __field(bool, blocking)
			     ),

	    /* NB: the blocking information is racy since mutex_is_locked
	     * doesn't check that the current thread holds the lock. The only
	     * other option would be to pass the boolean information of whether
	     * or not the class was blocking down through the stack which is
	     * less desirable.
	     */
	    TP_fast_assign(
			   struct intel_engine_cs *ring =
						i915_gem_request_get_ring(req);
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->uniq = req ? req->uniq : 0;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   __entry->blocking =
				     mutex_is_locked(&ring->dev->struct_mutex);
			   ),

	    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
		      __entry->dev, __entry->ring, __entry->uniq,
		      __entry->seqno, __entry->blocking ?  "yes (NB)" : "no")
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

/* Event class for plain per-ring events (wait begin/end). */
DECLARE_EVENT_CLASS(i915_ring,
	    TP_PROTO(struct intel_engine_cs *ring),
	    TP_ARGS(ring),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   ),

	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
	    TP_PROTO(struct intel_engine_cs *ring),
	    TP_ARGS(ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_end,
	    TP_PROTO(struct intel_engine_cs *ring),
	    TP_ARGS(ring)
);

TRACE_EVENT(i915_flip_request,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(
		    __entry->plane = plane;
		    __entry->obj = obj;
		    ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_flip_complete,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(
		    __entry->plane = plane;
		    __entry->obj = obj;
		    ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

/*
 * Register read/write trace; TP_CONDITION suppresses the event entirely
 * when the caller passes trace == false, so hot register accesses can opt
 * out without a branch at the call site.
 */
TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
		),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = reg;
		__entry->write = write;
		__entry->len = len;
		),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		__entry->write ? "write" : "read",
		__entry->reg, __entry->len,
		(u32)(__entry->val & 0xffffffff),
		(u32)(__entry->val >> 32))
);

TRACE_EVENT(intel_gpu_freq_change,
	    TP_PROTO(u32 freq),
	    TP_ARGS(freq),

	    TP_STRUCT__entry(
			     __field(u32, freq)
			     ),

	    TP_fast_assign(
			   __entry->freq = freq;
			   ),

	    TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
			__field(struct i915_address_space *, vm)
			__field(u32, dev)
	),

	TP_fast_assign(
			__entry->vm = vm;
			__entry->dev = vm->dev->primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
			__field(u32, dev)
			__field(struct intel_context *, ctx)
			__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
			__entry->ctx = ctx;
			/* vm is NULL unless full ppgtt is enabled for this ctx */
			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
			__entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ctx, __entry->vm)
)

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx)
);

/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important point
 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 * called only if full ppgtt is enabled.
 */
TRACE_EVENT(switch_mm,
	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),

	TP_ARGS(ring, to),

	TP_STRUCT__entry(
			__field(u32, ring)
			__field(struct intel_context *, to)
			__field(struct i915_address_space *, vm)
			__field(u32, dev)
	),

	TP_fast_assign(
			__entry->ring = ring->id;
			__entry->to = to;
			__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
			__entry->dev = ring->dev->primary->index;
	),

	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>