1 /* 2 * Copyright (c) 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * Mika Kuoppala <mika.kuoppala@intel.com> 27 * 28 */ 29 30 #include <linux/ascii85.h> 31 #include <linux/highmem.h> 32 #include <linux/nmi.h> 33 #include <linux/pagevec.h> 34 #include <linux/scatterlist.h> 35 #include <linux/string_helpers.h> 36 #include <linux/utsname.h> 37 #include <linux/zlib.h> 38 39 #include <drm/drm_cache.h> 40 #include <drm/drm_print.h> 41 42 #include "display/intel_dmc.h" 43 #include "display/intel_overlay.h" 44 45 #include "gem/i915_gem_context.h" 46 #include "gem/i915_gem_lmem.h" 47 #include "gt/intel_engine_regs.h" 48 #include "gt/intel_gt.h" 49 #include "gt/intel_gt_mcr.h" 50 #include "gt/intel_gt_pm.h" 51 #include "gt/intel_gt_regs.h" 52 #include "gt/uc/intel_guc_capture.h" 53 54 #include "i915_driver.h" 55 #include "i915_drv.h" 56 #include "i915_gpu_error.h" 57 #include "i915_memcpy.h" 58 #include "i915_reg.h" 59 #include "i915_scatterlist.h" 60 #include "i915_utils.h" 61 62 #define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) 63 #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) 64 65 static void __sg_set_buf(struct scatterlist *sg, 66 void *addr, unsigned int len, loff_t it) 67 { 68 sg->page_link = (unsigned long)virt_to_page(addr); 69 sg->offset = offset_in_page(addr); 70 sg->length = len; 71 sg->dma_address = it; 72 } 73 74 static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) 75 { 76 if (!len) 77 return false; 78 79 if (e->bytes + len + 1 <= e->size) 80 return true; 81 82 if (e->bytes) { 83 __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter); 84 e->iter += e->bytes; 85 e->buf = NULL; 86 e->bytes = 0; 87 } 88 89 if (e->cur == e->end) { 90 struct scatterlist *sgl; 91 92 sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL); 93 if (!sgl) { 94 e->err = -ENOMEM; 95 return false; 96 } 97 98 if (e->cur) { 99 e->cur->offset = 0; 100 e->cur->length = 0; 101 e->cur->page_link = 102 (unsigned long)sgl | SG_CHAIN; 103 } else { 104 e->sgl = sgl; 105 } 106 107 e->cur = sgl; 108 e->end = sgl + SG_MAX_SINGLE_ALLOC - 1; 109 } 110 111 e->size = ALIGN(len + 1, SZ_64K); 112 e->buf = kmalloc(e->size, ALLOW_FAIL); 113 if (!e->buf) { 114 e->size = PAGE_ALIGN(len + 1); 115 e->buf = kmalloc(e->size, GFP_KERNEL); 116 } 117 if (!e->buf) { 118 e->err = -ENOMEM; 119 return false; 120 } 121 122 return true; 123 } 124 125 __printf(2, 0) 126 static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 127 const char *fmt, va_list args) 128 { 129 va_list ap; 130 int len; 131 132 if (e->err) 133 return; 134 135 va_copy(ap, args); 136 len = vsnprintf(NULL, 0, fmt, ap); 137 va_end(ap); 138 if (len <= 0) { 139 e->err = len; 140 return; 141 } 142 143 if (!__i915_error_grow(e, len)) 144 return; 145 146 GEM_BUG_ON(e->bytes >= e->size); 147 len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args); 148 if (len < 0) { 149 e->err = len; 150 return; 151 } 152 e->bytes += len; 153 } 154 155 static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) 156 { 157 unsigned len; 158 159 if (e->err || !str) 160 return; 161 162 len = strlen(str); 163 if (!__i915_error_grow(e, len)) 164 return; 165 166 GEM_BUG_ON(e->bytes + len > e->size); 167 memcpy(e->buf + e->bytes, str, len); 168 e->bytes += len; 169 } 170 171 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 172 #define err_puts(e, s) i915_error_puts(e, s) 173 174 static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf) 175 { 176 i915_error_vprintf(p->arg, vaf->fmt, *vaf->va); 177 } 178 179 static inline struct drm_printer 180 i915_error_printer(struct drm_i915_error_state_buf *e) 181 { 182 struct drm_printer p = { 183 .printfn = __i915_printfn_error, 184 .arg = e, 185 }; 186 return p; 187 } 188 189 /* single threaded page allocator with a reserved stash for emergencies */ 190 static void pool_fini(struct folio_batch *fbatch) 191 { 192 folio_batch_release(fbatch); 193 } 194 195 static int pool_refill(struct folio_batch *fbatch, gfp_t gfp) 196 { 197 while (folio_batch_space(fbatch)) { 198 struct folio *folio; 199 200 folio = folio_alloc(gfp, 0); 201 if (!folio) 202 return -ENOMEM; 203 204 folio_batch_add(fbatch, folio); 205 } 206 207 return 0; 208 } 209 210 static int pool_init(struct folio_batch *fbatch, gfp_t gfp) 211 { 212 int err; 213 214 folio_batch_init(fbatch); 215 216 err = pool_refill(fbatch, gfp); 217 if (err) 218 pool_fini(fbatch); 219 220 return err; 221 } 222 223 static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp) 224 { 225 struct folio *folio; 226 227 folio = folio_alloc(gfp, 0); 228 if (!folio && folio_batch_count(fbatch)) 229 folio = fbatch->folios[--fbatch->nr]; 230 231 return folio ? folio_address(folio) : NULL; 232 } 233 234 static void pool_free(struct folio_batch *fbatch, void *addr) 235 { 236 struct folio *folio = virt_to_folio(addr); 237 238 if (folio_batch_space(fbatch)) 239 folio_batch_add(fbatch, folio); 240 else 241 folio_put(folio); 242 } 243 244 #ifdef CONFIG_DRM_I915_COMPRESS_ERROR 245 246 struct i915_vma_compress { 247 struct folio_batch pool; 248 struct z_stream_s zstream; 249 void *tmp; 250 }; 251 252 static bool compress_init(struct i915_vma_compress *c) 253 { 254 struct z_stream_s *zstream = &c->zstream; 255 256 if (pool_init(&c->pool, ALLOW_FAIL)) 257 return false; 258 259 zstream->workspace = 260 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), 261 ALLOW_FAIL); 262 if (!zstream->workspace) { 263 pool_fini(&c->pool); 264 return false; 265 } 266 267 c->tmp = NULL; 268 if (i915_has_memcpy_from_wc()) 269 c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); 270 271 return true; 272 } 273 274 static bool compress_start(struct i915_vma_compress *c) 275 { 276 struct z_stream_s *zstream = &c->zstream; 277 void *workspace = zstream->workspace; 278 279 memset(zstream, 0, sizeof(*zstream)); 280 zstream->workspace = workspace; 281 282 return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; 283 } 284 285 static void *compress_next_page(struct i915_vma_compress *c, 286 struct i915_vma_coredump *dst) 287 { 288 void *page_addr; 289 struct page *page; 290 291 page_addr = pool_alloc(&c->pool, ALLOW_FAIL); 292 if (!page_addr) 293 return ERR_PTR(-ENOMEM); 294 295 page = virt_to_page(page_addr); 296 list_add_tail(&page->lru, &dst->page_list); 297 return page_addr; 298 } 299 300 static int compress_page(struct i915_vma_compress *c, 301 void *src, 302 struct i915_vma_coredump *dst, 303 bool wc) 304 { 305 struct z_stream_s *zstream = &c->zstream; 306 307 zstream->next_in = src; 308 if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) 309 zstream->next_in = c->tmp; 310 zstream->avail_in = PAGE_SIZE; 311 312 do { 313 if (zstream->avail_out == 0) { 314 zstream->next_out = compress_next_page(c, dst); 315 if (IS_ERR(zstream->next_out)) 316 return PTR_ERR(zstream->next_out); 317 318 zstream->avail_out = PAGE_SIZE; 319 } 320 321 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) 322 return -EIO; 323 324 cond_resched(); 325 } while (zstream->avail_in); 326 327 /* Fallback to uncompressed if we increase size? */ 328 if (0 && zstream->total_out > zstream->total_in) 329 return -E2BIG; 330 331 return 0; 332 } 333 334 static int compress_flush(struct i915_vma_compress *c, 335 struct i915_vma_coredump *dst) 336 { 337 struct z_stream_s *zstream = &c->zstream; 338 339 do { 340 switch (zlib_deflate(zstream, Z_FINISH)) { 341 case Z_OK: /* more space requested */ 342 zstream->next_out = compress_next_page(c, dst); 343 if (IS_ERR(zstream->next_out)) 344 return PTR_ERR(zstream->next_out); 345 346 zstream->avail_out = PAGE_SIZE; 347 break; 348 349 case Z_STREAM_END: 350 goto end; 351 352 default: /* any error */ 353 return -EIO; 354 } 355 } while (1); 356 357 end: 358 memset(zstream->next_out, 0, zstream->avail_out); 359 dst->unused = zstream->avail_out; 360 return 0; 361 } 362 363 static void compress_finish(struct i915_vma_compress *c) 364 { 365 zlib_deflateEnd(&c->zstream); 366 } 367 368 static void compress_fini(struct i915_vma_compress *c) 369 { 370 kfree(c->zstream.workspace); 371 if (c->tmp) 372 pool_free(&c->pool, c->tmp); 373 pool_fini(&c->pool); 374 } 375 376 static void err_compression_marker(struct drm_i915_error_state_buf *m) 377 { 378 err_puts(m, ":"); 379 } 380 381 #else 382 383 struct i915_vma_compress { 384 struct folio_batch pool; 385 }; 386 387 static bool compress_init(struct i915_vma_compress *c) 388 { 389 return pool_init(&c->pool, ALLOW_FAIL) == 0; 390 } 391 392 static bool compress_start(struct i915_vma_compress *c) 393 { 394 return true; 395 } 396 397 static int compress_page(struct i915_vma_compress *c, 398 void *src, 399 struct i915_vma_coredump *dst, 400 bool wc) 401 { 402 void *ptr; 403 404 ptr = pool_alloc(&c->pool, ALLOW_FAIL); 405 if (!ptr) 406 return -ENOMEM; 407 408 if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) 409 memcpy(ptr, src, PAGE_SIZE); 410 list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list); 411 cond_resched(); 412 413 return 0; 414 } 415 416 static int compress_flush(struct i915_vma_compress *c, 417 struct i915_vma_coredump *dst) 418 { 419 return 0; 420 } 421 422 static void compress_finish(struct i915_vma_compress *c) 423 { 424 } 425 426 static void compress_fini(struct i915_vma_compress *c) 427 { 428 pool_fini(&c->pool); 429 } 430 431 static void err_compression_marker(struct drm_i915_error_state_buf *m) 432 { 433 err_puts(m, "~"); 434 } 435 436 #endif 437 438 static void error_print_instdone(struct drm_i915_error_state_buf *m, 439 const struct intel_engine_coredump *ee) 440 { 441 int slice; 442 int subslice; 443 int iter; 444 445 err_printf(m, " INSTDONE: 0x%08x\n", 446 ee->instdone.instdone); 447 448 if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3) 449 return; 450 451 err_printf(m, " SC_INSTDONE: 0x%08x\n", 452 ee->instdone.slice_common); 453 454 if (GRAPHICS_VER(m->i915) <= 6) 455 return; 456 457 for_each_ss_steering(iter, ee->engine->gt, slice, subslice) 458 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 459 slice, subslice, 460 ee->instdone.sampler[slice][subslice]); 461 462 for_each_ss_steering(iter, ee->engine->gt, slice, subslice) 463 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", 464 slice, subslice, 465 ee->instdone.row[slice][subslice]); 466 467 if (GRAPHICS_VER(m->i915) < 12) 468 return; 469 470 if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) { 471 for_each_ss_steering(iter, ee->engine->gt, slice, subslice) 472 err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n", 473 slice, subslice, 474 ee->instdone.geom_svg[slice][subslice]); 475 } 476 477 err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", 478 ee->instdone.slice_common_extra[0]); 479 err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", 480 ee->instdone.slice_common_extra[1]); 481 } 482 483 static void error_print_request(struct drm_i915_error_state_buf *m, 484 const char *prefix, 485 const struct i915_request_coredump *erq) 486 { 487 if (!erq->seqno) 488 return; 489 490 err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n", 491 prefix, erq->pid, erq->context, erq->seqno, 492 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 493 &erq->flags) ? "!" : "", 494 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, 495 &erq->flags) ? "+" : "", 496 erq->sched_attr.priority, 497 erq->head, erq->tail); 498 } 499 500 static void error_print_context(struct drm_i915_error_state_buf *m, 501 const char *header, 502 const struct i915_gem_context_coredump *ctx) 503 { 504 err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n", 505 header, ctx->comm, ctx->pid, ctx->sched_attr.priority, 506 ctx->guilty, ctx->active, 507 ctx->total_runtime, ctx->avg_runtime); 508 err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno); 509 } 510 511 static struct i915_vma_coredump * 512 __find_vma(struct i915_vma_coredump *vma, const char *name) 513 { 514 while (vma) { 515 if (strcmp(vma->name, name) == 0) 516 return vma; 517 vma = vma->next; 518 } 519 520 return NULL; 521 } 522 523 struct i915_vma_coredump * 524 intel_gpu_error_find_batch(const struct intel_engine_coredump *ee) 525 { 526 return __find_vma(ee->vma, "batch"); 527 } 528 529 static void error_print_engine(struct drm_i915_error_state_buf *m, 530 const struct intel_engine_coredump *ee) 531 { 532 struct i915_vma_coredump *batch; 533 int n; 534 535 err_printf(m, "%s command stream:\n", ee->engine->name); 536 err_printf(m, " CCID: 0x%08x\n", ee->ccid); 537 err_printf(m, " START: 0x%08x\n", ee->start); 538 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); 539 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", 540 ee->tail, ee->rq_post, ee->rq_tail); 541 err_printf(m, " CTL: 0x%08x\n", ee->ctl); 542 err_printf(m, " MODE: 0x%08x\n", ee->mode); 543 err_printf(m, " HWS: 0x%08x\n", ee->hws); 544 err_printf(m, " ACTHD: 0x%08x %08x\n", 545 (u32)(ee->acthd>>32), (u32)ee->acthd); 546 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir); 547 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr); 548 err_printf(m, " ESR: 0x%08x\n", ee->esr); 549 550 error_print_instdone(m, ee); 551 552 batch = intel_gpu_error_find_batch(ee); 553 if (batch) { 554 u64 start = batch->gtt_offset; 555 u64 end = start + batch->gtt_size; 556 557 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", 558 upper_32_bits(start), lower_32_bits(start), 559 upper_32_bits(end), lower_32_bits(end)); 560 } 561 if (GRAPHICS_VER(m->i915) >= 4) { 562 err_printf(m, " BBADDR: 0x%08x_%08x\n", 563 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr); 564 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate); 565 err_printf(m, " INSTPS: 0x%08x\n", ee->instps); 566 } 567 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm); 568 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr), 569 lower_32_bits(ee->faddr)); 570 if (GRAPHICS_VER(m->i915) >= 6) { 571 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi); 572 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg); 573 } 574 if (GRAPHICS_VER(m->i915) >= 11) { 575 err_printf(m, " NOPID: 0x%08x\n", ee->nopid); 576 err_printf(m, " EXCC: 0x%08x\n", ee->excc); 577 err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl); 578 err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop); 579 err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl); 580 err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi); 581 err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo); 582 } 583 if (HAS_PPGTT(m->i915)) { 584 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); 585 586 if (GRAPHICS_VER(m->i915) >= 8) { 587 int i; 588 for (i = 0; i < 4; i++) 589 err_printf(m, " PDP%d: 0x%016llx\n", 590 i, ee->vm_info.pdp[i]); 591 } else { 592 err_printf(m, " PP_DIR_BASE: 0x%08x\n", 593 ee->vm_info.pp_dir_base); 594 } 595 } 596 597 for (n = 0; n < ee->num_ports; n++) { 598 err_printf(m, " ELSP[%d]:", n); 599 error_print_request(m, " ", &ee->execlist[n]); 600 } 601 } 602 603 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 604 { 605 va_list args; 606 607 va_start(args, f); 608 i915_error_vprintf(e, f, args); 609 va_end(args); 610 } 611 612 void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m, 613 const struct intel_engine_cs *engine, 614 const struct i915_vma_coredump *vma) 615 { 616 char out[ASCII85_BUFSZ]; 617 struct page *page; 618 619 if (!vma) 620 return; 621 622 err_printf(m, "%s --- %s = 0x%08x %08x\n", 623 engine ? engine->name : "global", vma->name, 624 upper_32_bits(vma->gtt_offset), 625 lower_32_bits(vma->gtt_offset)); 626 627 if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) 628 err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); 629 630 err_compression_marker(m); 631 list_for_each_entry(page, &vma->page_list, lru) { 632 int i, len; 633 const u32 *addr = page_address(page); 634 635 len = PAGE_SIZE; 636 if (page == list_last_entry(&vma->page_list, typeof(*page), lru)) 637 len -= vma->unused; 638 len = ascii85_encode_len(len); 639 640 for (i = 0; i < len; i++) 641 err_puts(m, ascii85_encode(addr[i], out)); 642 } 643 err_puts(m, "\n"); 644 } 645 646 static void err_print_capabilities(struct drm_i915_error_state_buf *m, 647 struct i915_gpu_coredump *error) 648 { 649 struct drm_printer p = i915_error_printer(m); 650 651 intel_device_info_print(&error->device_info, &error->runtime_info, &p); 652 intel_display_device_info_print(&error->display_device_info, 653 &error->display_runtime_info, &p); 654 intel_driver_caps_print(&error->driver_caps, &p); 655 } 656 657 static void err_print_params(struct drm_i915_error_state_buf *m, 658 const struct i915_params *params) 659 { 660 struct drm_printer p = i915_error_printer(m); 661 662 i915_params_dump(params, &p); 663 } 664 665 static void err_print_pciid(struct drm_i915_error_state_buf *m, 666 struct drm_i915_private *i915) 667 { 668 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 669 670 err_printf(m, "PCI ID: 0x%04x\n", pdev->device); 671 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); 672 err_printf(m, "PCI Subsystem: %04x:%04x\n", 673 pdev->subsystem_vendor, 674 pdev->subsystem_device); 675 } 676 677 static void err_print_guc_ctb(struct drm_i915_error_state_buf *m, 678 const char *name, 679 const struct intel_ctb_coredump *ctb) 680 { 681 if (!ctb->size) 682 return; 683 684 err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n", 685 name, ctb->raw_status, ctb->raw_head, ctb->raw_tail, 686 ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size); 687 } 688 689 static void err_print_uc(struct drm_i915_error_state_buf *m, 690 const struct intel_uc_coredump *error_uc) 691 { 692 struct drm_printer p = i915_error_printer(m); 693 694 intel_uc_fw_dump(&error_uc->guc_fw, &p); 695 intel_uc_fw_dump(&error_uc->huc_fw, &p); 696 err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp); 697 intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log); 698 err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence); 699 err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0); 700 err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1); 701 intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb); 702 } 703 704 static void err_free_sgl(struct scatterlist *sgl) 705 { 706 while (sgl) { 707 struct scatterlist *sg; 708 709 for (sg = sgl; !sg_is_chain(sg); sg++) { 710 kfree(sg_virt(sg)); 711 if (sg_is_last(sg)) 712 break; 713 } 714 715 sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg); 716 free_page((unsigned long)sgl); 717 sgl = sg; 718 } 719 } 720 721 static void err_print_gt_info(struct drm_i915_error_state_buf *m, 722 struct intel_gt_coredump *gt) 723 { 724 struct drm_printer p = i915_error_printer(m); 725 726 intel_gt_info_print(>->info, &p); 727 intel_sseu_print_topology(gt->_gt->i915, >->info.sseu, &p); 728 } 729 730 static void err_print_gt_display(struct drm_i915_error_state_buf *m, 731 struct intel_gt_coredump *gt) 732 { 733 err_printf(m, "IER: 0x%08x\n", gt->ier); 734 err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr); 735 } 736 737 static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m, 738 struct intel_gt_coredump *gt) 739 { 740 int i; 741 742 err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake)); 743 err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n", 744 gt->clock_frequency, gt->clock_period_ns); 745 err_printf(m, "EIR: 0x%08x\n", gt->eir); 746 err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er); 747 748 for (i = 0; i < gt->ngtier; i++) 749 err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]); 750 } 751 752 static void err_print_gt_global(struct drm_i915_error_state_buf *m, 753 struct intel_gt_coredump *gt) 754 { 755 err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake); 756 757 if (IS_GRAPHICS_VER(m->i915, 6, 11)) { 758 err_printf(m, "ERROR: 0x%08x\n", gt->error); 759 err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg); 760 } 761 762 if (GRAPHICS_VER(m->i915) >= 8) 763 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", 764 gt->fault_data1, gt->fault_data0); 765 766 if (GRAPHICS_VER(m->i915) == 7) 767 err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int); 768 769 if (IS_GRAPHICS_VER(m->i915, 8, 11)) 770 err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache); 771 772 if (GRAPHICS_VER(m->i915) == 12) 773 err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err); 774 775 if (GRAPHICS_VER(m->i915) >= 12) { 776 int i; 777 778 for (i = 0; i < I915_MAX_SFC; i++) { 779 /* 780 * SFC_DONE resides in the VD forcewake domain, so it 781 * only exists if the corresponding VCS engine is 782 * present. 783 */ 784 if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 || 785 !HAS_ENGINE(gt->_gt, _VCS(i * 2))) 786 continue; 787 788 err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, 789 gt->sfc_done[i]); 790 } 791 792 err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done); 793 } 794 } 795 796 static void err_print_gt_fences(struct drm_i915_error_state_buf *m, 797 struct intel_gt_coredump *gt) 798 { 799 int i; 800 801 for (i = 0; i < gt->nfence; i++) 802 err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]); 803 } 804 805 static void err_print_gt_engines(struct drm_i915_error_state_buf *m, 806 struct intel_gt_coredump *gt) 807 { 808 const struct intel_engine_coredump *ee; 809 810 for (ee = gt->engine; ee; ee = ee->next) { 811 const struct i915_vma_coredump *vma; 812 813 if (gt->uc && gt->uc->guc.is_guc_capture) { 814 if (ee->guc_capture_node) 815 intel_guc_capture_print_engine_node(m, ee); 816 else 817 err_printf(m, " Missing GuC capture node for %s\n", 818 ee->engine->name); 819 } else { 820 error_print_engine(m, ee); 821 } 822 823 err_printf(m, " hung: %u\n", ee->hung); 824 err_printf(m, " engine reset count: %u\n", ee->reset_count); 825 error_print_context(m, " Active context: ", &ee->context); 826 827 for (vma = ee->vma; vma; vma = vma->next) 828 intel_gpu_error_print_vma(m, ee->engine, vma); 829 } 830 831 } 832 833 static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, 834 struct i915_gpu_coredump *error) 835 { 836 const struct intel_engine_coredump *ee; 837 struct timespec64 ts; 838 839 if (*error->error_msg) 840 err_printf(m, "%s\n", error->error_msg); 841 err_printf(m, "Kernel: %s %s\n", 842 init_utsname()->release, 843 init_utsname()->machine); 844 err_printf(m, "Driver: %s\n", DRIVER_DATE); 845 ts = ktime_to_timespec64(error->time); 846 err_printf(m, "Time: %lld s %ld us\n", 847 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 848 ts = ktime_to_timespec64(error->boottime); 849 err_printf(m, "Boottime: %lld s %ld us\n", 850 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 851 ts = ktime_to_timespec64(error->uptime); 852 err_printf(m, "Uptime: %lld s %ld us\n", 853 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 854 err_printf(m, "Capture: %lu jiffies; %d ms ago\n", 855 error->capture, jiffies_to_msecs(jiffies - error->capture)); 856 857 for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next) 858 err_printf(m, "Active process (on ring %s): %s [%d]\n", 859 ee->engine->name, 860 ee->context.comm, 861 ee->context.pid); 862 863 err_printf(m, "Reset count: %u\n", error->reset_count); 864 err_printf(m, "Suspend count: %u\n", error->suspend_count); 865 err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); 866 err_printf(m, "Subplatform: 0x%x\n", 867 intel_subplatform(&error->runtime_info, 868 error->device_info.platform)); 869 err_print_pciid(m, m->i915); 870 871 err_printf(m, "IOMMU enabled?: %d\n", error->iommu); 872 873 intel_dmc_print_error_state(m, m->i915); 874 875 err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock)); 876 err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended)); 877 878 if (error->gt) { 879 bool print_guc_capture = false; 880 881 if (error->gt->uc && error->gt->uc->guc.is_guc_capture) 882 print_guc_capture = true; 883 884 err_print_gt_display(m, error->gt); 885 err_print_gt_global_nonguc(m, error->gt); 886 err_print_gt_fences(m, error->gt); 887 888 /* 889 * GuC dumped global, eng-class and eng-instance registers together 890 * as part of engine state dump so we print in err_print_gt_engines 891 */ 892 if (!print_guc_capture) 893 err_print_gt_global(m, error->gt); 894 895 err_print_gt_engines(m, error->gt); 896 897 if (error->gt->uc) 898 err_print_uc(m, error->gt->uc); 899 900 err_print_gt_info(m, error->gt); 901 } 902 903 if (error->overlay) 904 intel_overlay_print_error_state(m, error->overlay); 905 906 err_print_capabilities(m, error); 907 err_print_params(m, &error->params); 908 } 909 910 static int err_print_to_sgl(struct i915_gpu_coredump *error) 911 { 912 struct drm_i915_error_state_buf m; 913 914 if (IS_ERR(error)) 915 return PTR_ERR(error); 916 917 if (READ_ONCE(error->sgl)) 918 return 0; 919 920 memset(&m, 0, sizeof(m)); 921 m.i915 = error->i915; 922 923 __err_print_to_sgl(&m, error); 924 925 if (m.buf) { 926 __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter); 927 m.bytes = 0; 928 m.buf = NULL; 929 } 930 if (m.cur) { 931 GEM_BUG_ON(m.end < m.cur); 932 sg_mark_end(m.cur - 1); 933 } 934 GEM_BUG_ON(m.sgl && !m.cur); 935 936 if (m.err) { 937 err_free_sgl(m.sgl); 938 return m.err; 939 } 940 941 if (cmpxchg(&error->sgl, NULL, m.sgl)) 942 err_free_sgl(m.sgl); 943 944 return 0; 945 } 946 947 ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, 948 char *buf, loff_t off, size_t rem) 949 { 950 struct scatterlist *sg; 951 size_t count; 952 loff_t pos; 953 int err; 954 955 if (!error || !rem) 956 return 0; 957 958 err = err_print_to_sgl(error); 959 if (err) 960 return err; 961 962 sg = READ_ONCE(error->fit); 963 if (!sg || off < sg->dma_address) 964 sg = error->sgl; 965 if (!sg) 966 return 0; 967 968 pos = sg->dma_address; 969 count = 0; 970 do { 971 size_t len, start; 972 973 if (sg_is_chain(sg)) { 974 sg = sg_chain_ptr(sg); 975 GEM_BUG_ON(sg_is_chain(sg)); 976 } 977 978 len = sg->length; 979 if (pos + len <= off) { 980 pos += len; 981 continue; 982 } 983 984 start = sg->offset; 985 if (pos < off) { 986 GEM_BUG_ON(off - pos > len); 987 len -= off - pos; 988 start += off - pos; 989 pos = off; 990 } 991 992 len = min(len, rem); 993 GEM_BUG_ON(!len || len > sg->length); 994 995 memcpy(buf, page_address(sg_page(sg)) + start, len); 996 997 count += len; 998 pos += len; 999 1000 buf += len; 1001 rem -= len; 1002 if (!rem) { 1003 WRITE_ONCE(error->fit, sg); 1004 break; 1005 } 1006 } while (!sg_is_last(sg++)); 1007 1008 return count; 1009 } 1010 1011 static void i915_vma_coredump_free(struct i915_vma_coredump *vma) 1012 { 1013 while (vma) { 1014 struct i915_vma_coredump *next = vma->next; 1015 struct page *page, *n; 1016 1017 list_for_each_entry_safe(page, n, &vma->page_list, lru) { 1018 list_del_init(&page->lru); 1019 __free_page(page); 1020 } 1021 1022 kfree(vma); 1023 vma = next; 1024 } 1025 } 1026 1027 static void cleanup_params(struct i915_gpu_coredump *error) 1028 { 1029 i915_params_free(&error->params); 1030 } 1031 1032 static void cleanup_uc(struct intel_uc_coredump *uc) 1033 { 1034 kfree(uc->guc_fw.file_selected.path); 1035 kfree(uc->huc_fw.file_selected.path); 1036 kfree(uc->guc_fw.file_wanted.path); 1037 kfree(uc->huc_fw.file_wanted.path); 1038 i915_vma_coredump_free(uc->guc.vma_log); 1039 i915_vma_coredump_free(uc->guc.vma_ctb); 1040 1041 kfree(uc); 1042 } 1043 1044 static void cleanup_gt(struct intel_gt_coredump *gt) 1045 { 1046 while (gt->engine) { 1047 struct intel_engine_coredump *ee = gt->engine; 1048 1049 gt->engine = ee->next; 1050 1051 i915_vma_coredump_free(ee->vma); 1052 intel_guc_capture_free_node(ee); 1053 kfree(ee); 1054 } 1055 1056 if (gt->uc) 1057 cleanup_uc(gt->uc); 1058 1059 kfree(gt); 1060 } 1061 1062 void __i915_gpu_coredump_free(struct kref *error_ref) 1063 { 1064 struct i915_gpu_coredump *error = 1065 container_of(error_ref, typeof(*error), ref); 1066 1067 while (error->gt) { 1068 struct intel_gt_coredump *gt = error->gt; 1069 1070 error->gt = gt->next; 1071 cleanup_gt(gt); 1072 } 1073 1074 kfree(error->overlay); 1075 1076 cleanup_params(error); 1077 1078 err_free_sgl(error->sgl); 1079 kfree(error); 1080 } 1081 1082 static struct i915_vma_coredump * 1083 i915_vma_coredump_create(const struct intel_gt *gt, 1084 const struct i915_vma_resource *vma_res, 1085 struct i915_vma_compress *compress, 1086 const char *name) 1087 1088 { 1089 struct i915_ggtt *ggtt = gt->ggtt; 1090 const u64 slot = ggtt->error_capture.start; 1091 struct i915_vma_coredump *dst; 1092 struct sgt_iter iter; 1093 int ret; 1094 1095 might_sleep(); 1096 1097 if (!vma_res || !vma_res->bi.pages || !compress) 1098 return NULL; 1099 1100 dst = kmalloc(sizeof(*dst), ALLOW_FAIL); 1101 if (!dst) 1102 return NULL; 1103 1104 if (!compress_start(compress)) { 1105 kfree(dst); 1106 return NULL; 1107 } 1108 1109 INIT_LIST_HEAD(&dst->page_list); 1110 strcpy(dst->name, name); 1111 dst->next = NULL; 1112 1113 dst->gtt_offset = vma_res->start; 1114 dst->gtt_size = vma_res->node_size; 1115 dst->gtt_page_sizes = vma_res->page_sizes_gtt; 1116 dst->unused = 0; 1117 1118 ret = -EINVAL; 1119 if (drm_mm_node_allocated(&ggtt->error_capture)) { 1120 void __iomem *s; 1121 dma_addr_t dma; 1122 1123 for_each_sgt_daddr(dma, iter, vma_res->bi.pages) { 1124 mutex_lock(&ggtt->error_mutex); 1125 if (ggtt->vm.raw_insert_page) 1126 ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot, 1127 i915_gem_get_pat_index(gt->i915, 1128 I915_CACHE_NONE), 1129 0); 1130 else 1131 ggtt->vm.insert_page(&ggtt->vm, dma, slot, 1132 i915_gem_get_pat_index(gt->i915, 1133 I915_CACHE_NONE), 1134 0); 1135 mb(); 1136 1137 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); 1138 ret = compress_page(compress, 1139 (void __force *)s, dst, 1140 true); 1141 io_mapping_unmap(s); 1142 1143 mb(); 1144 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); 1145 mutex_unlock(&ggtt->error_mutex); 1146 if (ret) 1147 break; 1148 } 1149 } else if (vma_res->bi.lmem) { 1150 struct intel_memory_region *mem = vma_res->mr; 1151 dma_addr_t dma; 1152 1153 for_each_sgt_daddr(dma, iter, vma_res->bi.pages) { 1154 dma_addr_t offset = dma - mem->region.start; 1155 void __iomem *s; 1156 1157 if (offset + PAGE_SIZE > mem->io_size) { 1158 ret = -EINVAL; 1159 break; 1160 } 1161 1162 s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE); 1163 ret = compress_page(compress, 1164 (void __force *)s, dst, 1165 true); 1166 io_mapping_unmap(s); 1167 if (ret) 1168 break; 1169 } 1170 } else { 1171 struct page *page; 1172 1173 for_each_sgt_page(page, iter, vma_res->bi.pages) { 1174 void *s; 1175 1176 drm_clflush_pages(&page, 1); 1177 1178 s = kmap_local_page(page); 1179 ret = compress_page(compress, s, dst, false); 1180 kunmap_local(s); 1181 1182 drm_clflush_pages(&page, 1); 1183 1184 if (ret) 1185 break; 1186 } 1187 } 1188 1189 if (ret || compress_flush(compress, dst)) { 1190 struct page *page, *n; 1191 1192 list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) { 1193 list_del_init(&page->lru); 1194 pool_free(&compress->pool, page_address(page)); 1195 } 1196 1197 kfree(dst); 1198 dst = NULL; 1199 } 1200 compress_finish(compress); 1201 1202 return dst; 1203 } 1204 1205 static void gt_record_fences(struct intel_gt_coredump *gt) 1206 { 1207 struct i915_ggtt *ggtt = gt->_gt->ggtt; 1208 struct intel_uncore *uncore = gt->_gt->uncore; 1209 int i; 1210 1211 if (GRAPHICS_VER(uncore->i915) >= 6) { 1212 for (i = 0; i < ggtt->num_fences; i++) 1213 gt->fence[i] = 1214 intel_uncore_read64(uncore, 1215 FENCE_REG_GEN6_LO(i)); 1216 } else if (GRAPHICS_VER(uncore->i915) >= 4) { 1217 for (i = 0; i < ggtt->num_fences; i++) 1218 gt->fence[i] = 1219 intel_uncore_read64(uncore, 1220 FENCE_REG_965_LO(i)); 1221 } else { 1222 for (i = 0; i < ggtt->num_fences; i++) 1223 gt->fence[i] = 1224 intel_uncore_read(uncore, FENCE_REG(i)); 1225 } 1226 gt->nfence = i; 1227 } 1228 1229 static void engine_record_registers(struct intel_engine_coredump *ee) 1230 { 1231 const struct intel_engine_cs *engine = ee->engine; 1232 struct drm_i915_private *i915 = engine->i915; 1233 1234 if (GRAPHICS_VER(i915) >= 6) { 1235 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); 1236 1237 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) 1238 ee->fault_reg = intel_gt_mcr_read_any(engine->gt, 1239 XEHP_RING_FAULT_REG); 1240 else if (GRAPHICS_VER(i915) >= 12) 1241 ee->fault_reg = intel_uncore_read(engine->uncore, 1242 GEN12_RING_FAULT_REG); 1243 else if (GRAPHICS_VER(i915) >= 8) 1244 ee->fault_reg = intel_uncore_read(engine->uncore, 1245 GEN8_RING_FAULT_REG); 1246 else 1247 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); 1248 } 1249 1250 if (GRAPHICS_VER(i915) >= 4) { 1251 ee->esr = ENGINE_READ(engine, RING_ESR); 1252 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); 1253 ee->ipeir = ENGINE_READ(engine, RING_IPEIR); 1254 ee->ipehr = ENGINE_READ(engine, RING_IPEHR); 1255 ee->instps = ENGINE_READ(engine, RING_INSTPS); 1256 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); 1257 ee->ccid = ENGINE_READ(engine, CCID); 1258 if (GRAPHICS_VER(i915) >= 8) { 1259 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32; 1260 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; 1261 } 1262 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE); 1263 } else { 1264 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX); 1265 ee->ipeir = ENGINE_READ(engine, IPEIR); 1266 ee->ipehr = ENGINE_READ(engine, IPEHR); 1267 } 1268 1269 if (GRAPHICS_VER(i915) >= 11) { 1270 ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL); 1271 ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP); 1272 ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL); 1273 ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW); 1274 ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD); 1275 ee->nopid = ENGINE_READ(engine, RING_NOPID); 1276 ee->excc = ENGINE_READ(engine, RING_EXCC); 1277 } 1278 1279 intel_engine_get_instdone(engine, &ee->instdone); 1280 1281 ee->instpm = ENGINE_READ(engine, RING_INSTPM); 1282 ee->acthd = intel_engine_get_active_head(engine); 1283 ee->start = ENGINE_READ(engine, RING_START); 1284 ee->head = ENGINE_READ(engine, RING_HEAD); 1285 ee->tail = ENGINE_READ(engine, RING_TAIL); 1286 ee->ctl = ENGINE_READ(engine, RING_CTL); 1287 if (GRAPHICS_VER(i915) > 2) 1288 ee->mode = ENGINE_READ(engine, RING_MI_MODE); 1289 1290 if (!HWS_NEEDS_PHYSICAL(i915)) { 1291 i915_reg_t mmio; 1292 1293 if (GRAPHICS_VER(i915) == 7) { 1294 switch (engine->id) { 1295 default: 1296 MISSING_CASE(engine->id); 1297 fallthrough; 1298 case RCS0: 1299 mmio = RENDER_HWS_PGA_GEN7; 1300 break; 1301 case BCS0: 1302 mmio = BLT_HWS_PGA_GEN7; 1303 break; 1304 case VCS0: 1305 mmio = BSD_HWS_PGA_GEN7; 1306 break; 1307 case VECS0: 1308 mmio = VEBOX_HWS_PGA_GEN7; 1309 break; 1310 } 1311 } else if (GRAPHICS_VER(engine->i915) == 6) { 1312 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 1313 } else { 1314 /* XXX: gen8 returns to sanity */ 1315 mmio = RING_HWS_PGA(engine->mmio_base); 1316 } 1317 1318 ee->hws = intel_uncore_read(engine->uncore, mmio); 1319 } 1320 1321 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine); 1322 1323 if (HAS_PPGTT(i915)) { 1324 int i; 1325 1326 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7); 1327 1328 if (GRAPHICS_VER(i915) == 6) { 1329 ee->vm_info.pp_dir_base = 1330 ENGINE_READ(engine, RING_PP_DIR_BASE_READ); 1331 } else if (GRAPHICS_VER(i915) == 7) { 1332 ee->vm_info.pp_dir_base = 1333 ENGINE_READ(engine, RING_PP_DIR_BASE); 1334 } else if (GRAPHICS_VER(i915) >= 8) { 1335 u32 base = engine->mmio_base; 1336 1337 for (i = 0; i < 4; i++) { 1338 ee->vm_info.pdp[i] = 1339 intel_uncore_read(engine->uncore, 1340 GEN8_RING_PDP_UDW(base, i)); 1341 ee->vm_info.pdp[i] <<= 32; 1342 ee->vm_info.pdp[i] |= 1343 intel_uncore_read(engine->uncore, 1344 GEN8_RING_PDP_LDW(base, i)); 1345 } 1346 } 1347 } 1348 } 1349 1350 static void record_request(const struct i915_request *request, 1351 struct i915_request_coredump *erq) 1352 { 1353 erq->flags = request->fence.flags; 1354 erq->context = request->fence.context; 1355 erq->seqno = request->fence.seqno; 1356 erq->sched_attr = request->sched.attr; 1357 erq->head = request->head; 1358 erq->tail = request->tail; 1359 1360 erq->pid = 0; 1361 rcu_read_lock(); 1362 if (!intel_context_is_closed(request->context)) { 1363 const struct i915_gem_context *ctx; 1364 1365 ctx = rcu_dereference(request->context->gem_context); 1366 if (ctx) 1367 erq->pid = pid_nr(ctx->pid); 1368 } 1369 rcu_read_unlock(); 1370 } 1371 1372 static void engine_record_execlists(struct intel_engine_coredump *ee) 1373 { 1374 const struct intel_engine_execlists * const el = &ee->engine->execlists; 1375 struct i915_request * const *port = el->active; 1376 unsigned int n = 0; 1377 1378 while (*port) 1379 record_request(*port++, &ee->execlist[n++]); 1380 1381 ee->num_ports = n; 1382 } 1383 1384 static bool record_context(struct i915_gem_context_coredump *e, 1385 struct intel_context *ce) 1386 { 1387 struct i915_gem_context *ctx; 1388 struct task_struct *task; 1389 bool simulated; 1390 1391 rcu_read_lock(); 1392 ctx = rcu_dereference(ce->gem_context); 1393 if (ctx && !kref_get_unless_zero(&ctx->ref)) 1394 ctx = NULL; 1395 rcu_read_unlock(); 1396 if (!ctx) 1397 return true; 1398 1399 rcu_read_lock(); 1400 task = pid_task(ctx->pid, PIDTYPE_PID); 1401 if (task) { 1402 strcpy(e->comm, task->comm); 1403 e->pid = task->pid; 1404 } 1405 rcu_read_unlock(); 1406 1407 e->sched_attr = ctx->sched; 1408 e->guilty = atomic_read(&ctx->guilty_count); 1409 e->active = atomic_read(&ctx->active_count); 1410 e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ? 1411 *ce->timeline->hwsp_seqno : ~0U; 1412 1413 e->total_runtime = intel_context_get_total_runtime_ns(ce); 1414 e->avg_runtime = intel_context_get_avg_runtime_ns(ce); 1415 1416 simulated = i915_gem_context_no_error_capture(ctx); 1417 1418 i915_gem_context_put(ctx); 1419 return simulated; 1420 } 1421 1422 struct intel_engine_capture_vma { 1423 struct intel_engine_capture_vma *next; 1424 struct i915_vma_resource *vma_res; 1425 char name[16]; 1426 bool lockdep_cookie; 1427 }; 1428 1429 static struct intel_engine_capture_vma * 1430 capture_vma_snapshot(struct intel_engine_capture_vma *next, 1431 struct i915_vma_resource *vma_res, 1432 gfp_t gfp, const char *name) 1433 { 1434 struct intel_engine_capture_vma *c; 1435 1436 if (!vma_res) 1437 return next; 1438 1439 c = kmalloc(sizeof(*c), gfp); 1440 if (!c) 1441 return next; 1442 1443 if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) { 1444 kfree(c); 1445 return next; 1446 } 1447 1448 strcpy(c->name, name); 1449 c->vma_res = i915_vma_resource_get(vma_res); 1450 1451 c->next = next; 1452 return c; 1453 } 1454 1455 static struct intel_engine_capture_vma * 1456 capture_vma(struct intel_engine_capture_vma *next, 1457 struct i915_vma *vma, 1458 const char *name, 1459 gfp_t gfp) 1460 { 1461 if (!vma) 1462 return next; 1463 1464 /* 1465 * If the vma isn't pinned, then the vma should be snapshotted 1466 * to a struct i915_vma_snapshot at command submission time. 1467 * Not here. 1468 */ 1469 if (GEM_WARN_ON(!i915_vma_is_pinned(vma))) 1470 return next; 1471 1472 next = capture_vma_snapshot(next, vma->resource, gfp, name); 1473 1474 return next; 1475 } 1476 1477 static struct intel_engine_capture_vma * 1478 capture_user(struct intel_engine_capture_vma *capture, 1479 const struct i915_request *rq, 1480 gfp_t gfp) 1481 { 1482 struct i915_capture_list *c; 1483 1484 for (c = rq->capture_list; c; c = c->next) 1485 capture = capture_vma_snapshot(capture, c->vma_res, gfp, 1486 "user"); 1487 1488 return capture; 1489 } 1490 1491 static void add_vma(struct intel_engine_coredump *ee, 1492 struct i915_vma_coredump *vma) 1493 { 1494 if (vma) { 1495 vma->next = ee->vma; 1496 ee->vma = vma; 1497 } 1498 } 1499 1500 static struct i915_vma_coredump * 1501 create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma, 1502 const char *name, struct i915_vma_compress *compress) 1503 { 1504 struct i915_vma_coredump *ret = NULL; 1505 struct i915_vma_resource *vma_res; 1506 bool lockdep_cookie; 1507 1508 if (!vma) 1509 return NULL; 1510 1511 vma_res = vma->resource; 1512 1513 if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) { 1514 ret = i915_vma_coredump_create(gt, vma_res, compress, name); 1515 i915_vma_resource_unhold(vma_res, lockdep_cookie); 1516 } 1517 1518 return ret; 1519 } 1520 1521 static void add_vma_coredump(struct intel_engine_coredump *ee, 1522 const struct intel_gt *gt, 1523 struct i915_vma *vma, 1524 const char *name, 1525 struct i915_vma_compress *compress) 1526 { 1527 add_vma(ee, create_vma_coredump(gt, vma, name, compress)); 1528 } 1529 1530 struct intel_engine_coredump * 1531 intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags) 1532 { 1533 struct intel_engine_coredump *ee; 1534 1535 ee = kzalloc(sizeof(*ee), gfp); 1536 if (!ee) 1537 return NULL; 1538 1539 ee->engine = engine; 1540 1541 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) { 1542 engine_record_registers(ee); 1543 engine_record_execlists(ee); 1544 } 1545 1546 return ee; 1547 } 1548 1549 static struct intel_engine_capture_vma * 1550 engine_coredump_add_context(struct intel_engine_coredump *ee, 1551 struct intel_context *ce, 1552 gfp_t gfp) 1553 { 1554 struct intel_engine_capture_vma *vma = NULL; 1555 1556 ee->simulated |= record_context(&ee->context, ce); 1557 if (ee->simulated) 1558 return NULL; 1559 1560 /* 1561 * We need to copy these to an anonymous buffer 1562 * as the simplest method to avoid being overwritten 1563 * by userspace. 1564 */ 1565 vma = capture_vma(vma, ce->ring->vma, "ring", gfp); 1566 vma = capture_vma(vma, ce->state, "HW context", gfp); 1567 1568 return vma; 1569 } 1570 1571 struct intel_engine_capture_vma * 1572 intel_engine_coredump_add_request(struct intel_engine_coredump *ee, 1573 struct i915_request *rq, 1574 gfp_t gfp) 1575 { 1576 struct intel_engine_capture_vma *vma; 1577 1578 vma = engine_coredump_add_context(ee, rq->context, gfp); 1579 if (!vma) 1580 return NULL; 1581 1582 /* 1583 * We need to copy these to an anonymous buffer 1584 * as the simplest method to avoid being overwritten 1585 * by userspace. 1586 */ 1587 vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch"); 1588 vma = capture_user(vma, rq, gfp); 1589 1590 ee->rq_head = rq->head; 1591 ee->rq_post = rq->postfix; 1592 ee->rq_tail = rq->tail; 1593 1594 return vma; 1595 } 1596 1597 void 1598 intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, 1599 struct intel_engine_capture_vma *capture, 1600 struct i915_vma_compress *compress) 1601 { 1602 const struct intel_engine_cs *engine = ee->engine; 1603 1604 while (capture) { 1605 struct intel_engine_capture_vma *this = capture; 1606 struct i915_vma_resource *vma_res = this->vma_res; 1607 1608 add_vma(ee, 1609 i915_vma_coredump_create(engine->gt, vma_res, 1610 compress, this->name)); 1611 1612 i915_vma_resource_unhold(vma_res, this->lockdep_cookie); 1613 i915_vma_resource_put(vma_res); 1614 1615 capture = this->next; 1616 kfree(this); 1617 } 1618 1619 add_vma_coredump(ee, engine->gt, engine->status_page.vma, 1620 "HW Status", compress); 1621 1622 add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma, 1623 "WA context", compress); 1624 } 1625 1626 static struct intel_engine_coredump * 1627 capture_engine(struct intel_engine_cs *engine, 1628 struct i915_vma_compress *compress, 1629 u32 dump_flags) 1630 { 1631 struct intel_engine_capture_vma *capture = NULL; 1632 struct intel_engine_coredump *ee; 1633 struct intel_context *ce = NULL; 1634 struct i915_request *rq = NULL; 1635 1636 ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags); 1637 if (!ee) 1638 return NULL; 1639 1640 intel_engine_get_hung_entity(engine, &ce, &rq); 1641 if (rq && !i915_request_started(rq)) 1642 drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n", 1643 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id); 1644 1645 if (rq) { 1646 capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); 1647 i915_request_put(rq); 1648 } else if (ce) { 1649 capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL); 1650 } 1651 1652 if (capture) { 1653 intel_engine_coredump_add_vma(ee, capture, compress); 1654 1655 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE) 1656 intel_guc_capture_get_matching_node(engine->gt, ee, ce); 1657 } else { 1658 kfree(ee); 1659 ee = NULL; 1660 } 1661 1662 return ee; 1663 } 1664 1665 static void 1666 gt_record_engines(struct intel_gt_coredump *gt, 1667 intel_engine_mask_t engine_mask, 1668 struct i915_vma_compress *compress, 1669 u32 dump_flags) 1670 { 1671 struct intel_engine_cs *engine; 1672 enum intel_engine_id id; 1673 1674 for_each_engine(engine, gt->_gt, id) { 1675 struct intel_engine_coredump *ee; 1676 1677 /* Refill our page pool before entering atomic section */ 1678 pool_refill(&compress->pool, ALLOW_FAIL); 1679 1680 ee = capture_engine(engine, compress, dump_flags); 1681 if (!ee) 1682 continue; 1683 1684 ee->hung = engine->mask & engine_mask; 1685 1686 gt->simulated |= ee->simulated; 1687 if (ee->simulated) { 1688 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE) 1689 intel_guc_capture_free_node(ee); 1690 kfree(ee); 1691 continue; 1692 } 1693 1694 ee->next = gt->engine; 1695 gt->engine = ee; 1696 } 1697 } 1698 1699 static void gt_record_guc_ctb(struct intel_ctb_coredump *saved, 1700 const struct intel_guc_ct_buffer *ctb, 1701 const void *blob_ptr, struct intel_guc *guc) 1702 { 1703 if (!ctb || !ctb->desc) 1704 return; 1705 1706 saved->raw_status = ctb->desc->status; 1707 saved->raw_head = ctb->desc->head; 1708 saved->raw_tail = ctb->desc->tail; 1709 saved->head = ctb->head; 1710 saved->tail = ctb->tail; 1711 saved->size = ctb->size; 1712 saved->desc_offset = ((void *)ctb->desc) - blob_ptr; 1713 saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr; 1714 } 1715 1716 static struct intel_uc_coredump * 1717 gt_record_uc(struct intel_gt_coredump *gt, 1718 struct i915_vma_compress *compress) 1719 { 1720 const struct intel_uc *uc = >->_gt->uc; 1721 struct intel_uc_coredump *error_uc; 1722 1723 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL); 1724 if (!error_uc) 1725 return NULL; 1726 1727 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw)); 1728 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw)); 1729 1730 error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL); 1731 error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL); 1732 error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL); 1733 error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL); 1734 1735 /* 1736 * Save the GuC log and include a timestamp reference for converting the 1737 * log times to system times (in conjunction with the error->boottime and 1738 * gt->clock_frequency fields saved elsewhere). 1739 */ 1740 error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP); 1741 error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma, 1742 "GuC log buffer", compress); 1743 error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma, 1744 "GuC CT buffer", compress); 1745 error_uc->guc.last_fence = uc->guc.ct.requests.last_fence; 1746 gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send, 1747 uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc); 1748 gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv, 1749 uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc); 1750 1751 return error_uc; 1752 } 1753 1754 /* Capture display registers. */ 1755 static void gt_record_display_regs(struct intel_gt_coredump *gt) 1756 { 1757 struct intel_uncore *uncore = gt->_gt->uncore; 1758 struct drm_i915_private *i915 = uncore->i915; 1759 1760 if (GRAPHICS_VER(i915) >= 6) 1761 gt->derrmr = intel_uncore_read(uncore, DERRMR); 1762 1763 if (GRAPHICS_VER(i915) >= 8) 1764 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); 1765 else if (IS_VALLEYVIEW(i915)) 1766 gt->ier = intel_uncore_read(uncore, VLV_IER); 1767 else if (HAS_PCH_SPLIT(i915)) 1768 gt->ier = intel_uncore_read(uncore, DEIER); 1769 else if (GRAPHICS_VER(i915) == 2) 1770 gt->ier = intel_uncore_read16(uncore, GEN2_IER); 1771 else 1772 gt->ier = intel_uncore_read(uncore, GEN2_IER); 1773 } 1774 1775 /* Capture all other registers that GuC doesn't capture. */ 1776 static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt) 1777 { 1778 struct intel_uncore *uncore = gt->_gt->uncore; 1779 struct drm_i915_private *i915 = uncore->i915; 1780 int i; 1781 1782 if (IS_VALLEYVIEW(i915)) { 1783 gt->gtier[0] = intel_uncore_read(uncore, GTIER); 1784 gt->ngtier = 1; 1785 } else if (GRAPHICS_VER(i915) >= 11) { 1786 gt->gtier[0] = 1787 intel_uncore_read(uncore, 1788 GEN11_RENDER_COPY_INTR_ENABLE); 1789 gt->gtier[1] = 1790 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE); 1791 gt->gtier[2] = 1792 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE); 1793 gt->gtier[3] = 1794 intel_uncore_read(uncore, 1795 GEN11_GPM_WGBOXPERF_INTR_ENABLE); 1796 gt->gtier[4] = 1797 intel_uncore_read(uncore, 1798 GEN11_CRYPTO_RSVD_INTR_ENABLE); 1799 gt->gtier[5] = 1800 intel_uncore_read(uncore, 1801 GEN11_GUNIT_CSME_INTR_ENABLE); 1802 gt->ngtier = 6; 1803 } else if (GRAPHICS_VER(i915) >= 8) { 1804 for (i = 0; i < 4; i++) 1805 gt->gtier[i] = 1806 intel_uncore_read(uncore, GEN8_GT_IER(i)); 1807 gt->ngtier = 4; 1808 } else if (HAS_PCH_SPLIT(i915)) { 1809 gt->gtier[0] = intel_uncore_read(uncore, GTIER); 1810 gt->ngtier = 1; 1811 } 1812 1813 gt->eir = intel_uncore_read(uncore, EIR); 1814 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); 1815 } 1816 1817 /* 1818 * Capture all registers that relate to workload submission. 1819 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us 1820 */ 1821 static void gt_record_global_regs(struct intel_gt_coredump *gt) 1822 { 1823 struct intel_uncore *uncore = gt->_gt->uncore; 1824 struct drm_i915_private *i915 = uncore->i915; 1825 int i; 1826 1827 /* 1828 * General organization 1829 * 1. Registers specific to a single generation 1830 * 2. Registers which belong to multiple generations 1831 * 3. Feature specific registers. 1832 * 4. Everything else 1833 * Please try to follow the order. 1834 */ 1835 1836 /* 1: Registers specific to a single generation */ 1837 if (IS_VALLEYVIEW(i915)) 1838 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); 1839 1840 if (GRAPHICS_VER(i915) == 7) 1841 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); 1842 1843 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 1844 gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, 1845 XEHP_FAULT_TLB_DATA0); 1846 gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, 1847 XEHP_FAULT_TLB_DATA1); 1848 } else if (GRAPHICS_VER(i915) >= 12) { 1849 gt->fault_data0 = intel_uncore_read(uncore, 1850 GEN12_FAULT_TLB_DATA0); 1851 gt->fault_data1 = intel_uncore_read(uncore, 1852 GEN12_FAULT_TLB_DATA1); 1853 } else if (GRAPHICS_VER(i915) >= 8) { 1854 gt->fault_data0 = intel_uncore_read(uncore, 1855 GEN8_FAULT_TLB_DATA0); 1856 gt->fault_data1 = intel_uncore_read(uncore, 1857 GEN8_FAULT_TLB_DATA1); 1858 } 1859 1860 if (GRAPHICS_VER(i915) == 6) { 1861 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); 1862 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL); 1863 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE); 1864 } 1865 1866 /* 2: Registers which belong to multiple generations */ 1867 if (GRAPHICS_VER(i915) >= 7) 1868 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); 1869 1870 if (GRAPHICS_VER(i915) >= 6) { 1871 if (GRAPHICS_VER(i915) < 12) { 1872 gt->error = intel_uncore_read(uncore, ERROR_GEN6); 1873 gt->done_reg = intel_uncore_read(uncore, DONE_REG); 1874 } 1875 } 1876 1877 /* 3: Feature specific registers */ 1878 if (IS_GRAPHICS_VER(i915, 6, 7)) { 1879 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); 1880 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); 1881 } 1882 1883 if (IS_GRAPHICS_VER(i915, 8, 11)) 1884 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); 1885 1886 if (GRAPHICS_VER(i915) == 12) 1887 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); 1888 1889 if (GRAPHICS_VER(i915) >= 12) { 1890 for (i = 0; i < I915_MAX_SFC; i++) { 1891 /* 1892 * SFC_DONE resides in the VD forcewake domain, so it 1893 * only exists if the corresponding VCS engine is 1894 * present. 1895 */ 1896 if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 || 1897 !HAS_ENGINE(gt->_gt, _VCS(i * 2))) 1898 continue; 1899 1900 gt->sfc_done[i] = 1901 intel_uncore_read(uncore, GEN12_SFC_DONE(i)); 1902 } 1903 1904 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); 1905 } 1906 } 1907 1908 static void gt_record_info(struct intel_gt_coredump *gt) 1909 { 1910 memcpy(>->info, >->_gt->info, sizeof(struct intel_gt_info)); 1911 gt->clock_frequency = gt->_gt->clock_frequency; 1912 gt->clock_period_ns = gt->_gt->clock_period_ns; 1913 } 1914 1915 /* 1916 * Generate a semi-unique error code. The code is not meant to have meaning, The 1917 * code's only purpose is to try to prevent false duplicated bug reports by 1918 * grossly estimating a GPU error state. 1919 * 1920 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine 1921 * the hang if we could strip the GTT offset information from it. 1922 * 1923 * It's only a small step better than a random number in its current form. 1924 */ 1925 static u32 generate_ecode(const struct intel_engine_coredump *ee) 1926 { 1927 /* 1928 * IPEHR would be an ideal way to detect errors, as it's the gross 1929 * measure of "the command that hung." However, has some very common 1930 * synchronization commands which almost always appear in the case 1931 * strictly a client bug. Use instdone to differentiate those some. 1932 */ 1933 return ee ? ee->ipehr ^ ee->instdone.instdone : 0; 1934 } 1935 1936 static const char *error_msg(struct i915_gpu_coredump *error) 1937 { 1938 struct intel_engine_coredump *first = NULL; 1939 unsigned int hung_classes = 0; 1940 struct intel_gt_coredump *gt; 1941 int len; 1942 1943 for (gt = error->gt; gt; gt = gt->next) { 1944 struct intel_engine_coredump *cs; 1945 1946 for (cs = gt->engine; cs; cs = cs->next) { 1947 if (cs->hung) { 1948 hung_classes |= BIT(cs->engine->uabi_class); 1949 if (!first) 1950 first = cs; 1951 } 1952 } 1953 } 1954 1955 len = scnprintf(error->error_msg, sizeof(error->error_msg), 1956 "GPU HANG: ecode %d:%x:%08x", 1957 GRAPHICS_VER(error->i915), hung_classes, 1958 generate_ecode(first)); 1959 if (first && first->context.pid) { 1960 /* Just show the first executing process, more is confusing */ 1961 len += scnprintf(error->error_msg + len, 1962 sizeof(error->error_msg) - len, 1963 ", in %s [%d]", 1964 first->context.comm, first->context.pid); 1965 } 1966 1967 return error->error_msg; 1968 } 1969 1970 static void capture_gen(struct i915_gpu_coredump *error) 1971 { 1972 struct drm_i915_private *i915 = error->i915; 1973 1974 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); 1975 error->suspended = i915->runtime_pm.suspended; 1976 1977 error->iommu = i915_vtd_active(i915); 1978 error->reset_count = i915_reset_count(&i915->gpu_error); 1979 error->suspend_count = i915->suspend_count; 1980 1981 i915_params_copy(&error->params, &i915->params); 1982 memcpy(&error->device_info, 1983 INTEL_INFO(i915), 1984 sizeof(error->device_info)); 1985 memcpy(&error->runtime_info, 1986 RUNTIME_INFO(i915), 1987 sizeof(error->runtime_info)); 1988 memcpy(&error->display_device_info, DISPLAY_INFO(i915), 1989 sizeof(error->display_device_info)); 1990 memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915), 1991 sizeof(error->display_runtime_info)); 1992 error->driver_caps = i915->caps; 1993 } 1994 1995 struct i915_gpu_coredump * 1996 i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) 1997 { 1998 struct i915_gpu_coredump *error; 1999 2000 if (!i915->params.error_capture) 2001 return NULL; 2002 2003 error = kzalloc(sizeof(*error), gfp); 2004 if (!error) 2005 return NULL; 2006 2007 kref_init(&error->ref); 2008 error->i915 = i915; 2009 2010 error->time = ktime_get_real(); 2011 error->boottime = ktime_get_boottime(); 2012 error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time); 2013 error->capture = jiffies; 2014 2015 capture_gen(error); 2016 2017 return error; 2018 } 2019 2020 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) 2021 2022 struct intel_gt_coredump * 2023 intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags) 2024 { 2025 struct intel_gt_coredump *gc; 2026 2027 gc = kzalloc(sizeof(*gc), gfp); 2028 if (!gc) 2029 return NULL; 2030 2031 gc->_gt = gt; 2032 gc->awake = intel_gt_pm_is_awake(gt); 2033 2034 gt_record_display_regs(gc); 2035 gt_record_global_nonguc_regs(gc); 2036 2037 /* 2038 * GuC dumps global, eng-class and eng-instance registers 2039 * (that can change as part of engine state during execution) 2040 * before an engine is reset due to a hung context. 2041 * GuC captures and reports all three groups of registers 2042 * together as a single set before the engine is reset. 2043 * Thus, if GuC triggered the context reset we retrieve 2044 * the register values as part of gt_record_engines. 2045 */ 2046 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) 2047 gt_record_global_regs(gc); 2048 2049 gt_record_fences(gc); 2050 2051 return gc; 2052 } 2053 2054 struct i915_vma_compress * 2055 i915_vma_capture_prepare(struct intel_gt_coredump *gt) 2056 { 2057 struct i915_vma_compress *compress; 2058 2059 compress = kmalloc(sizeof(*compress), ALLOW_FAIL); 2060 if (!compress) 2061 return NULL; 2062 2063 if (!compress_init(compress)) { 2064 kfree(compress); 2065 return NULL; 2066 } 2067 2068 return compress; 2069 } 2070 2071 void i915_vma_capture_finish(struct intel_gt_coredump *gt, 2072 struct i915_vma_compress *compress) 2073 { 2074 if (!compress) 2075 return; 2076 2077 compress_fini(compress); 2078 kfree(compress); 2079 } 2080 2081 static struct i915_gpu_coredump * 2082 __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) 2083 { 2084 struct drm_i915_private *i915 = gt->i915; 2085 struct i915_gpu_coredump *error; 2086 2087 /* Check if GPU capture has been disabled */ 2088 error = READ_ONCE(i915->gpu_error.first_error); 2089 if (IS_ERR(error)) 2090 return error; 2091 2092 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL); 2093 if (!error) 2094 return ERR_PTR(-ENOMEM); 2095 2096 error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags); 2097 if (error->gt) { 2098 struct i915_vma_compress *compress; 2099 2100 compress = i915_vma_capture_prepare(error->gt); 2101 if (!compress) { 2102 kfree(error->gt); 2103 kfree(error); 2104 return ERR_PTR(-ENOMEM); 2105 } 2106 2107 if (INTEL_INFO(i915)->has_gt_uc) { 2108 error->gt->uc = gt_record_uc(error->gt, compress); 2109 if (error->gt->uc) { 2110 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE) 2111 error->gt->uc->guc.is_guc_capture = true; 2112 else 2113 GEM_BUG_ON(error->gt->uc->guc.is_guc_capture); 2114 } 2115 } 2116 2117 gt_record_info(error->gt); 2118 gt_record_engines(error->gt, engine_mask, compress, dump_flags); 2119 2120 2121 i915_vma_capture_finish(error->gt, compress); 2122 2123 error->simulated |= error->gt->simulated; 2124 } 2125 2126 error->overlay = intel_overlay_capture_error_state(i915); 2127 2128 return error; 2129 } 2130 2131 struct i915_gpu_coredump * 2132 i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) 2133 { 2134 static DEFINE_MUTEX(capture_mutex); 2135 int ret = mutex_lock_interruptible(&capture_mutex); 2136 struct i915_gpu_coredump *dump; 2137 2138 if (ret) 2139 return ERR_PTR(ret); 2140 2141 dump = __i915_gpu_coredump(gt, engine_mask, dump_flags); 2142 mutex_unlock(&capture_mutex); 2143 2144 return dump; 2145 } 2146 2147 void i915_error_state_store(struct i915_gpu_coredump *error) 2148 { 2149 struct drm_i915_private *i915; 2150 static bool warned; 2151 2152 if (IS_ERR_OR_NULL(error)) 2153 return; 2154 2155 i915 = error->i915; 2156 drm_info(&i915->drm, "%s\n", error_msg(error)); 2157 2158 if (error->simulated || 2159 cmpxchg(&i915->gpu_error.first_error, NULL, error)) 2160 return; 2161 2162 i915_gpu_coredump_get(error); 2163 2164 if (!xchg(&warned, true) && 2165 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { 2166 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); 2167 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); 2168 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); 2169 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 2170 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); 2171 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n", 2172 i915->drm.primary->index); 2173 } 2174 } 2175 2176 /** 2177 * i915_capture_error_state - capture an error record for later analysis 2178 * @gt: intel_gt which originated the hang 2179 * @engine_mask: hung engines 2180 * @dump_flags: dump flags 2181 * 2182 * Should be called when an error is detected (either a hang or an error 2183 * interrupt) to capture error state from the time of the error. Fills 2184 * out a structure which becomes available in debugfs for user level tools 2185 * to pick up. 2186 */ 2187 void i915_capture_error_state(struct intel_gt *gt, 2188 intel_engine_mask_t engine_mask, u32 dump_flags) 2189 { 2190 struct i915_gpu_coredump *error; 2191 2192 error = i915_gpu_coredump(gt, engine_mask, dump_flags); 2193 if (IS_ERR(error)) { 2194 cmpxchg(>->i915->gpu_error.first_error, NULL, error); 2195 return; 2196 } 2197 2198 i915_error_state_store(error); 2199 i915_gpu_coredump_put(error); 2200 } 2201 2202 struct i915_gpu_coredump * 2203 i915_first_error_state(struct drm_i915_private *i915) 2204 { 2205 struct i915_gpu_coredump *error; 2206 2207 spin_lock_irq(&i915->gpu_error.lock); 2208 error = i915->gpu_error.first_error; 2209 if (!IS_ERR_OR_NULL(error)) 2210 i915_gpu_coredump_get(error); 2211 spin_unlock_irq(&i915->gpu_error.lock); 2212 2213 return error; 2214 } 2215 2216 void i915_reset_error_state(struct drm_i915_private *i915) 2217 { 2218 struct i915_gpu_coredump *error; 2219 2220 spin_lock_irq(&i915->gpu_error.lock); 2221 error = i915->gpu_error.first_error; 2222 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ 2223 i915->gpu_error.first_error = NULL; 2224 spin_unlock_irq(&i915->gpu_error.lock); 2225 2226 if (!IS_ERR_OR_NULL(error)) 2227 i915_gpu_coredump_put(error); 2228 } 2229 2230 void i915_disable_error_state(struct drm_i915_private *i915, int err) 2231 { 2232 spin_lock_irq(&i915->gpu_error.lock); 2233 if (!i915->gpu_error.first_error) 2234 i915->gpu_error.first_error = ERR_PTR(err); 2235 spin_unlock_irq(&i915->gpu_error.lock); 2236 } 2237 2238 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 2239 void intel_klog_error_capture(struct intel_gt *gt, 2240 intel_engine_mask_t engine_mask) 2241 { 2242 static int g_count; 2243 struct drm_i915_private *i915 = gt->i915; 2244 struct i915_gpu_coredump *error; 2245 intel_wakeref_t wakeref; 2246 size_t buf_size = PAGE_SIZE * 128; 2247 size_t pos_err; 2248 char *buf, *ptr, *next; 2249 int l_count = g_count++; 2250 int line = 0; 2251 2252 /* Can't allocate memory during a reset */ 2253 if (test_bit(I915_RESET_BACKOFF, >->reset.flags)) { 2254 drm_err(>->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n", 2255 l_count, line++); 2256 return; 2257 } 2258 2259 error = READ_ONCE(i915->gpu_error.first_error); 2260 if (error) { 2261 drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n", 2262 l_count, line++); 2263 i915_reset_error_state(i915); 2264 } 2265 2266 with_intel_runtime_pm(&i915->runtime_pm, wakeref) 2267 error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE); 2268 2269 if (IS_ERR(error)) { 2270 drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error capture: %ld!\n", 2271 l_count, line++, PTR_ERR(error)); 2272 return; 2273 } 2274 2275 buf = kvmalloc(buf_size, GFP_KERNEL); 2276 if (!buf) { 2277 drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n", 2278 l_count, line++); 2279 i915_gpu_coredump_put(error); 2280 return; 2281 } 2282 2283 drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n", 2284 l_count, line++, __builtin_return_address(0)); 2285 2286 /* Largest string length safe to print via dmesg */ 2287 # define MAX_CHUNK 800 2288 2289 pos_err = 0; 2290 while (1) { 2291 ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1); 2292 2293 if (got <= 0) 2294 break; 2295 2296 buf[got] = 0; 2297 pos_err += got; 2298 2299 ptr = buf; 2300 while (got > 0) { 2301 size_t count; 2302 char tag[2]; 2303 2304 next = strnchr(ptr, got, '\n'); 2305 if (next) { 2306 count = next - ptr; 2307 *next = 0; 2308 tag[0] = '>'; 2309 tag[1] = '<'; 2310 } else { 2311 count = got; 2312 tag[0] = '}'; 2313 tag[1] = '{'; 2314 } 2315 2316 if (count > MAX_CHUNK) { 2317 size_t pos; 2318 char *ptr2 = ptr; 2319 2320 for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) { 2321 char chr = ptr[pos]; 2322 2323 ptr[pos] = 0; 2324 drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n", 2325 l_count, line++, ptr2); 2326 ptr[pos] = chr; 2327 ptr2 = ptr + pos; 2328 2329 /* 2330 * If spewing large amounts of data via a serial console, 2331 * this can be a very slow process. So be friendly and try 2332 * not to cause 'softlockup on CPU' problems. 2333 */ 2334 cond_resched(); 2335 } 2336 2337 if (ptr2 < (ptr + count)) 2338 drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n", 2339 l_count, line++, tag[0], ptr2, tag[1]); 2340 else if (tag[0] == '>') 2341 drm_info(&i915->drm, "[Capture/%d.%d] ><\n", 2342 l_count, line++); 2343 } else { 2344 drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n", 2345 l_count, line++, tag[0], ptr, tag[1]); 2346 } 2347 2348 ptr = next; 2349 got -= count; 2350 if (next) { 2351 ptr++; 2352 got--; 2353 } 2354 2355 /* As above. */ 2356 cond_resched(); 2357 } 2358 2359 if (got) 2360 drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n", 2361 l_count, line++, got); 2362 } 2363 2364 kvfree(buf); 2365 2366 drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err); 2367 } 2368 #endif 2369