1 /* 2 * Copyright (c) 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * Mika Kuoppala <mika.kuoppala@intel.com> 27 * 28 */ 29 30 #include <linux/ascii85.h> 31 #include <linux/nmi.h> 32 #include <linux/pagevec.h> 33 #include <linux/scatterlist.h> 34 #include <linux/utsname.h> 35 #include <linux/zlib.h> 36 37 #include <drm/drm_print.h> 38 39 #include "display/intel_dmc.h" 40 #include "display/intel_overlay.h" 41 42 #include "gem/i915_gem_context.h" 43 #include "gem/i915_gem_lmem.h" 44 #include "gt/intel_gt.h" 45 #include "gt/intel_gt_pm.h" 46 47 #include "i915_drv.h" 48 #include "i915_gpu_error.h" 49 #include "i915_memcpy.h" 50 #include "i915_scatterlist.h" 51 52 #define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) 53 #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) 54 55 static void __sg_set_buf(struct scatterlist *sg, 56 void *addr, unsigned int len, loff_t it) 57 { 58 sg->page_link = (unsigned long)virt_to_page(addr); 59 sg->offset = offset_in_page(addr); 60 sg->length = len; 61 sg->dma_address = it; 62 } 63 64 static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) 65 { 66 if (!len) 67 return false; 68 69 if (e->bytes + len + 1 <= e->size) 70 return true; 71 72 if (e->bytes) { 73 __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter); 74 e->iter += e->bytes; 75 e->buf = NULL; 76 e->bytes = 0; 77 } 78 79 if (e->cur == e->end) { 80 struct scatterlist *sgl; 81 82 sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL); 83 if (!sgl) { 84 e->err = -ENOMEM; 85 return false; 86 } 87 88 if (e->cur) { 89 e->cur->offset = 0; 90 e->cur->length = 0; 91 e->cur->page_link = 92 (unsigned long)sgl | SG_CHAIN; 93 } else { 94 e->sgl = sgl; 95 } 96 97 e->cur = sgl; 98 e->end = sgl + SG_MAX_SINGLE_ALLOC - 1; 99 } 100 101 e->size = ALIGN(len + 1, SZ_64K); 102 e->buf = kmalloc(e->size, ALLOW_FAIL); 103 if (!e->buf) { 104 e->size = PAGE_ALIGN(len + 1); 105 e->buf = kmalloc(e->size, GFP_KERNEL); 106 } 107 if (!e->buf) { 108 e->err = -ENOMEM; 109 return false; 110 } 111 112 return true; 113 } 114 115 __printf(2, 0) 116 static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 117 const char *fmt, va_list args) 118 { 119 va_list ap; 120 int len; 121 122 if (e->err) 123 return; 124 125 va_copy(ap, args); 126 len = vsnprintf(NULL, 0, fmt, ap); 127 va_end(ap); 128 if (len <= 0) { 129 e->err = len; 130 return; 131 } 132 133 if (!__i915_error_grow(e, len)) 134 return; 135 136 GEM_BUG_ON(e->bytes >= e->size); 137 len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args); 138 if (len < 0) { 139 e->err = len; 140 return; 141 } 142 e->bytes += len; 143 } 144 145 static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) 146 { 147 unsigned len; 148 149 if (e->err || !str) 150 return; 151 152 len = strlen(str); 153 if (!__i915_error_grow(e, len)) 154 return; 155 156 GEM_BUG_ON(e->bytes + len > e->size); 157 memcpy(e->buf + e->bytes, str, len); 158 e->bytes += len; 159 } 160 161 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 162 #define err_puts(e, s) i915_error_puts(e, s) 163 164 static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf) 165 { 166 i915_error_vprintf(p->arg, vaf->fmt, *vaf->va); 167 } 168 169 static inline struct drm_printer 170 i915_error_printer(struct drm_i915_error_state_buf *e) 171 { 172 struct drm_printer p = { 173 .printfn = __i915_printfn_error, 174 .arg = e, 175 }; 176 return p; 177 } 178 179 /* single threaded page allocator with a reserved stash for emergencies */ 180 static void pool_fini(struct pagevec *pv) 181 { 182 pagevec_release(pv); 183 } 184 185 static int pool_refill(struct pagevec *pv, gfp_t gfp) 186 { 187 while (pagevec_space(pv)) { 188 struct page *p; 189 190 p = alloc_page(gfp); 191 if (!p) 192 return -ENOMEM; 193 194 pagevec_add(pv, p); 195 } 196 197 return 0; 198 } 199 200 static int pool_init(struct pagevec *pv, gfp_t gfp) 201 { 202 int err; 203 204 pagevec_init(pv); 205 206 err = pool_refill(pv, gfp); 207 if (err) 208 pool_fini(pv); 209 210 return err; 211 } 212 213 static void *pool_alloc(struct pagevec *pv, gfp_t gfp) 214 { 215 struct page *p; 216 217 p = alloc_page(gfp); 218 if (!p && pagevec_count(pv)) 219 p = pv->pages[--pv->nr]; 220 221 return p ? page_address(p) : NULL; 222 } 223 224 static void pool_free(struct pagevec *pv, void *addr) 225 { 226 struct page *p = virt_to_page(addr); 227 228 if (pagevec_space(pv)) 229 pagevec_add(pv, p); 230 else 231 __free_page(p); 232 } 233 234 #ifdef CONFIG_DRM_I915_COMPRESS_ERROR 235 236 struct i915_vma_compress { 237 struct pagevec pool; 238 struct z_stream_s zstream; 239 void *tmp; 240 }; 241 242 static bool compress_init(struct i915_vma_compress *c) 243 { 244 struct z_stream_s *zstream = &c->zstream; 245 246 if (pool_init(&c->pool, ALLOW_FAIL)) 247 return false; 248 249 zstream->workspace = 250 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), 251 ALLOW_FAIL); 252 if (!zstream->workspace) { 253 pool_fini(&c->pool); 254 return false; 255 } 256 257 c->tmp = NULL; 258 if (i915_has_memcpy_from_wc()) 259 c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); 260 261 return true; 262 } 263 264 static bool compress_start(struct i915_vma_compress *c) 265 { 266 struct z_stream_s *zstream = &c->zstream; 267 void *workspace = zstream->workspace; 268 269 memset(zstream, 0, sizeof(*zstream)); 270 zstream->workspace = workspace; 271 272 return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; 273 } 274 275 static void *compress_next_page(struct i915_vma_compress *c, 276 struct i915_vma_coredump *dst) 277 { 278 void *page; 279 280 if (dst->page_count >= dst->num_pages) 281 return ERR_PTR(-ENOSPC); 282 283 page = pool_alloc(&c->pool, ALLOW_FAIL); 284 if (!page) 285 return ERR_PTR(-ENOMEM); 286 287 return dst->pages[dst->page_count++] = page; 288 } 289 290 static int compress_page(struct i915_vma_compress *c, 291 void *src, 292 struct i915_vma_coredump *dst, 293 bool wc) 294 { 295 struct z_stream_s *zstream = &c->zstream; 296 297 zstream->next_in = src; 298 if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) 299 zstream->next_in = c->tmp; 300 zstream->avail_in = PAGE_SIZE; 301 302 do { 303 if (zstream->avail_out == 0) { 304 zstream->next_out = compress_next_page(c, dst); 305 if (IS_ERR(zstream->next_out)) 306 return PTR_ERR(zstream->next_out); 307 308 zstream->avail_out = PAGE_SIZE; 309 } 310 311 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) 312 return -EIO; 313 314 cond_resched(); 315 } while (zstream->avail_in); 316 317 /* Fallback to uncompressed if we increase size? */ 318 if (0 && zstream->total_out > zstream->total_in) 319 return -E2BIG; 320 321 return 0; 322 } 323 324 static int compress_flush(struct i915_vma_compress *c, 325 struct i915_vma_coredump *dst) 326 { 327 struct z_stream_s *zstream = &c->zstream; 328 329 do { 330 switch (zlib_deflate(zstream, Z_FINISH)) { 331 case Z_OK: /* more space requested */ 332 zstream->next_out = compress_next_page(c, dst); 333 if (IS_ERR(zstream->next_out)) 334 return PTR_ERR(zstream->next_out); 335 336 zstream->avail_out = PAGE_SIZE; 337 break; 338 339 case Z_STREAM_END: 340 goto end; 341 342 default: /* any error */ 343 return -EIO; 344 } 345 } while (1); 346 347 end: 348 memset(zstream->next_out, 0, zstream->avail_out); 349 dst->unused = zstream->avail_out; 350 return 0; 351 } 352 353 static void compress_finish(struct i915_vma_compress *c) 354 { 355 zlib_deflateEnd(&c->zstream); 356 } 357 358 static void compress_fini(struct i915_vma_compress *c) 359 { 360 kfree(c->zstream.workspace); 361 if (c->tmp) 362 pool_free(&c->pool, c->tmp); 363 pool_fini(&c->pool); 364 } 365 366 static void err_compression_marker(struct drm_i915_error_state_buf *m) 367 { 368 err_puts(m, ":"); 369 } 370 371 #else 372 373 struct i915_vma_compress { 374 struct pagevec pool; 375 }; 376 377 static bool compress_init(struct i915_vma_compress *c) 378 { 379 return pool_init(&c->pool, ALLOW_FAIL) == 0; 380 } 381 382 static bool compress_start(struct i915_vma_compress *c) 383 { 384 return true; 385 } 386 387 static int compress_page(struct i915_vma_compress *c, 388 void *src, 389 struct i915_vma_coredump *dst, 390 bool wc) 391 { 392 void *ptr; 393 394 ptr = pool_alloc(&c->pool, ALLOW_FAIL); 395 if (!ptr) 396 return -ENOMEM; 397 398 if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) 399 memcpy(ptr, src, PAGE_SIZE); 400 dst->pages[dst->page_count++] = ptr; 401 cond_resched(); 402 403 return 0; 404 } 405 406 static int compress_flush(struct i915_vma_compress *c, 407 struct i915_vma_coredump *dst) 408 { 409 return 0; 410 } 411 412 static void compress_finish(struct i915_vma_compress *c) 413 { 414 } 415 416 static void compress_fini(struct i915_vma_compress *c) 417 { 418 pool_fini(&c->pool); 419 } 420 421 static void err_compression_marker(struct drm_i915_error_state_buf *m) 422 { 423 err_puts(m, "~"); 424 } 425 426 #endif 427 428 static void error_print_instdone(struct drm_i915_error_state_buf *m, 429 const struct intel_engine_coredump *ee) 430 { 431 const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu; 432 int slice; 433 int subslice; 434 435 err_printf(m, " INSTDONE: 0x%08x\n", 436 ee->instdone.instdone); 437 438 if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3) 439 return; 440 441 err_printf(m, " SC_INSTDONE: 0x%08x\n", 442 ee->instdone.slice_common); 443 444 if (GRAPHICS_VER(m->i915) <= 6) 445 return; 446 447 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) 448 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 449 slice, subslice, 450 ee->instdone.sampler[slice][subslice]); 451 452 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) 453 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", 454 slice, subslice, 455 ee->instdone.row[slice][subslice]); 456 457 if (GRAPHICS_VER(m->i915) < 12) 458 return; 459 460 err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", 461 ee->instdone.slice_common_extra[0]); 462 err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", 463 ee->instdone.slice_common_extra[1]); 464 } 465 466 static void error_print_request(struct drm_i915_error_state_buf *m, 467 const char *prefix, 468 const struct i915_request_coredump *erq) 469 { 470 if (!erq->seqno) 471 return; 472 473 err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n", 474 prefix, erq->pid, erq->context, erq->seqno, 475 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 476 &erq->flags) ? "!" : "", 477 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, 478 &erq->flags) ? "+" : "", 479 erq->sched_attr.priority, 480 erq->head, erq->tail); 481 } 482 483 static void error_print_context(struct drm_i915_error_state_buf *m, 484 const char *header, 485 const struct i915_gem_context_coredump *ctx) 486 { 487 const u32 period = m->i915->gt.clock_period_ns; 488 489 err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n", 490 header, ctx->comm, ctx->pid, ctx->sched_attr.priority, 491 ctx->guilty, ctx->active, 492 ctx->total_runtime * period, 493 mul_u32_u32(ctx->avg_runtime, period)); 494 } 495 496 static struct i915_vma_coredump * 497 __find_vma(struct i915_vma_coredump *vma, const char *name) 498 { 499 while (vma) { 500 if (strcmp(vma->name, name) == 0) 501 return vma; 502 vma = vma->next; 503 } 504 505 return NULL; 506 } 507 508 static struct i915_vma_coredump * 509 find_batch(const struct intel_engine_coredump *ee) 510 { 511 return __find_vma(ee->vma, "batch"); 512 } 513 514 static void error_print_engine(struct drm_i915_error_state_buf *m, 515 const struct intel_engine_coredump *ee) 516 { 517 struct i915_vma_coredump *batch; 518 int n; 519 520 err_printf(m, "%s command stream:\n", ee->engine->name); 521 err_printf(m, " CCID: 0x%08x\n", ee->ccid); 522 err_printf(m, " START: 0x%08x\n", ee->start); 523 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); 524 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", 525 ee->tail, ee->rq_post, ee->rq_tail); 526 err_printf(m, " CTL: 0x%08x\n", ee->ctl); 527 err_printf(m, " MODE: 0x%08x\n", ee->mode); 528 err_printf(m, " HWS: 0x%08x\n", ee->hws); 529 err_printf(m, " ACTHD: 0x%08x %08x\n", 530 (u32)(ee->acthd>>32), (u32)ee->acthd); 531 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir); 532 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr); 533 err_printf(m, " ESR: 0x%08x\n", ee->esr); 534 535 error_print_instdone(m, ee); 536 537 batch = find_batch(ee); 538 if (batch) { 539 u64 start = batch->gtt_offset; 540 u64 end = start + batch->gtt_size; 541 542 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", 543 upper_32_bits(start), lower_32_bits(start), 544 upper_32_bits(end), lower_32_bits(end)); 545 } 546 if (GRAPHICS_VER(m->i915) >= 4) { 547 err_printf(m, " BBADDR: 0x%08x_%08x\n", 548 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr); 549 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate); 550 err_printf(m, " INSTPS: 0x%08x\n", ee->instps); 551 } 552 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm); 553 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr), 554 lower_32_bits(ee->faddr)); 555 if (GRAPHICS_VER(m->i915) >= 6) { 556 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi); 557 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg); 558 } 559 if (HAS_PPGTT(m->i915)) { 560 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); 561 562 if (GRAPHICS_VER(m->i915) >= 8) { 563 int i; 564 for (i = 0; i < 4; i++) 565 err_printf(m, " PDP%d: 0x%016llx\n", 566 i, ee->vm_info.pdp[i]); 567 } else { 568 err_printf(m, " PP_DIR_BASE: 0x%08x\n", 569 ee->vm_info.pp_dir_base); 570 } 571 } 572 err_printf(m, " hung: %u\n", ee->hung); 573 err_printf(m, " engine reset count: %u\n", ee->reset_count); 574 575 for (n = 0; n < ee->num_ports; n++) { 576 err_printf(m, " ELSP[%d]:", n); 577 error_print_request(m, " ", &ee->execlist[n]); 578 } 579 580 error_print_context(m, " Active context: ", &ee->context); 581 } 582 583 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 584 { 585 va_list args; 586 587 va_start(args, f); 588 i915_error_vprintf(e, f, args); 589 va_end(args); 590 } 591 592 static void print_error_vma(struct drm_i915_error_state_buf *m, 593 const struct intel_engine_cs *engine, 594 const struct i915_vma_coredump *vma) 595 { 596 char out[ASCII85_BUFSZ]; 597 int page; 598 599 if (!vma) 600 return; 601 602 err_printf(m, "%s --- %s = 0x%08x %08x\n", 603 engine ? engine->name : "global", vma->name, 604 upper_32_bits(vma->gtt_offset), 605 lower_32_bits(vma->gtt_offset)); 606 607 if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) 608 err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); 609 610 err_compression_marker(m); 611 for (page = 0; page < vma->page_count; page++) { 612 int i, len; 613 614 len = PAGE_SIZE; 615 if (page == vma->page_count - 1) 616 len -= vma->unused; 617 len = ascii85_encode_len(len); 618 619 for (i = 0; i < len; i++) 620 err_puts(m, ascii85_encode(vma->pages[page][i], out)); 621 } 622 err_puts(m, "\n"); 623 } 624 625 static void err_print_capabilities(struct drm_i915_error_state_buf *m, 626 struct i915_gpu_coredump *error) 627 { 628 struct drm_printer p = i915_error_printer(m); 629 630 intel_device_info_print_static(&error->device_info, &p); 631 intel_device_info_print_runtime(&error->runtime_info, &p); 632 intel_driver_caps_print(&error->driver_caps, &p); 633 } 634 635 static void err_print_params(struct drm_i915_error_state_buf *m, 636 const struct i915_params *params) 637 { 638 struct drm_printer p = i915_error_printer(m); 639 640 i915_params_dump(params, &p); 641 } 642 643 static void err_print_pciid(struct drm_i915_error_state_buf *m, 644 struct drm_i915_private *i915) 645 { 646 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 647 648 err_printf(m, "PCI ID: 0x%04x\n", pdev->device); 649 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); 650 err_printf(m, "PCI Subsystem: %04x:%04x\n", 651 pdev->subsystem_vendor, 652 pdev->subsystem_device); 653 } 654 655 static void err_print_uc(struct drm_i915_error_state_buf *m, 656 const struct intel_uc_coredump *error_uc) 657 { 658 struct drm_printer p = i915_error_printer(m); 659 660 intel_uc_fw_dump(&error_uc->guc_fw, &p); 661 intel_uc_fw_dump(&error_uc->huc_fw, &p); 662 print_error_vma(m, NULL, error_uc->guc_log); 663 } 664 665 static void err_free_sgl(struct scatterlist *sgl) 666 { 667 while (sgl) { 668 struct scatterlist *sg; 669 670 for (sg = sgl; !sg_is_chain(sg); sg++) { 671 kfree(sg_virt(sg)); 672 if (sg_is_last(sg)) 673 break; 674 } 675 676 sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg); 677 free_page((unsigned long)sgl); 678 sgl = sg; 679 } 680 } 681 682 static void err_print_gt_info(struct drm_i915_error_state_buf *m, 683 struct intel_gt_coredump *gt) 684 { 685 struct drm_printer p = i915_error_printer(m); 686 687 intel_gt_info_print(>->info, &p); 688 intel_sseu_print_topology(>->info.sseu, &p); 689 } 690 691 static void err_print_gt(struct drm_i915_error_state_buf *m, 692 struct intel_gt_coredump *gt) 693 { 694 const struct intel_engine_coredump *ee; 695 int i; 696 697 err_printf(m, "GT awake: %s\n", yesno(gt->awake)); 698 err_printf(m, "EIR: 0x%08x\n", gt->eir); 699 err_printf(m, "IER: 0x%08x\n", gt->ier); 700 for (i = 0; i < gt->ngtier; i++) 701 err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]); 702 err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er); 703 err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake); 704 err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr); 705 706 for (i = 0; i < gt->nfence; i++) 707 err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]); 708 709 if (IS_GRAPHICS_VER(m->i915, 6, 11)) { 710 err_printf(m, "ERROR: 0x%08x\n", gt->error); 711 err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg); 712 } 713 714 if (GRAPHICS_VER(m->i915) >= 8) 715 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", 716 gt->fault_data1, gt->fault_data0); 717 718 if (GRAPHICS_VER(m->i915) == 7) 719 err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int); 720 721 if (IS_GRAPHICS_VER(m->i915, 8, 11)) 722 err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache); 723 724 if (GRAPHICS_VER(m->i915) == 12) 725 err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err); 726 727 if (GRAPHICS_VER(m->i915) >= 12) { 728 int i; 729 730 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) 731 err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, 732 gt->sfc_done[i]); 733 734 err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done); 735 } 736 737 for (ee = gt->engine; ee; ee = ee->next) { 738 const struct i915_vma_coredump *vma; 739 740 error_print_engine(m, ee); 741 for (vma = ee->vma; vma; vma = vma->next) 742 print_error_vma(m, ee->engine, vma); 743 } 744 745 if (gt->uc) 746 err_print_uc(m, gt->uc); 747 748 err_print_gt_info(m, gt); 749 } 750 751 static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, 752 struct i915_gpu_coredump *error) 753 { 754 const struct intel_engine_coredump *ee; 755 struct timespec64 ts; 756 757 if (*error->error_msg) 758 err_printf(m, "%s\n", error->error_msg); 759 err_printf(m, "Kernel: %s %s\n", 760 init_utsname()->release, 761 init_utsname()->machine); 762 err_printf(m, "Driver: %s\n", DRIVER_DATE); 763 ts = ktime_to_timespec64(error->time); 764 err_printf(m, "Time: %lld s %ld us\n", 765 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 766 ts = ktime_to_timespec64(error->boottime); 767 err_printf(m, "Boottime: %lld s %ld us\n", 768 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 769 ts = ktime_to_timespec64(error->uptime); 770 err_printf(m, "Uptime: %lld s %ld us\n", 771 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 772 err_printf(m, "Capture: %lu jiffies; %d ms ago\n", 773 error->capture, jiffies_to_msecs(jiffies - error->capture)); 774 775 for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next) 776 err_printf(m, "Active process (on ring %s): %s [%d]\n", 777 ee->engine->name, 778 ee->context.comm, 779 ee->context.pid); 780 781 err_printf(m, "Reset count: %u\n", error->reset_count); 782 err_printf(m, "Suspend count: %u\n", error->suspend_count); 783 err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); 784 err_printf(m, "Subplatform: 0x%x\n", 785 intel_subplatform(&error->runtime_info, 786 error->device_info.platform)); 787 err_print_pciid(m, m->i915); 788 789 err_printf(m, "IOMMU enabled?: %d\n", error->iommu); 790 791 if (HAS_DMC(m->i915)) { 792 struct intel_dmc *dmc = &m->i915->dmc; 793 794 err_printf(m, "DMC loaded: %s\n", 795 yesno(intel_dmc_has_payload(m->i915) != 0)); 796 err_printf(m, "DMC fw version: %d.%d\n", 797 DMC_VERSION_MAJOR(dmc->version), 798 DMC_VERSION_MINOR(dmc->version)); 799 } 800 801 err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock)); 802 err_printf(m, "PM suspended: %s\n", yesno(error->suspended)); 803 804 if (error->gt) 805 err_print_gt(m, error->gt); 806 807 if (error->overlay) 808 intel_overlay_print_error_state(m, error->overlay); 809 810 err_print_capabilities(m, error); 811 err_print_params(m, &error->params); 812 } 813 814 static int err_print_to_sgl(struct i915_gpu_coredump *error) 815 { 816 struct drm_i915_error_state_buf m; 817 818 if (IS_ERR(error)) 819 return PTR_ERR(error); 820 821 if (READ_ONCE(error->sgl)) 822 return 0; 823 824 memset(&m, 0, sizeof(m)); 825 m.i915 = error->i915; 826 827 __err_print_to_sgl(&m, error); 828 829 if (m.buf) { 830 __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter); 831 m.bytes = 0; 832 m.buf = NULL; 833 } 834 if (m.cur) { 835 GEM_BUG_ON(m.end < m.cur); 836 sg_mark_end(m.cur - 1); 837 } 838 GEM_BUG_ON(m.sgl && !m.cur); 839 840 if (m.err) { 841 err_free_sgl(m.sgl); 842 return m.err; 843 } 844 845 if (cmpxchg(&error->sgl, NULL, m.sgl)) 846 err_free_sgl(m.sgl); 847 848 return 0; 849 } 850 851 ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, 852 char *buf, loff_t off, size_t rem) 853 { 854 struct scatterlist *sg; 855 size_t count; 856 loff_t pos; 857 int err; 858 859 if (!error || !rem) 860 return 0; 861 862 err = err_print_to_sgl(error); 863 if (err) 864 return err; 865 866 sg = READ_ONCE(error->fit); 867 if (!sg || off < sg->dma_address) 868 sg = error->sgl; 869 if (!sg) 870 return 0; 871 872 pos = sg->dma_address; 873 count = 0; 874 do { 875 size_t len, start; 876 877 if (sg_is_chain(sg)) { 878 sg = sg_chain_ptr(sg); 879 GEM_BUG_ON(sg_is_chain(sg)); 880 } 881 882 len = sg->length; 883 if (pos + len <= off) { 884 pos += len; 885 continue; 886 } 887 888 start = sg->offset; 889 if (pos < off) { 890 GEM_BUG_ON(off - pos > len); 891 len -= off - pos; 892 start += off - pos; 893 pos = off; 894 } 895 896 len = min(len, rem); 897 GEM_BUG_ON(!len || len > sg->length); 898 899 memcpy(buf, page_address(sg_page(sg)) + start, len); 900 901 count += len; 902 pos += len; 903 904 buf += len; 905 rem -= len; 906 if (!rem) { 907 WRITE_ONCE(error->fit, sg); 908 break; 909 } 910 } while (!sg_is_last(sg++)); 911 912 return count; 913 } 914 915 static void i915_vma_coredump_free(struct i915_vma_coredump *vma) 916 { 917 while (vma) { 918 struct i915_vma_coredump *next = vma->next; 919 int page; 920 921 for (page = 0; page < vma->page_count; page++) 922 free_page((unsigned long)vma->pages[page]); 923 924 kfree(vma); 925 vma = next; 926 } 927 } 928 929 static void cleanup_params(struct i915_gpu_coredump *error) 930 { 931 i915_params_free(&error->params); 932 } 933 934 static void cleanup_uc(struct intel_uc_coredump *uc) 935 { 936 kfree(uc->guc_fw.path); 937 kfree(uc->huc_fw.path); 938 i915_vma_coredump_free(uc->guc_log); 939 940 kfree(uc); 941 } 942 943 static void cleanup_gt(struct intel_gt_coredump *gt) 944 { 945 while (gt->engine) { 946 struct intel_engine_coredump *ee = gt->engine; 947 948 gt->engine = ee->next; 949 950 i915_vma_coredump_free(ee->vma); 951 kfree(ee); 952 } 953 954 if (gt->uc) 955 cleanup_uc(gt->uc); 956 957 kfree(gt); 958 } 959 960 void __i915_gpu_coredump_free(struct kref *error_ref) 961 { 962 struct i915_gpu_coredump *error = 963 container_of(error_ref, typeof(*error), ref); 964 965 while (error->gt) { 966 struct intel_gt_coredump *gt = error->gt; 967 968 error->gt = gt->next; 969 cleanup_gt(gt); 970 } 971 972 kfree(error->overlay); 973 974 cleanup_params(error); 975 976 err_free_sgl(error->sgl); 977 kfree(error); 978 } 979 980 static struct i915_vma_coredump * 981 i915_vma_coredump_create(const struct intel_gt *gt, 982 const struct i915_vma *vma, 983 const char *name, 984 struct i915_vma_compress *compress) 985 { 986 struct i915_ggtt *ggtt = gt->ggtt; 987 const u64 slot = ggtt->error_capture.start; 988 struct i915_vma_coredump *dst; 989 unsigned long num_pages; 990 struct sgt_iter iter; 991 int ret; 992 993 might_sleep(); 994 995 if (!vma || !vma->pages || !compress) 996 return NULL; 997 998 num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; 999 num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ 1000 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); 1001 if (!dst) 1002 return NULL; 1003 1004 if (!compress_start(compress)) { 1005 kfree(dst); 1006 return NULL; 1007 } 1008 1009 strcpy(dst->name, name); 1010 dst->next = NULL; 1011 1012 dst->gtt_offset = vma->node.start; 1013 dst->gtt_size = vma->node.size; 1014 dst->gtt_page_sizes = vma->page_sizes.gtt; 1015 dst->num_pages = num_pages; 1016 dst->page_count = 0; 1017 dst->unused = 0; 1018 1019 ret = -EINVAL; 1020 if (drm_mm_node_allocated(&ggtt->error_capture)) { 1021 void __iomem *s; 1022 dma_addr_t dma; 1023 1024 for_each_sgt_daddr(dma, iter, vma->pages) { 1025 mutex_lock(&ggtt->error_mutex); 1026 ggtt->vm.insert_page(&ggtt->vm, dma, slot, 1027 I915_CACHE_NONE, 0); 1028 mb(); 1029 1030 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); 1031 ret = compress_page(compress, 1032 (void __force *)s, dst, 1033 true); 1034 io_mapping_unmap(s); 1035 1036 mb(); 1037 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); 1038 mutex_unlock(&ggtt->error_mutex); 1039 if (ret) 1040 break; 1041 } 1042 } else if (i915_gem_object_is_lmem(vma->obj)) { 1043 struct intel_memory_region *mem = vma->obj->mm.region; 1044 dma_addr_t dma; 1045 1046 for_each_sgt_daddr(dma, iter, vma->pages) { 1047 void __iomem *s; 1048 1049 s = io_mapping_map_wc(&mem->iomap, 1050 dma - mem->region.start, 1051 PAGE_SIZE); 1052 ret = compress_page(compress, 1053 (void __force *)s, dst, 1054 true); 1055 io_mapping_unmap(s); 1056 if (ret) 1057 break; 1058 } 1059 } else { 1060 struct page *page; 1061 1062 for_each_sgt_page(page, iter, vma->pages) { 1063 void *s; 1064 1065 drm_clflush_pages(&page, 1); 1066 1067 s = kmap(page); 1068 ret = compress_page(compress, s, dst, false); 1069 kunmap(page); 1070 1071 drm_clflush_pages(&page, 1); 1072 1073 if (ret) 1074 break; 1075 } 1076 } 1077 1078 if (ret || compress_flush(compress, dst)) { 1079 while (dst->page_count--) 1080 pool_free(&compress->pool, dst->pages[dst->page_count]); 1081 kfree(dst); 1082 dst = NULL; 1083 } 1084 compress_finish(compress); 1085 1086 return dst; 1087 } 1088 1089 static void gt_record_fences(struct intel_gt_coredump *gt) 1090 { 1091 struct i915_ggtt *ggtt = gt->_gt->ggtt; 1092 struct intel_uncore *uncore = gt->_gt->uncore; 1093 int i; 1094 1095 if (GRAPHICS_VER(uncore->i915) >= 6) { 1096 for (i = 0; i < ggtt->num_fences; i++) 1097 gt->fence[i] = 1098 intel_uncore_read64(uncore, 1099 FENCE_REG_GEN6_LO(i)); 1100 } else if (GRAPHICS_VER(uncore->i915) >= 4) { 1101 for (i = 0; i < ggtt->num_fences; i++) 1102 gt->fence[i] = 1103 intel_uncore_read64(uncore, 1104 FENCE_REG_965_LO(i)); 1105 } else { 1106 for (i = 0; i < ggtt->num_fences; i++) 1107 gt->fence[i] = 1108 intel_uncore_read(uncore, FENCE_REG(i)); 1109 } 1110 gt->nfence = i; 1111 } 1112 1113 static void engine_record_registers(struct intel_engine_coredump *ee) 1114 { 1115 const struct intel_engine_cs *engine = ee->engine; 1116 struct drm_i915_private *i915 = engine->i915; 1117 1118 if (GRAPHICS_VER(i915) >= 6) { 1119 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); 1120 1121 if (GRAPHICS_VER(i915) >= 12) 1122 ee->fault_reg = intel_uncore_read(engine->uncore, 1123 GEN12_RING_FAULT_REG); 1124 else if (GRAPHICS_VER(i915) >= 8) 1125 ee->fault_reg = intel_uncore_read(engine->uncore, 1126 GEN8_RING_FAULT_REG); 1127 else 1128 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); 1129 } 1130 1131 if (GRAPHICS_VER(i915) >= 4) { 1132 ee->esr = ENGINE_READ(engine, RING_ESR); 1133 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); 1134 ee->ipeir = ENGINE_READ(engine, RING_IPEIR); 1135 ee->ipehr = ENGINE_READ(engine, RING_IPEHR); 1136 ee->instps = ENGINE_READ(engine, RING_INSTPS); 1137 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); 1138 ee->ccid = ENGINE_READ(engine, CCID); 1139 if (GRAPHICS_VER(i915) >= 8) { 1140 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32; 1141 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; 1142 } 1143 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE); 1144 } else { 1145 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX); 1146 ee->ipeir = ENGINE_READ(engine, IPEIR); 1147 ee->ipehr = ENGINE_READ(engine, IPEHR); 1148 } 1149 1150 intel_engine_get_instdone(engine, &ee->instdone); 1151 1152 ee->instpm = ENGINE_READ(engine, RING_INSTPM); 1153 ee->acthd = intel_engine_get_active_head(engine); 1154 ee->start = ENGINE_READ(engine, RING_START); 1155 ee->head = ENGINE_READ(engine, RING_HEAD); 1156 ee->tail = ENGINE_READ(engine, RING_TAIL); 1157 ee->ctl = ENGINE_READ(engine, RING_CTL); 1158 if (GRAPHICS_VER(i915) > 2) 1159 ee->mode = ENGINE_READ(engine, RING_MI_MODE); 1160 1161 if (!HWS_NEEDS_PHYSICAL(i915)) { 1162 i915_reg_t mmio; 1163 1164 if (GRAPHICS_VER(i915) == 7) { 1165 switch (engine->id) { 1166 default: 1167 MISSING_CASE(engine->id); 1168 fallthrough; 1169 case RCS0: 1170 mmio = RENDER_HWS_PGA_GEN7; 1171 break; 1172 case BCS0: 1173 mmio = BLT_HWS_PGA_GEN7; 1174 break; 1175 case VCS0: 1176 mmio = BSD_HWS_PGA_GEN7; 1177 break; 1178 case VECS0: 1179 mmio = VEBOX_HWS_PGA_GEN7; 1180 break; 1181 } 1182 } else if (GRAPHICS_VER(engine->i915) == 6) { 1183 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 1184 } else { 1185 /* XXX: gen8 returns to sanity */ 1186 mmio = RING_HWS_PGA(engine->mmio_base); 1187 } 1188 1189 ee->hws = intel_uncore_read(engine->uncore, mmio); 1190 } 1191 1192 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine); 1193 1194 if (HAS_PPGTT(i915)) { 1195 int i; 1196 1197 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7); 1198 1199 if (GRAPHICS_VER(i915) == 6) { 1200 ee->vm_info.pp_dir_base = 1201 ENGINE_READ(engine, RING_PP_DIR_BASE_READ); 1202 } else if (GRAPHICS_VER(i915) == 7) { 1203 ee->vm_info.pp_dir_base = 1204 ENGINE_READ(engine, RING_PP_DIR_BASE); 1205 } else if (GRAPHICS_VER(i915) >= 8) { 1206 u32 base = engine->mmio_base; 1207 1208 for (i = 0; i < 4; i++) { 1209 ee->vm_info.pdp[i] = 1210 intel_uncore_read(engine->uncore, 1211 GEN8_RING_PDP_UDW(base, i)); 1212 ee->vm_info.pdp[i] <<= 32; 1213 ee->vm_info.pdp[i] |= 1214 intel_uncore_read(engine->uncore, 1215 GEN8_RING_PDP_LDW(base, i)); 1216 } 1217 } 1218 } 1219 } 1220 1221 static void record_request(const struct i915_request *request, 1222 struct i915_request_coredump *erq) 1223 { 1224 erq->flags = request->fence.flags; 1225 erq->context = request->fence.context; 1226 erq->seqno = request->fence.seqno; 1227 erq->sched_attr = request->sched.attr; 1228 erq->head = request->head; 1229 erq->tail = request->tail; 1230 1231 erq->pid = 0; 1232 rcu_read_lock(); 1233 if (!intel_context_is_closed(request->context)) { 1234 const struct i915_gem_context *ctx; 1235 1236 ctx = rcu_dereference(request->context->gem_context); 1237 if (ctx) 1238 erq->pid = pid_nr(ctx->pid); 1239 } 1240 rcu_read_unlock(); 1241 } 1242 1243 static void engine_record_execlists(struct intel_engine_coredump *ee) 1244 { 1245 const struct intel_engine_execlists * const el = &ee->engine->execlists; 1246 struct i915_request * const *port = el->active; 1247 unsigned int n = 0; 1248 1249 while (*port) 1250 record_request(*port++, &ee->execlist[n++]); 1251 1252 ee->num_ports = n; 1253 } 1254 1255 static bool record_context(struct i915_gem_context_coredump *e, 1256 const struct i915_request *rq) 1257 { 1258 struct i915_gem_context *ctx; 1259 struct task_struct *task; 1260 bool simulated; 1261 1262 rcu_read_lock(); 1263 ctx = rcu_dereference(rq->context->gem_context); 1264 if (ctx && !kref_get_unless_zero(&ctx->ref)) 1265 ctx = NULL; 1266 rcu_read_unlock(); 1267 if (!ctx) 1268 return true; 1269 1270 rcu_read_lock(); 1271 task = pid_task(ctx->pid, PIDTYPE_PID); 1272 if (task) { 1273 strcpy(e->comm, task->comm); 1274 e->pid = task->pid; 1275 } 1276 rcu_read_unlock(); 1277 1278 e->sched_attr = ctx->sched; 1279 e->guilty = atomic_read(&ctx->guilty_count); 1280 e->active = atomic_read(&ctx->active_count); 1281 1282 e->total_runtime = rq->context->runtime.total; 1283 e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg); 1284 1285 simulated = i915_gem_context_no_error_capture(ctx); 1286 1287 i915_gem_context_put(ctx); 1288 return simulated; 1289 } 1290 1291 struct intel_engine_capture_vma { 1292 struct intel_engine_capture_vma *next; 1293 struct i915_vma *vma; 1294 char name[16]; 1295 }; 1296 1297 static struct intel_engine_capture_vma * 1298 capture_vma(struct intel_engine_capture_vma *next, 1299 struct i915_vma *vma, 1300 const char *name, 1301 gfp_t gfp) 1302 { 1303 struct intel_engine_capture_vma *c; 1304 1305 if (!vma) 1306 return next; 1307 1308 c = kmalloc(sizeof(*c), gfp); 1309 if (!c) 1310 return next; 1311 1312 if (!i915_active_acquire_if_busy(&vma->active)) { 1313 kfree(c); 1314 return next; 1315 } 1316 1317 strcpy(c->name, name); 1318 c->vma = vma; /* reference held while active */ 1319 1320 c->next = next; 1321 return c; 1322 } 1323 1324 static struct intel_engine_capture_vma * 1325 capture_user(struct intel_engine_capture_vma *capture, 1326 const struct i915_request *rq, 1327 gfp_t gfp) 1328 { 1329 struct i915_capture_list *c; 1330 1331 for (c = rq->capture_list; c; c = c->next) 1332 capture = capture_vma(capture, c->vma, "user", gfp); 1333 1334 return capture; 1335 } 1336 1337 static void add_vma(struct intel_engine_coredump *ee, 1338 struct i915_vma_coredump *vma) 1339 { 1340 if (vma) { 1341 vma->next = ee->vma; 1342 ee->vma = vma; 1343 } 1344 } 1345 1346 struct intel_engine_coredump * 1347 intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) 1348 { 1349 struct intel_engine_coredump *ee; 1350 1351 ee = kzalloc(sizeof(*ee), gfp); 1352 if (!ee) 1353 return NULL; 1354 1355 ee->engine = engine; 1356 1357 engine_record_registers(ee); 1358 engine_record_execlists(ee); 1359 1360 return ee; 1361 } 1362 1363 struct intel_engine_capture_vma * 1364 intel_engine_coredump_add_request(struct intel_engine_coredump *ee, 1365 struct i915_request *rq, 1366 gfp_t gfp) 1367 { 1368 struct intel_engine_capture_vma *vma = NULL; 1369 1370 ee->simulated |= record_context(&ee->context, rq); 1371 if (ee->simulated) 1372 return NULL; 1373 1374 /* 1375 * We need to copy these to an anonymous buffer 1376 * as the simplest method to avoid being overwritten 1377 * by userspace. 1378 */ 1379 vma = capture_vma(vma, rq->batch, "batch", gfp); 1380 vma = capture_user(vma, rq, gfp); 1381 vma = capture_vma(vma, rq->ring->vma, "ring", gfp); 1382 vma = capture_vma(vma, rq->context->state, "HW context", gfp); 1383 1384 ee->rq_head = rq->head; 1385 ee->rq_post = rq->postfix; 1386 ee->rq_tail = rq->tail; 1387 1388 return vma; 1389 } 1390 1391 void 1392 intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, 1393 struct intel_engine_capture_vma *capture, 1394 struct i915_vma_compress *compress) 1395 { 1396 const struct intel_engine_cs *engine = ee->engine; 1397 1398 while (capture) { 1399 struct intel_engine_capture_vma *this = capture; 1400 struct i915_vma *vma = this->vma; 1401 1402 add_vma(ee, 1403 i915_vma_coredump_create(engine->gt, 1404 vma, this->name, 1405 compress)); 1406 1407 i915_active_release(&vma->active); 1408 1409 capture = this->next; 1410 kfree(this); 1411 } 1412 1413 add_vma(ee, 1414 i915_vma_coredump_create(engine->gt, 1415 engine->status_page.vma, 1416 "HW Status", 1417 compress)); 1418 1419 add_vma(ee, 1420 i915_vma_coredump_create(engine->gt, 1421 engine->wa_ctx.vma, 1422 "WA context", 1423 compress)); 1424 } 1425 1426 static struct intel_engine_coredump * 1427 capture_engine(struct intel_engine_cs *engine, 1428 struct i915_vma_compress *compress) 1429 { 1430 struct intel_engine_capture_vma *capture = NULL; 1431 struct intel_engine_coredump *ee; 1432 struct i915_request *rq; 1433 unsigned long flags; 1434 1435 ee = intel_engine_coredump_alloc(engine, GFP_KERNEL); 1436 if (!ee) 1437 return NULL; 1438 1439 spin_lock_irqsave(&engine->active.lock, flags); 1440 rq = intel_engine_find_active_request(engine); 1441 if (rq) 1442 capture = intel_engine_coredump_add_request(ee, rq, 1443 ATOMIC_MAYFAIL); 1444 spin_unlock_irqrestore(&engine->active.lock, flags); 1445 if (!capture) { 1446 kfree(ee); 1447 return NULL; 1448 } 1449 1450 intel_engine_coredump_add_vma(ee, capture, compress); 1451 1452 return ee; 1453 } 1454 1455 static void 1456 gt_record_engines(struct intel_gt_coredump *gt, 1457 intel_engine_mask_t engine_mask, 1458 struct i915_vma_compress *compress) 1459 { 1460 struct intel_engine_cs *engine; 1461 enum intel_engine_id id; 1462 1463 for_each_engine(engine, gt->_gt, id) { 1464 struct intel_engine_coredump *ee; 1465 1466 /* Refill our page pool before entering atomic section */ 1467 pool_refill(&compress->pool, ALLOW_FAIL); 1468 1469 ee = capture_engine(engine, compress); 1470 if (!ee) 1471 continue; 1472 1473 ee->hung = engine->mask & engine_mask; 1474 1475 gt->simulated |= ee->simulated; 1476 if (ee->simulated) { 1477 kfree(ee); 1478 continue; 1479 } 1480 1481 ee->next = gt->engine; 1482 gt->engine = ee; 1483 } 1484 } 1485 1486 static struct intel_uc_coredump * 1487 gt_record_uc(struct intel_gt_coredump *gt, 1488 struct i915_vma_compress *compress) 1489 { 1490 const struct intel_uc *uc = >->_gt->uc; 1491 struct intel_uc_coredump *error_uc; 1492 1493 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL); 1494 if (!error_uc) 1495 return NULL; 1496 1497 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw)); 1498 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw)); 1499 1500 /* Non-default firmware paths will be specified by the modparam. 1501 * As modparams are generally accesible from the userspace make 1502 * explicit copies of the firmware paths. 1503 */ 1504 error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); 1505 error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); 1506 error_uc->guc_log = 1507 i915_vma_coredump_create(gt->_gt, 1508 uc->guc.log.vma, "GuC log buffer", 1509 compress); 1510 1511 return error_uc; 1512 } 1513 1514 /* Capture all registers which don't fit into another category. */ 1515 static void gt_record_regs(struct intel_gt_coredump *gt) 1516 { 1517 struct intel_uncore *uncore = gt->_gt->uncore; 1518 struct drm_i915_private *i915 = uncore->i915; 1519 int i; 1520 1521 /* 1522 * General organization 1523 * 1. Registers specific to a single generation 1524 * 2. Registers which belong to multiple generations 1525 * 3. Feature specific registers. 1526 * 4. Everything else 1527 * Please try to follow the order. 1528 */ 1529 1530 /* 1: Registers specific to a single generation */ 1531 if (IS_VALLEYVIEW(i915)) { 1532 gt->gtier[0] = intel_uncore_read(uncore, GTIER); 1533 gt->ier = intel_uncore_read(uncore, VLV_IER); 1534 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); 1535 } 1536 1537 if (GRAPHICS_VER(i915) == 7) 1538 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); 1539 1540 if (GRAPHICS_VER(i915) >= 12) { 1541 gt->fault_data0 = intel_uncore_read(uncore, 1542 GEN12_FAULT_TLB_DATA0); 1543 gt->fault_data1 = intel_uncore_read(uncore, 1544 GEN12_FAULT_TLB_DATA1); 1545 } else if (GRAPHICS_VER(i915) >= 8) { 1546 gt->fault_data0 = intel_uncore_read(uncore, 1547 GEN8_FAULT_TLB_DATA0); 1548 gt->fault_data1 = intel_uncore_read(uncore, 1549 GEN8_FAULT_TLB_DATA1); 1550 } 1551 1552 if (GRAPHICS_VER(i915) == 6) { 1553 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); 1554 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL); 1555 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE); 1556 } 1557 1558 /* 2: Registers which belong to multiple generations */ 1559 if (GRAPHICS_VER(i915) >= 7) 1560 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); 1561 1562 if (GRAPHICS_VER(i915) >= 6) { 1563 gt->derrmr = intel_uncore_read(uncore, DERRMR); 1564 if (GRAPHICS_VER(i915) < 12) { 1565 gt->error = intel_uncore_read(uncore, ERROR_GEN6); 1566 gt->done_reg = intel_uncore_read(uncore, DONE_REG); 1567 } 1568 } 1569 1570 /* 3: Feature specific registers */ 1571 if (IS_GRAPHICS_VER(i915, 6, 7)) { 1572 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); 1573 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); 1574 } 1575 1576 if (IS_GRAPHICS_VER(i915, 8, 11)) 1577 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); 1578 1579 if (GRAPHICS_VER(i915) == 12) 1580 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); 1581 1582 if (GRAPHICS_VER(i915) >= 12) { 1583 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { 1584 gt->sfc_done[i] = 1585 intel_uncore_read(uncore, GEN12_SFC_DONE(i)); 1586 } 1587 1588 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); 1589 } 1590 1591 /* 4: Everything else */ 1592 if (GRAPHICS_VER(i915) >= 11) { 1593 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); 1594 gt->gtier[0] = 1595 intel_uncore_read(uncore, 1596 GEN11_RENDER_COPY_INTR_ENABLE); 1597 gt->gtier[1] = 1598 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE); 1599 gt->gtier[2] = 1600 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE); 1601 gt->gtier[3] = 1602 intel_uncore_read(uncore, 1603 GEN11_GPM_WGBOXPERF_INTR_ENABLE); 1604 gt->gtier[4] = 1605 intel_uncore_read(uncore, 1606 GEN11_CRYPTO_RSVD_INTR_ENABLE); 1607 gt->gtier[5] = 1608 intel_uncore_read(uncore, 1609 GEN11_GUNIT_CSME_INTR_ENABLE); 1610 gt->ngtier = 6; 1611 } else if (GRAPHICS_VER(i915) >= 8) { 1612 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); 1613 for (i = 0; i < 4; i++) 1614 gt->gtier[i] = 1615 intel_uncore_read(uncore, GEN8_GT_IER(i)); 1616 gt->ngtier = 4; 1617 } else if (HAS_PCH_SPLIT(i915)) { 1618 gt->ier = intel_uncore_read(uncore, DEIER); 1619 gt->gtier[0] = intel_uncore_read(uncore, GTIER); 1620 gt->ngtier = 1; 1621 } else if (GRAPHICS_VER(i915) == 2) { 1622 gt->ier = intel_uncore_read16(uncore, GEN2_IER); 1623 } else if (!IS_VALLEYVIEW(i915)) { 1624 gt->ier = intel_uncore_read(uncore, GEN2_IER); 1625 } 1626 gt->eir = intel_uncore_read(uncore, EIR); 1627 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); 1628 } 1629 1630 static void gt_record_info(struct intel_gt_coredump *gt) 1631 { 1632 memcpy(>->info, >->_gt->info, sizeof(struct intel_gt_info)); 1633 } 1634 1635 /* 1636 * Generate a semi-unique error code. The code is not meant to have meaning, The 1637 * code's only purpose is to try to prevent false duplicated bug reports by 1638 * grossly estimating a GPU error state. 1639 * 1640 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine 1641 * the hang if we could strip the GTT offset information from it. 1642 * 1643 * It's only a small step better than a random number in its current form. 1644 */ 1645 static u32 generate_ecode(const struct intel_engine_coredump *ee) 1646 { 1647 /* 1648 * IPEHR would be an ideal way to detect errors, as it's the gross 1649 * measure of "the command that hung." However, has some very common 1650 * synchronization commands which almost always appear in the case 1651 * strictly a client bug. Use instdone to differentiate those some. 1652 */ 1653 return ee ? ee->ipehr ^ ee->instdone.instdone : 0; 1654 } 1655 1656 static const char *error_msg(struct i915_gpu_coredump *error) 1657 { 1658 struct intel_engine_coredump *first = NULL; 1659 unsigned int hung_classes = 0; 1660 struct intel_gt_coredump *gt; 1661 int len; 1662 1663 for (gt = error->gt; gt; gt = gt->next) { 1664 struct intel_engine_coredump *cs; 1665 1666 for (cs = gt->engine; cs; cs = cs->next) { 1667 if (cs->hung) { 1668 hung_classes |= BIT(cs->engine->uabi_class); 1669 if (!first) 1670 first = cs; 1671 } 1672 } 1673 } 1674 1675 len = scnprintf(error->error_msg, sizeof(error->error_msg), 1676 "GPU HANG: ecode %d:%x:%08x", 1677 GRAPHICS_VER(error->i915), hung_classes, 1678 generate_ecode(first)); 1679 if (first && first->context.pid) { 1680 /* Just show the first executing process, more is confusing */ 1681 len += scnprintf(error->error_msg + len, 1682 sizeof(error->error_msg) - len, 1683 ", in %s [%d]", 1684 first->context.comm, first->context.pid); 1685 } 1686 1687 return error->error_msg; 1688 } 1689 1690 static void capture_gen(struct i915_gpu_coredump *error) 1691 { 1692 struct drm_i915_private *i915 = error->i915; 1693 1694 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); 1695 error->suspended = i915->runtime_pm.suspended; 1696 1697 error->iommu = -1; 1698 #ifdef CONFIG_INTEL_IOMMU 1699 error->iommu = intel_iommu_gfx_mapped; 1700 #endif 1701 error->reset_count = i915_reset_count(&i915->gpu_error); 1702 error->suspend_count = i915->suspend_count; 1703 1704 i915_params_copy(&error->params, &i915->params); 1705 memcpy(&error->device_info, 1706 INTEL_INFO(i915), 1707 sizeof(error->device_info)); 1708 memcpy(&error->runtime_info, 1709 RUNTIME_INFO(i915), 1710 sizeof(error->runtime_info)); 1711 error->driver_caps = i915->caps; 1712 } 1713 1714 struct i915_gpu_coredump * 1715 i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) 1716 { 1717 struct i915_gpu_coredump *error; 1718 1719 if (!i915->params.error_capture) 1720 return NULL; 1721 1722 error = kzalloc(sizeof(*error), gfp); 1723 if (!error) 1724 return NULL; 1725 1726 kref_init(&error->ref); 1727 error->i915 = i915; 1728 1729 error->time = ktime_get_real(); 1730 error->boottime = ktime_get_boottime(); 1731 error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); 1732 error->capture = jiffies; 1733 1734 capture_gen(error); 1735 1736 return error; 1737 } 1738 1739 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) 1740 1741 struct intel_gt_coredump * 1742 intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp) 1743 { 1744 struct intel_gt_coredump *gc; 1745 1746 gc = kzalloc(sizeof(*gc), gfp); 1747 if (!gc) 1748 return NULL; 1749 1750 gc->_gt = gt; 1751 gc->awake = intel_gt_pm_is_awake(gt); 1752 1753 gt_record_regs(gc); 1754 gt_record_fences(gc); 1755 1756 return gc; 1757 } 1758 1759 struct i915_vma_compress * 1760 i915_vma_capture_prepare(struct intel_gt_coredump *gt) 1761 { 1762 struct i915_vma_compress *compress; 1763 1764 compress = kmalloc(sizeof(*compress), ALLOW_FAIL); 1765 if (!compress) 1766 return NULL; 1767 1768 if (!compress_init(compress)) { 1769 kfree(compress); 1770 return NULL; 1771 } 1772 1773 return compress; 1774 } 1775 1776 void i915_vma_capture_finish(struct intel_gt_coredump *gt, 1777 struct i915_vma_compress *compress) 1778 { 1779 if (!compress) 1780 return; 1781 1782 compress_fini(compress); 1783 kfree(compress); 1784 } 1785 1786 struct i915_gpu_coredump * 1787 i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) 1788 { 1789 struct drm_i915_private *i915 = gt->i915; 1790 struct i915_gpu_coredump *error; 1791 1792 /* Check if GPU capture has been disabled */ 1793 error = READ_ONCE(i915->gpu_error.first_error); 1794 if (IS_ERR(error)) 1795 return error; 1796 1797 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL); 1798 if (!error) 1799 return ERR_PTR(-ENOMEM); 1800 1801 error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL); 1802 if (error->gt) { 1803 struct i915_vma_compress *compress; 1804 1805 compress = i915_vma_capture_prepare(error->gt); 1806 if (!compress) { 1807 kfree(error->gt); 1808 kfree(error); 1809 return ERR_PTR(-ENOMEM); 1810 } 1811 1812 gt_record_info(error->gt); 1813 gt_record_engines(error->gt, engine_mask, compress); 1814 1815 if (INTEL_INFO(i915)->has_gt_uc) 1816 error->gt->uc = gt_record_uc(error->gt, compress); 1817 1818 i915_vma_capture_finish(error->gt, compress); 1819 1820 error->simulated |= error->gt->simulated; 1821 } 1822 1823 error->overlay = intel_overlay_capture_error_state(i915); 1824 1825 return error; 1826 } 1827 1828 void i915_error_state_store(struct i915_gpu_coredump *error) 1829 { 1830 struct drm_i915_private *i915; 1831 static bool warned; 1832 1833 if (IS_ERR_OR_NULL(error)) 1834 return; 1835 1836 i915 = error->i915; 1837 drm_info(&i915->drm, "%s\n", error_msg(error)); 1838 1839 if (error->simulated || 1840 cmpxchg(&i915->gpu_error.first_error, NULL, error)) 1841 return; 1842 1843 i915_gpu_coredump_get(error); 1844 1845 if (!xchg(&warned, true) && 1846 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { 1847 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); 1848 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); 1849 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); 1850 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1851 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); 1852 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n", 1853 i915->drm.primary->index); 1854 } 1855 } 1856 1857 /** 1858 * i915_capture_error_state - capture an error record for later analysis 1859 * @gt: intel_gt which originated the hang 1860 * @engine_mask: hung engines 1861 * 1862 * 1863 * Should be called when an error is detected (either a hang or an error 1864 * interrupt) to capture error state from the time of the error. Fills 1865 * out a structure which becomes available in debugfs for user level tools 1866 * to pick up. 1867 */ 1868 void i915_capture_error_state(struct intel_gt *gt, 1869 intel_engine_mask_t engine_mask) 1870 { 1871 struct i915_gpu_coredump *error; 1872 1873 error = i915_gpu_coredump(gt, engine_mask); 1874 if (IS_ERR(error)) { 1875 cmpxchg(>->i915->gpu_error.first_error, NULL, error); 1876 return; 1877 } 1878 1879 i915_error_state_store(error); 1880 i915_gpu_coredump_put(error); 1881 } 1882 1883 struct i915_gpu_coredump * 1884 i915_first_error_state(struct drm_i915_private *i915) 1885 { 1886 struct i915_gpu_coredump *error; 1887 1888 spin_lock_irq(&i915->gpu_error.lock); 1889 error = i915->gpu_error.first_error; 1890 if (!IS_ERR_OR_NULL(error)) 1891 i915_gpu_coredump_get(error); 1892 spin_unlock_irq(&i915->gpu_error.lock); 1893 1894 return error; 1895 } 1896 1897 void i915_reset_error_state(struct drm_i915_private *i915) 1898 { 1899 struct i915_gpu_coredump *error; 1900 1901 spin_lock_irq(&i915->gpu_error.lock); 1902 error = i915->gpu_error.first_error; 1903 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ 1904 i915->gpu_error.first_error = NULL; 1905 spin_unlock_irq(&i915->gpu_error.lock); 1906 1907 if (!IS_ERR_OR_NULL(error)) 1908 i915_gpu_coredump_put(error); 1909 } 1910 1911 void i915_disable_error_state(struct drm_i915_private *i915, int err) 1912 { 1913 spin_lock_irq(&i915->gpu_error.lock); 1914 if (!i915->gpu_error.first_error) 1915 i915->gpu_error.first_error = ERR_PTR(err); 1916 spin_unlock_irq(&i915->gpu_error.lock); 1917 } 1918