/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
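
/*
 * A note for readers: each helper above returns either "" or a token with a
 * leading space, so print_error_buffers() below can build a buffer line by
 * plain concatenation. A pinned, X-tiled, dirty object last used by the
 * render ring would, for example, pick up the suffix " P X dirty render"
 * (a sketch; the field values are invented).
 */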
" purgeable" : ""; 78 } 79 80 static bool __i915_error_ok(struct drm_i915_error_state_buf *e) 81 { 82 83 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { 84 e->err = -ENOSPC; 85 return false; 86 } 87 88 if (e->bytes == e->size - 1 || e->err) 89 return false; 90 91 return true; 92 } 93 94 static bool __i915_error_seek(struct drm_i915_error_state_buf *e, 95 unsigned len) 96 { 97 if (e->pos + len <= e->start) { 98 e->pos += len; 99 return false; 100 } 101 102 /* First vsnprintf needs to fit in its entirety for memmove */ 103 if (len >= e->size) { 104 e->err = -EIO; 105 return false; 106 } 107 108 return true; 109 } 110 111 static void __i915_error_advance(struct drm_i915_error_state_buf *e, 112 unsigned len) 113 { 114 /* If this is first printf in this window, adjust it so that 115 * start position matches start of the buffer 116 */ 117 118 if (e->pos < e->start) { 119 const size_t off = e->start - e->pos; 120 121 /* Should not happen but be paranoid */ 122 if (off > len || e->bytes) { 123 e->err = -EIO; 124 return; 125 } 126 127 memmove(e->buf, e->buf + off, len - off); 128 e->bytes = len - off; 129 e->pos = e->start; 130 return; 131 } 132 133 e->bytes += len; 134 e->pos += len; 135 } 136 137 static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 138 const char *f, va_list args) 139 { 140 unsigned len; 141 142 if (!__i915_error_ok(e)) 143 return; 144 145 /* Seek the first printf which is hits start position */ 146 if (e->pos < e->start) { 147 va_list tmp; 148 149 va_copy(tmp, args); 150 len = vsnprintf(NULL, 0, f, tmp); 151 va_end(tmp); 152 153 if (!__i915_error_seek(e, len)) 154 return; 155 } 156 157 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); 158 if (len >= e->size - e->bytes) 159 len = e->size - e->bytes - 1; 160 161 __i915_error_advance(e, len); 162 } 163 164 static void i915_error_puts(struct drm_i915_error_state_buf *e, 165 const char *str) 166 { 167 unsigned len; 168 169 if (!__i915_error_ok(e)) 170 return; 171 172 len = strlen(str); 173 174 /* Seek the first printf which is hits start position */ 175 if (e->pos < e->start) { 176 if (!__i915_error_seek(e, len)) 177 return; 178 } 179 180 if (len >= e->size - e->bytes) 181 len = e->size - e->bytes - 1; 182 memcpy(e->buf + e->bytes, str, len); 183 184 __i915_error_advance(e, len); 185 } 186 187 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 188 #define err_puts(e, s) i915_error_puts(e, s) 189 190 static void print_error_buffers(struct drm_i915_error_state_buf *m, 191 const char *name, 192 struct drm_i915_error_buffer *err, 193 int count) 194 { 195 err_printf(m, " %s [%d]:\n", name, count); 196 197 while (count--) { 198 err_printf(m, " %08x %8u %02x %02x %x %x", 199 err->gtt_offset, 200 err->size, 201 err->read_domains, 202 err->write_domain, 203 err->rseqno, err->wseqno); 204 err_puts(m, pin_flag(err->pinned)); 205 err_puts(m, tiling_flag(err->tiling)); 206 err_puts(m, dirty_flag(err->dirty)); 207 err_puts(m, purgeable_flag(err->purgeable)); 208 err_puts(m, err->userptr ? " userptr" : ""); 209 err_puts(m, err->ring != -1 ? 
" " : ""); 210 err_puts(m, ring_str(err->ring)); 211 err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); 212 213 if (err->name) 214 err_printf(m, " (name: %d)", err->name); 215 if (err->fence_reg != I915_FENCE_REG_NONE) 216 err_printf(m, " (fence: %d)", err->fence_reg); 217 218 err_puts(m, "\n"); 219 err++; 220 } 221 } 222 223 static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) 224 { 225 switch (a) { 226 case HANGCHECK_IDLE: 227 return "idle"; 228 case HANGCHECK_WAIT: 229 return "wait"; 230 case HANGCHECK_ACTIVE: 231 return "active"; 232 case HANGCHECK_ACTIVE_LOOP: 233 return "active (loop)"; 234 case HANGCHECK_KICK: 235 return "kick"; 236 case HANGCHECK_HUNG: 237 return "hung"; 238 } 239 240 return "unknown"; 241 } 242 243 static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 244 struct drm_device *dev, 245 struct drm_i915_error_state *error, 246 int ring_idx) 247 { 248 struct drm_i915_error_ring *ring = &error->ring[ring_idx]; 249 250 if (!ring->valid) 251 return; 252 253 err_printf(m, "%s command stream:\n", ring_str(ring_idx)); 254 err_printf(m, " HEAD: 0x%08x\n", ring->head); 255 err_printf(m, " TAIL: 0x%08x\n", ring->tail); 256 err_printf(m, " CTL: 0x%08x\n", ring->ctl); 257 err_printf(m, " HWS: 0x%08x\n", ring->hws); 258 err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd); 259 err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir); 260 err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr); 261 err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone); 262 if (INTEL_INFO(dev)->gen >= 4) { 263 err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr); 264 err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate); 265 err_printf(m, " INSTPS: 0x%08x\n", ring->instps); 266 } 267 err_printf(m, " INSTPM: 0x%08x\n", ring->instpm); 268 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr), 269 lower_32_bits(ring->faddr)); 270 if (INTEL_INFO(dev)->gen >= 6) { 271 err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi); 272 err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg); 273 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 274 ring->semaphore_mboxes[0], 275 ring->semaphore_seqno[0]); 276 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 277 ring->semaphore_mboxes[1], 278 ring->semaphore_seqno[1]); 279 if (HAS_VEBOX(dev)) { 280 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", 281 ring->semaphore_mboxes[2], 282 ring->semaphore_seqno[2]); 283 } 284 } 285 if (USES_PPGTT(dev)) { 286 err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode); 287 288 if (INTEL_INFO(dev)->gen >= 8) { 289 int i; 290 for (i = 0; i < 4; i++) 291 err_printf(m, " PDP%d: 0x%016llx\n", 292 i, ring->vm_info.pdp[i]); 293 } else { 294 err_printf(m, " PP_DIR_BASE: 0x%08x\n", 295 ring->vm_info.pp_dir_base); 296 } 297 } 298 err_printf(m, " seqno: 0x%08x\n", ring->seqno); 299 err_printf(m, " waiting: %s\n", yesno(ring->waiting)); 300 err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); 301 err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); 302 err_printf(m, " hangcheck: %s [%d]\n", 303 hangcheck_action_to_str(ring->hangcheck_action), 304 ring->hangcheck_score); 305 } 306 307 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
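
/*
 * print_error_obj() dumps an object one dword per line as "offset : value",
 * so a captured batchbuffer begins like this (contents invented):
 *
 *	00000000 : 7a000002
 *	00000004 : 00100002
 *	00000008 : 00000000
 */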
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
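
/*
 * Usage sketch for the two entry points above, modelled on the debugfs
 * reader in i915_debugfs.c (error handling and buffer release elided; the
 * helper names are from that file and may differ between kernel versions):
 *
 *	struct drm_i915_error_state_buf error_str;
 *	int ret;
 *
 *	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, *pos);
 *	if (ret)
 *		return ret;
 *
 *	ret = i915_error_state_to_str(&error_str, error_priv);
 *	// then copy error_str.buf (error_str.bytes long) to userspace and
 *	// free the buffer
 */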
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
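
/*
 * In short, i915_error_object_create() chooses between two copy paths: an
 * uncached read through the GGTT aperture (mandatory for stolen memory and
 * unusable for snooped pages on non-LLC parts) or a clflush + kmap_atomic
 * copy of the backing pages. Either way the copy runs with interrupts off
 * and uses GFP_ATOMIC, because capture may be called from IRQ context.
 */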
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_read_req ?
			i915_gem_request_get_ring(obj->last_read_req)->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear when
	 * the hang is strictly a client bug. Use instdone to differentiate
	 * those somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
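
/*
 * The returned code feeds the "GPU HANG: ecode" line as
 * gen:ring_id:(IPEHR ^ INSTDONE). For a hypothetical gen7 render-ring hang
 * with IPEHR == 0x7a000002 and INSTDONE == 0xfffffffe the header would be:
 *
 *	GPU HANG: ecode 7:0:0x85fffffc
 */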
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
							      (i * 4));
	} else if (IS_GEN5(dev) || IS_GEN4(dev))
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
						      (i * 8));
	else if (INTEL_INFO(dev)->gen >= 6)
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
						      (i * 8));
}
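
/*
 * Fence register layout as captured above, derived from the loops rather
 * than from documentation:
 *
 *	gen2/3:	8 x 32-bit registers, plus 8 more on 945G/945GM/G33
 *	gen4/5:	16 x 64-bit registers
 *	gen6+:	num_fence_regs x 64-bit registers
 */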
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}

static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
980 &request->ctx->ppgtt->base : 981 &dev_priv->gtt.base; 982 983 /* We need to copy these to an anonymous buffer 984 * as the simplest method to avoid being overwritten 985 * by userspace. 986 */ 987 error->ring[i].batchbuffer = 988 i915_error_object_create(dev_priv, 989 request->batch_obj, 990 vm); 991 992 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) 993 error->ring[i].wa_batchbuffer = 994 i915_error_ggtt_object_create(dev_priv, 995 ring->scratch.obj); 996 997 if (request->file_priv) { 998 struct task_struct *task; 999 1000 rcu_read_lock(); 1001 task = pid_task(request->file_priv->file->pid, 1002 PIDTYPE_PID); 1003 if (task) { 1004 strcpy(error->ring[i].comm, task->comm); 1005 error->ring[i].pid = task->pid; 1006 } 1007 rcu_read_unlock(); 1008 } 1009 } 1010 1011 if (i915.enable_execlists) { 1012 /* TODO: This is only a small fix to keep basic error 1013 * capture working, but we need to add more information 1014 * for it to be useful (e.g. dump the context being 1015 * executed). 1016 */ 1017 if (request) 1018 rbuf = request->ctx->engine[ring->id].ringbuf; 1019 else 1020 rbuf = ring->default_context->engine[ring->id].ringbuf; 1021 } else 1022 rbuf = ring->buffer; 1023 1024 error->ring[i].cpu_ring_head = rbuf->head; 1025 error->ring[i].cpu_ring_tail = rbuf->tail; 1026 1027 error->ring[i].ringbuffer = 1028 i915_error_ggtt_object_create(dev_priv, rbuf->obj); 1029 1030 error->ring[i].hws_page = 1031 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); 1032 1033 i915_gem_record_active_context(ring, error, &error->ring[i]); 1034 1035 count = 0; 1036 list_for_each_entry(request, &ring->request_list, list) 1037 count++; 1038 1039 error->ring[i].num_requests = count; 1040 error->ring[i].requests = 1041 kcalloc(count, sizeof(*error->ring[i].requests), 1042 GFP_ATOMIC); 1043 if (error->ring[i].requests == NULL) { 1044 error->ring[i].num_requests = 0; 1045 continue; 1046 } 1047 1048 count = 0; 1049 list_for_each_entry(request, &ring->request_list, list) { 1050 struct drm_i915_error_request *erq; 1051 1052 erq = &error->ring[i].requests[count++]; 1053 erq->seqno = request->seqno; 1054 erq->jiffies = request->emitted_jiffies; 1055 erq->tail = request->postfix; 1056 } 1057 } 1058 } 1059 1060 /* FIXME: Since pin count/bound list is global, we duplicate what we capture per 1061 * VM. 
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}
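
/*
 * Note the allocation trick in i915_gem_capture_vm(): a single kcalloc()
 * holds both arrays for a VM back to back, with the pinned entries starting
 * right after the active ones, so one kfree() of the active pointer releases
 * both (sketch of the layout):
 *
 *	active_bo[0] .. active_bo[A-1] | pinned_bo[0] .. pinned_bo[P-1]
 */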
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}

static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
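
/*
 * A fully populated message then reads like this (all values invented):
 *
 *	GPU HANG: ecode 7:0:0x85fffffc, in gnome-shell [1234], reason: Ring hung, action: reset
 */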
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
" LLC" : " snooped"; 1347 case I915_CACHE_L3_LLC: return " L3+LLC"; 1348 case I915_CACHE_WT: return " WT"; 1349 default: return ""; 1350 } 1351 } 1352 1353 /* NB: please notice the memset */ 1354 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) 1355 { 1356 struct drm_i915_private *dev_priv = dev->dev_private; 1357 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1358 1359 if (IS_GEN2(dev) || IS_GEN3(dev)) 1360 instdone[0] = I915_READ(INSTDONE); 1361 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1362 instdone[0] = I915_READ(INSTDONE_I965); 1363 instdone[1] = I915_READ(INSTDONE1); 1364 } else if (INTEL_INFO(dev)->gen >= 7) { 1365 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1366 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1367 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1368 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 1369 } 1370 } 1371