/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_display_types.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	lockdep_assert_held(&engine->active.lock);

	if (!i915_request_is_active(rq))
		return;

	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}

static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}

static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	lockdep_assert_held(&rq->engine->active.lock);
	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct intel_gt *gt,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(engine);
		}
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}

static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We rather take context corruption instead of
		 * failed reset with a wedged driver/gpu. And
		 * active bb execution case should be covered by
		 * stop_engines() we have before the reset.
		 */
	}

	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt->i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(i915);
}

bool intel_has_reset_engine(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

/* Force userspace to fault and revalidate its fenced GGTT mmaps after the reset. */
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt->i915, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(gt->i915);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}

static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, gt->i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt->i915, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt->i915, id)
		engine->cancel_requests(engine);

	reset_finish(gt, awake);

	GEM_TRACE("end\n");
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	mutex_lock(&gt->reset.mutex);
	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	unsigned long flags;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	if (!gt->scratch) /* Never fully initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		spin_unlock_irqrestore(&timelines->lock, flags);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);

		/* Restart iteration after dropping the lock */
		spin_lock_irqsave(&timelines->lock, flags);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	intel_gt_sanitize(gt, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt->i915, id) {
		ret = engine->resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *  - reset the chip using the reset reg
 *  - re-init context state
 *  - re-init hardware status page
 *  - re-init ring buffer
 *  - re-init interrupt state
 *  - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GEM_TRACE("flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		dev_notice(gt->i915->drm.dev,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt->i915)) {
		if (i915_modparams.reset)
			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(gt->i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

	intel_gt_queue_hangcheck(gt);

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset the engine (which will force the engine to idle)
 *  - re-init/configure the engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	if (!engine->gt->uc.guc.execbuf_client)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put(engine);
	return ret;
}

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915, engine_mask, msg);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt->i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	for_each_engine(engine, gt->i915, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}

int intel_gt_reset_trylock(struct intel_gt *gt)
{
	int srcu;

	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return srcu;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&gt->i915->drm.struct_mutex))
		return -EAGAIN;

	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ?
		-EIO : 0;
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->gt->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#endif