1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include <drm/drm_managed.h> 7 #include <drm/intel-gtt.h> 8 9 #include "gem/i915_gem_internal.h" 10 #include "gem/i915_gem_lmem.h" 11 #include "pxp/intel_pxp.h" 12 13 #include "i915_drv.h" 14 #include "i915_perf_oa_regs.h" 15 #include "i915_reg.h" 16 #include "intel_context.h" 17 #include "intel_engine_pm.h" 18 #include "intel_engine_regs.h" 19 #include "intel_ggtt_gmch.h" 20 #include "intel_gt.h" 21 #include "intel_gt_buffer_pool.h" 22 #include "intel_gt_clock_utils.h" 23 #include "intel_gt_debugfs.h" 24 #include "intel_gt_mcr.h" 25 #include "intel_gt_pm.h" 26 #include "intel_gt_regs.h" 27 #include "intel_gt_requests.h" 28 #include "intel_migrate.h" 29 #include "intel_mocs.h" 30 #include "intel_pci_config.h" 31 #include "intel_pm.h" 32 #include "intel_rc6.h" 33 #include "intel_renderstate.h" 34 #include "intel_rps.h" 35 #include "intel_sa_media.h" 36 #include "intel_gt_sysfs.h" 37 #include "intel_uncore.h" 38 #include "shmem_utils.h" 39 40 void intel_gt_common_init_early(struct intel_gt *gt) 41 { 42 spin_lock_init(gt->irq_lock); 43 44 INIT_LIST_HEAD(>->closed_vma); 45 spin_lock_init(>->closed_lock); 46 47 init_llist_head(>->watchdog.list); 48 INIT_WORK(>->watchdog.work, intel_gt_watchdog_work); 49 50 intel_gt_init_buffer_pool(gt); 51 intel_gt_init_reset(gt); 52 intel_gt_init_requests(gt); 53 intel_gt_init_timelines(gt); 54 mutex_init(>->tlb.invalidate_lock); 55 seqcount_mutex_init(>->tlb.seqno, >->tlb.invalidate_lock); 56 intel_gt_pm_init_early(gt); 57 58 intel_wopcm_init_early(>->wopcm); 59 intel_uc_init_early(>->uc); 60 intel_rps_init_early(>->rps); 61 } 62 63 /* Preliminary initialization of Tile 0 */ 64 int intel_root_gt_init_early(struct drm_i915_private *i915) 65 { 66 struct intel_gt *gt = to_gt(i915); 67 68 gt->i915 = i915; 69 gt->uncore = &i915->uncore; 70 gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL); 71 if (!gt->irq_lock) 72 return -ENOMEM; 73 74 intel_gt_common_init_early(gt); 75 76 return 0; 77 } 78 79 static int intel_gt_probe_lmem(struct intel_gt *gt) 80 { 81 struct drm_i915_private *i915 = gt->i915; 82 unsigned int instance = gt->info.id; 83 int id = INTEL_REGION_LMEM_0 + instance; 84 struct intel_memory_region *mem; 85 int err; 86 87 mem = intel_gt_setup_lmem(gt); 88 if (IS_ERR(mem)) { 89 err = PTR_ERR(mem); 90 if (err == -ENODEV) 91 return 0; 92 93 drm_err(&i915->drm, 94 "Failed to setup region(%d) type=%d\n", 95 err, INTEL_MEMORY_LOCAL); 96 return err; 97 } 98 99 mem->id = id; 100 mem->instance = instance; 101 102 intel_memory_region_set_name(mem, "local%u", mem->instance); 103 104 GEM_BUG_ON(!HAS_REGION(i915, id)); 105 GEM_BUG_ON(i915->mm.regions[id]); 106 i915->mm.regions[id] = mem; 107 108 return 0; 109 } 110 111 int intel_gt_assign_ggtt(struct intel_gt *gt) 112 { 113 gt->ggtt = drmm_kzalloc(>->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL); 114 115 return gt->ggtt ? 0 : -ENOMEM; 116 } 117 118 int intel_gt_init_mmio(struct intel_gt *gt) 119 { 120 intel_gt_init_clock_frequency(gt); 121 122 intel_uc_init_mmio(>->uc); 123 intel_sseu_info_init(gt); 124 intel_gt_mcr_init(gt); 125 126 return intel_engines_init_mmio(gt); 127 } 128 129 static void init_unused_ring(struct intel_gt *gt, u32 base) 130 { 131 struct intel_uncore *uncore = gt->uncore; 132 133 intel_uncore_write(uncore, RING_CTL(base), 0); 134 intel_uncore_write(uncore, RING_HEAD(base), 0); 135 intel_uncore_write(uncore, RING_TAIL(base), 0); 136 intel_uncore_write(uncore, RING_START(base), 0); 137 } 138 139 static void init_unused_rings(struct intel_gt *gt) 140 { 141 struct drm_i915_private *i915 = gt->i915; 142 143 if (IS_I830(i915)) { 144 init_unused_ring(gt, PRB1_BASE); 145 init_unused_ring(gt, SRB0_BASE); 146 init_unused_ring(gt, SRB1_BASE); 147 init_unused_ring(gt, SRB2_BASE); 148 init_unused_ring(gt, SRB3_BASE); 149 } else if (GRAPHICS_VER(i915) == 2) { 150 init_unused_ring(gt, SRB0_BASE); 151 init_unused_ring(gt, SRB1_BASE); 152 } else if (GRAPHICS_VER(i915) == 3) { 153 init_unused_ring(gt, PRB1_BASE); 154 init_unused_ring(gt, PRB2_BASE); 155 } 156 } 157 158 int intel_gt_init_hw(struct intel_gt *gt) 159 { 160 struct drm_i915_private *i915 = gt->i915; 161 struct intel_uncore *uncore = gt->uncore; 162 int ret; 163 164 gt->last_init_time = ktime_get(); 165 166 /* Double layer security blanket, see i915_gem_init() */ 167 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 168 169 if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9) 170 intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); 171 172 if (IS_HASWELL(i915)) 173 intel_uncore_write(uncore, 174 HSW_MI_PREDICATE_RESULT_2, 175 IS_HSW_GT3(i915) ? 176 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 177 178 /* Apply the GT workarounds... */ 179 intel_gt_apply_workarounds(gt); 180 /* ...and determine whether they are sticking. */ 181 intel_gt_verify_workarounds(gt, "init"); 182 183 intel_gt_init_swizzling(gt); 184 185 /* 186 * At least 830 can leave some of the unused rings 187 * "active" (ie. head != tail) after resume which 188 * will prevent c3 entry. Makes sure all unused rings 189 * are totally idle. 190 */ 191 init_unused_rings(gt); 192 193 ret = i915_ppgtt_init_hw(gt); 194 if (ret) { 195 drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret); 196 goto out; 197 } 198 199 /* We can't enable contexts until all firmware is loaded */ 200 ret = intel_uc_init_hw(>->uc); 201 if (ret) { 202 i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); 203 goto out; 204 } 205 206 intel_mocs_init(gt); 207 208 out: 209 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 210 return ret; 211 } 212 213 static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) 214 { 215 intel_uncore_rmw(uncore, reg, 0, set); 216 } 217 218 static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) 219 { 220 intel_uncore_rmw(uncore, reg, clr, 0); 221 } 222 223 static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) 224 { 225 intel_uncore_rmw(uncore, reg, 0, 0); 226 } 227 228 static void gen6_clear_engine_error_register(struct intel_engine_cs *engine) 229 { 230 GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); 231 GEN6_RING_FAULT_REG_POSTING_READ(engine); 232 } 233 234 i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt) 235 { 236 /* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */ 237 if (GRAPHICS_VER(gt->i915) < 11) 238 return INVALID_MMIO_REG; 239 240 return gt->type == GT_MEDIA ? 241 MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS; 242 } 243 244 void 245 intel_gt_clear_error_registers(struct intel_gt *gt, 246 intel_engine_mask_t engine_mask) 247 { 248 struct drm_i915_private *i915 = gt->i915; 249 struct intel_uncore *uncore = gt->uncore; 250 u32 eir; 251 252 if (GRAPHICS_VER(i915) != 2) 253 clear_register(uncore, PGTBL_ER); 254 255 if (GRAPHICS_VER(i915) < 4) 256 clear_register(uncore, IPEIR(RENDER_RING_BASE)); 257 else 258 clear_register(uncore, IPEIR_I965); 259 260 clear_register(uncore, EIR); 261 eir = intel_uncore_read(uncore, EIR); 262 if (eir) { 263 /* 264 * some errors might have become stuck, 265 * mask them. 266 */ 267 drm_dbg(>->i915->drm, "EIR stuck: 0x%08x, masking\n", eir); 268 rmw_set(uncore, EMR, eir); 269 intel_uncore_write(uncore, GEN2_IIR, 270 I915_MASTER_ERROR_INTERRUPT); 271 } 272 273 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 274 intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG, 275 RING_FAULT_VALID, 0); 276 intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG); 277 } else if (GRAPHICS_VER(i915) >= 12) { 278 rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID); 279 intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG); 280 } else if (GRAPHICS_VER(i915) >= 8) { 281 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); 282 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); 283 } else if (GRAPHICS_VER(i915) >= 6) { 284 struct intel_engine_cs *engine; 285 enum intel_engine_id id; 286 287 for_each_engine_masked(engine, gt, engine_mask, id) 288 gen6_clear_engine_error_register(engine); 289 } 290 } 291 292 static void gen6_check_faults(struct intel_gt *gt) 293 { 294 struct intel_engine_cs *engine; 295 enum intel_engine_id id; 296 u32 fault; 297 298 for_each_engine(engine, gt, id) { 299 fault = GEN6_RING_FAULT_REG_READ(engine); 300 if (fault & RING_FAULT_VALID) { 301 drm_dbg(&engine->i915->drm, "Unexpected fault\n" 302 "\tAddr: 0x%08lx\n" 303 "\tAddress space: %s\n" 304 "\tSource ID: %d\n" 305 "\tType: %d\n", 306 fault & PAGE_MASK, 307 fault & RING_FAULT_GTTSEL_MASK ? 308 "GGTT" : "PPGTT", 309 RING_FAULT_SRCID(fault), 310 RING_FAULT_FAULT_TYPE(fault)); 311 } 312 } 313 } 314 315 static void xehp_check_faults(struct intel_gt *gt) 316 { 317 u32 fault; 318 319 /* 320 * Although the fault register now lives in an MCR register range, 321 * the GAM registers are special and we only truly need to read 322 * the "primary" GAM instance rather than handling each instance 323 * individually. intel_gt_mcr_read_any() will automatically steer 324 * toward the primary instance. 325 */ 326 fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG); 327 if (fault & RING_FAULT_VALID) { 328 u32 fault_data0, fault_data1; 329 u64 fault_addr; 330 331 fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0); 332 fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1); 333 334 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | 335 ((u64)fault_data0 << 12); 336 337 drm_dbg(>->i915->drm, "Unexpected fault\n" 338 "\tAddr: 0x%08x_%08x\n" 339 "\tAddress space: %s\n" 340 "\tEngine ID: %d\n" 341 "\tSource ID: %d\n" 342 "\tType: %d\n", 343 upper_32_bits(fault_addr), lower_32_bits(fault_addr), 344 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", 345 GEN8_RING_FAULT_ENGINE_ID(fault), 346 RING_FAULT_SRCID(fault), 347 RING_FAULT_FAULT_TYPE(fault)); 348 } 349 } 350 351 static void gen8_check_faults(struct intel_gt *gt) 352 { 353 struct intel_uncore *uncore = gt->uncore; 354 i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg; 355 u32 fault; 356 357 if (GRAPHICS_VER(gt->i915) >= 12) { 358 fault_reg = GEN12_RING_FAULT_REG; 359 fault_data0_reg = GEN12_FAULT_TLB_DATA0; 360 fault_data1_reg = GEN12_FAULT_TLB_DATA1; 361 } else { 362 fault_reg = GEN8_RING_FAULT_REG; 363 fault_data0_reg = GEN8_FAULT_TLB_DATA0; 364 fault_data1_reg = GEN8_FAULT_TLB_DATA1; 365 } 366 367 fault = intel_uncore_read(uncore, fault_reg); 368 if (fault & RING_FAULT_VALID) { 369 u32 fault_data0, fault_data1; 370 u64 fault_addr; 371 372 fault_data0 = intel_uncore_read(uncore, fault_data0_reg); 373 fault_data1 = intel_uncore_read(uncore, fault_data1_reg); 374 375 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | 376 ((u64)fault_data0 << 12); 377 378 drm_dbg(&uncore->i915->drm, "Unexpected fault\n" 379 "\tAddr: 0x%08x_%08x\n" 380 "\tAddress space: %s\n" 381 "\tEngine ID: %d\n" 382 "\tSource ID: %d\n" 383 "\tType: %d\n", 384 upper_32_bits(fault_addr), lower_32_bits(fault_addr), 385 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", 386 GEN8_RING_FAULT_ENGINE_ID(fault), 387 RING_FAULT_SRCID(fault), 388 RING_FAULT_FAULT_TYPE(fault)); 389 } 390 } 391 392 void intel_gt_check_and_clear_faults(struct intel_gt *gt) 393 { 394 struct drm_i915_private *i915 = gt->i915; 395 396 /* From GEN8 onwards we only have one 'All Engine Fault Register' */ 397 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) 398 xehp_check_faults(gt); 399 else if (GRAPHICS_VER(i915) >= 8) 400 gen8_check_faults(gt); 401 else if (GRAPHICS_VER(i915) >= 6) 402 gen6_check_faults(gt); 403 else 404 return; 405 406 intel_gt_clear_error_registers(gt, ALL_ENGINES); 407 } 408 409 void intel_gt_flush_ggtt_writes(struct intel_gt *gt) 410 { 411 struct intel_uncore *uncore = gt->uncore; 412 intel_wakeref_t wakeref; 413 414 /* 415 * No actual flushing is required for the GTT write domain for reads 416 * from the GTT domain. Writes to it "immediately" go to main memory 417 * as far as we know, so there's no chipset flush. It also doesn't 418 * land in the GPU render cache. 419 * 420 * However, we do have to enforce the order so that all writes through 421 * the GTT land before any writes to the device, such as updates to 422 * the GATT itself. 423 * 424 * We also have to wait a bit for the writes to land from the GTT. 425 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 426 * timing. This issue has only been observed when switching quickly 427 * between GTT writes and CPU reads from inside the kernel on recent hw, 428 * and it appears to only affect discrete GTT blocks (i.e. on LLC 429 * system agents we cannot reproduce this behaviour, until Cannonlake 430 * that was!). 431 */ 432 433 wmb(); 434 435 if (INTEL_INFO(gt->i915)->has_coherent_ggtt) 436 return; 437 438 intel_gt_chipset_flush(gt); 439 440 with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) { 441 unsigned long flags; 442 443 spin_lock_irqsave(&uncore->lock, flags); 444 intel_uncore_posting_read_fw(uncore, 445 RING_HEAD(RENDER_RING_BASE)); 446 spin_unlock_irqrestore(&uncore->lock, flags); 447 } 448 } 449 450 void intel_gt_chipset_flush(struct intel_gt *gt) 451 { 452 wmb(); 453 if (GRAPHICS_VER(gt->i915) < 6) 454 intel_ggtt_gmch_flush(); 455 } 456 457 void intel_gt_driver_register(struct intel_gt *gt) 458 { 459 intel_gsc_init(>->gsc, gt->i915); 460 461 intel_rps_driver_register(>->rps); 462 463 intel_gt_debugfs_register(gt); 464 intel_gt_sysfs_register(gt); 465 } 466 467 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) 468 { 469 struct drm_i915_private *i915 = gt->i915; 470 struct drm_i915_gem_object *obj; 471 struct i915_vma *vma; 472 int ret; 473 474 obj = i915_gem_object_create_lmem(i915, size, 475 I915_BO_ALLOC_VOLATILE | 476 I915_BO_ALLOC_GPU_ONLY); 477 if (IS_ERR(obj)) 478 obj = i915_gem_object_create_stolen(i915, size); 479 if (IS_ERR(obj)) 480 obj = i915_gem_object_create_internal(i915, size); 481 if (IS_ERR(obj)) { 482 drm_err(&i915->drm, "Failed to allocate scratch page\n"); 483 return PTR_ERR(obj); 484 } 485 486 vma = i915_vma_instance(obj, >->ggtt->vm, NULL); 487 if (IS_ERR(vma)) { 488 ret = PTR_ERR(vma); 489 goto err_unref; 490 } 491 492 ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); 493 if (ret) 494 goto err_unref; 495 496 gt->scratch = i915_vma_make_unshrinkable(vma); 497 498 return 0; 499 500 err_unref: 501 i915_gem_object_put(obj); 502 return ret; 503 } 504 505 static void intel_gt_fini_scratch(struct intel_gt *gt) 506 { 507 i915_vma_unpin_and_release(>->scratch, 0); 508 } 509 510 static struct i915_address_space *kernel_vm(struct intel_gt *gt) 511 { 512 if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) 513 return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm; 514 else 515 return i915_vm_get(>->ggtt->vm); 516 } 517 518 static int __engines_record_defaults(struct intel_gt *gt) 519 { 520 struct i915_request *requests[I915_NUM_ENGINES] = {}; 521 struct intel_engine_cs *engine; 522 enum intel_engine_id id; 523 int err = 0; 524 525 /* 526 * As we reset the gpu during very early sanitisation, the current 527 * register state on the GPU should reflect its defaults values. 528 * We load a context onto the hw (with restore-inhibit), then switch 529 * over to a second context to save that default register state. We 530 * can then prime every new context with that state so they all start 531 * from the same default HW values. 532 */ 533 534 for_each_engine(engine, gt, id) { 535 struct intel_renderstate so; 536 struct intel_context *ce; 537 struct i915_request *rq; 538 539 /* We must be able to switch to something! */ 540 GEM_BUG_ON(!engine->kernel_context); 541 542 ce = intel_context_create(engine); 543 if (IS_ERR(ce)) { 544 err = PTR_ERR(ce); 545 goto out; 546 } 547 548 err = intel_renderstate_init(&so, ce); 549 if (err) 550 goto err; 551 552 rq = i915_request_create(ce); 553 if (IS_ERR(rq)) { 554 err = PTR_ERR(rq); 555 goto err_fini; 556 } 557 558 err = intel_engine_emit_ctx_wa(rq); 559 if (err) 560 goto err_rq; 561 562 err = intel_renderstate_emit(&so, rq); 563 if (err) 564 goto err_rq; 565 566 err_rq: 567 requests[id] = i915_request_get(rq); 568 i915_request_add(rq); 569 err_fini: 570 intel_renderstate_fini(&so, ce); 571 err: 572 if (err) { 573 intel_context_put(ce); 574 goto out; 575 } 576 } 577 578 /* Flush the default context image to memory, and enable powersaving. */ 579 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { 580 err = -EIO; 581 goto out; 582 } 583 584 for (id = 0; id < ARRAY_SIZE(requests); id++) { 585 struct i915_request *rq; 586 struct file *state; 587 588 rq = requests[id]; 589 if (!rq) 590 continue; 591 592 if (rq->fence.error) { 593 err = -EIO; 594 goto out; 595 } 596 597 GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags)); 598 if (!rq->context->state) 599 continue; 600 601 /* Keep a copy of the state's backing pages; free the obj */ 602 state = shmem_create_from_object(rq->context->state->obj); 603 if (IS_ERR(state)) { 604 err = PTR_ERR(state); 605 goto out; 606 } 607 rq->engine->default_state = state; 608 } 609 610 out: 611 /* 612 * If we have to abandon now, we expect the engines to be idle 613 * and ready to be torn-down. The quickest way we can accomplish 614 * this is by declaring ourselves wedged. 615 */ 616 if (err) 617 intel_gt_set_wedged(gt); 618 619 for (id = 0; id < ARRAY_SIZE(requests); id++) { 620 struct intel_context *ce; 621 struct i915_request *rq; 622 623 rq = requests[id]; 624 if (!rq) 625 continue; 626 627 ce = rq->context; 628 i915_request_put(rq); 629 intel_context_put(ce); 630 } 631 return err; 632 } 633 634 static int __engines_verify_workarounds(struct intel_gt *gt) 635 { 636 struct intel_engine_cs *engine; 637 enum intel_engine_id id; 638 int err = 0; 639 640 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 641 return 0; 642 643 for_each_engine(engine, gt, id) { 644 if (intel_engine_verify_workarounds(engine, "load")) 645 err = -EIO; 646 } 647 648 /* Flush and restore the kernel context for safety */ 649 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) 650 err = -EIO; 651 652 return err; 653 } 654 655 static void __intel_gt_disable(struct intel_gt *gt) 656 { 657 intel_gt_set_wedged_on_fini(gt); 658 659 intel_gt_suspend_prepare(gt); 660 intel_gt_suspend_late(gt); 661 662 GEM_BUG_ON(intel_gt_pm_is_awake(gt)); 663 } 664 665 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout) 666 { 667 long remaining_timeout; 668 669 /* If the device is asleep, we have no requests outstanding */ 670 if (!intel_gt_pm_is_awake(gt)) 671 return 0; 672 673 while ((timeout = intel_gt_retire_requests_timeout(gt, timeout, 674 &remaining_timeout)) > 0) { 675 cond_resched(); 676 if (signal_pending(current)) 677 return -EINTR; 678 } 679 680 return timeout ? timeout : intel_uc_wait_for_idle(>->uc, 681 remaining_timeout); 682 } 683 684 int intel_gt_init(struct intel_gt *gt) 685 { 686 int err; 687 688 err = i915_inject_probe_error(gt->i915, -ENODEV); 689 if (err) 690 return err; 691 692 intel_gt_init_workarounds(gt); 693 694 /* 695 * This is just a security blanket to placate dragons. 696 * On some systems, we very sporadically observe that the first TLBs 697 * used by the CS may be stale, despite us poking the TLB reset. If 698 * we hold the forcewake during initialisation these problems 699 * just magically go away. 700 */ 701 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); 702 703 err = intel_gt_init_scratch(gt, 704 GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K); 705 if (err) 706 goto out_fw; 707 708 intel_gt_pm_init(gt); 709 710 gt->vm = kernel_vm(gt); 711 if (!gt->vm) { 712 err = -ENOMEM; 713 goto err_pm; 714 } 715 716 intel_set_mocs_index(gt); 717 718 err = intel_engines_init(gt); 719 if (err) 720 goto err_engines; 721 722 err = intel_uc_init(>->uc); 723 if (err) 724 goto err_engines; 725 726 err = intel_gt_resume(gt); 727 if (err) 728 goto err_uc_init; 729 730 err = intel_gt_init_hwconfig(gt); 731 if (err) 732 drm_err(>->i915->drm, "Failed to retrieve hwconfig table: %pe\n", 733 ERR_PTR(err)); 734 735 err = __engines_record_defaults(gt); 736 if (err) 737 goto err_gt; 738 739 err = __engines_verify_workarounds(gt); 740 if (err) 741 goto err_gt; 742 743 intel_uc_init_late(>->uc); 744 745 err = i915_inject_probe_error(gt->i915, -EIO); 746 if (err) 747 goto err_gt; 748 749 intel_migrate_init(>->migrate, gt); 750 751 intel_pxp_init(>->pxp); 752 753 goto out_fw; 754 err_gt: 755 __intel_gt_disable(gt); 756 intel_uc_fini_hw(>->uc); 757 err_uc_init: 758 intel_uc_fini(>->uc); 759 err_engines: 760 intel_engines_release(gt); 761 i915_vm_put(fetch_and_zero(>->vm)); 762 err_pm: 763 intel_gt_pm_fini(gt); 764 intel_gt_fini_scratch(gt); 765 out_fw: 766 if (err) 767 intel_gt_set_wedged_on_init(gt); 768 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); 769 return err; 770 } 771 772 void intel_gt_driver_remove(struct intel_gt *gt) 773 { 774 __intel_gt_disable(gt); 775 776 intel_migrate_fini(>->migrate); 777 intel_uc_driver_remove(>->uc); 778 779 intel_engines_release(gt); 780 781 intel_gt_flush_buffer_pool(gt); 782 } 783 784 void intel_gt_driver_unregister(struct intel_gt *gt) 785 { 786 intel_wakeref_t wakeref; 787 788 intel_gt_sysfs_unregister(gt); 789 intel_rps_driver_unregister(>->rps); 790 intel_gsc_fini(>->gsc); 791 792 intel_pxp_fini(>->pxp); 793 794 /* 795 * Upon unregistering the device to prevent any new users, cancel 796 * all in-flight requests so that we can quickly unbind the active 797 * resources. 798 */ 799 intel_gt_set_wedged_on_fini(gt); 800 801 /* Scrub all HW state upon release */ 802 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 803 __intel_gt_reset(gt, ALL_ENGINES); 804 } 805 806 void intel_gt_driver_release(struct intel_gt *gt) 807 { 808 struct i915_address_space *vm; 809 810 vm = fetch_and_zero(>->vm); 811 if (vm) /* FIXME being called twice on error paths :( */ 812 i915_vm_put(vm); 813 814 intel_wa_list_free(>->wa_list); 815 intel_gt_pm_fini(gt); 816 intel_gt_fini_scratch(gt); 817 intel_gt_fini_buffer_pool(gt); 818 intel_gt_fini_hwconfig(gt); 819 } 820 821 void intel_gt_driver_late_release_all(struct drm_i915_private *i915) 822 { 823 struct intel_gt *gt; 824 unsigned int id; 825 826 /* We need to wait for inflight RCU frees to release their grip */ 827 rcu_barrier(); 828 829 for_each_gt(gt, i915, id) { 830 intel_uc_driver_late_release(>->uc); 831 intel_gt_fini_requests(gt); 832 intel_gt_fini_reset(gt); 833 intel_gt_fini_timelines(gt); 834 mutex_destroy(>->tlb.invalidate_lock); 835 intel_engines_free(gt); 836 } 837 } 838 839 static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) 840 { 841 int ret; 842 843 if (!gt_is_root(gt)) { 844 struct intel_uncore *uncore; 845 spinlock_t *irq_lock; 846 847 uncore = drmm_kzalloc(>->i915->drm, sizeof(*uncore), GFP_KERNEL); 848 if (!uncore) 849 return -ENOMEM; 850 851 irq_lock = drmm_kzalloc(>->i915->drm, sizeof(*irq_lock), GFP_KERNEL); 852 if (!irq_lock) 853 return -ENOMEM; 854 855 gt->uncore = uncore; 856 gt->irq_lock = irq_lock; 857 858 intel_gt_common_init_early(gt); 859 } 860 861 intel_uncore_init_early(gt->uncore, gt); 862 863 ret = intel_uncore_setup_mmio(gt->uncore, phys_addr); 864 if (ret) 865 return ret; 866 867 gt->phys_addr = phys_addr; 868 869 return 0; 870 } 871 872 int intel_gt_probe_all(struct drm_i915_private *i915) 873 { 874 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 875 struct intel_gt *gt = &i915->gt0; 876 const struct intel_gt_definition *gtdef; 877 phys_addr_t phys_addr; 878 unsigned int mmio_bar; 879 unsigned int i; 880 int ret; 881 882 mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915)); 883 phys_addr = pci_resource_start(pdev, mmio_bar); 884 885 /* 886 * We always have at least one primary GT on any device 887 * and it has been already initialized early during probe 888 * in i915_driver_probe() 889 */ 890 gt->i915 = i915; 891 gt->name = "Primary GT"; 892 gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; 893 894 drm_dbg(&i915->drm, "Setting up %s\n", gt->name); 895 ret = intel_gt_tile_setup(gt, phys_addr); 896 if (ret) 897 return ret; 898 899 i915->gt[0] = gt; 900 901 if (!HAS_EXTRA_GT_LIST(i915)) 902 return 0; 903 904 for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]; 905 gtdef->name != NULL; 906 i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) { 907 gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL); 908 if (!gt) { 909 ret = -ENOMEM; 910 goto err; 911 } 912 913 gt->i915 = i915; 914 gt->name = gtdef->name; 915 gt->type = gtdef->type; 916 gt->info.engine_mask = gtdef->engine_mask; 917 gt->info.id = i; 918 919 drm_dbg(&i915->drm, "Setting up %s\n", gt->name); 920 if (GEM_WARN_ON(range_overflows_t(resource_size_t, 921 gtdef->mapping_base, 922 SZ_16M, 923 pci_resource_len(pdev, mmio_bar)))) { 924 ret = -ENODEV; 925 goto err; 926 } 927 928 switch (gtdef->type) { 929 case GT_TILE: 930 ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base); 931 break; 932 933 case GT_MEDIA: 934 ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base, 935 gtdef->gsi_offset); 936 break; 937 938 case GT_PRIMARY: 939 /* Primary GT should not appear in extra GT list */ 940 default: 941 MISSING_CASE(gtdef->type); 942 ret = -ENODEV; 943 } 944 945 if (ret) 946 goto err; 947 948 i915->gt[i] = gt; 949 } 950 951 return 0; 952 953 err: 954 i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret); 955 intel_gt_release_all(i915); 956 957 return ret; 958 } 959 960 int intel_gt_tiles_init(struct drm_i915_private *i915) 961 { 962 struct intel_gt *gt; 963 unsigned int id; 964 int ret; 965 966 for_each_gt(gt, i915, id) { 967 ret = intel_gt_probe_lmem(gt); 968 if (ret) 969 return ret; 970 } 971 972 return 0; 973 } 974 975 void intel_gt_release_all(struct drm_i915_private *i915) 976 { 977 struct intel_gt *gt; 978 unsigned int id; 979 980 for_each_gt(gt, i915, id) 981 i915->gt[id] = NULL; 982 } 983 984 void intel_gt_info_print(const struct intel_gt_info *info, 985 struct drm_printer *p) 986 { 987 drm_printf(p, "available engines: %x\n", info->engine_mask); 988 989 intel_sseu_dump(&info->sseu, p); 990 } 991 992 struct reg_and_bit { 993 union { 994 i915_reg_t reg; 995 i915_mcr_reg_t mcr_reg; 996 }; 997 u32 bit; 998 }; 999 1000 static struct reg_and_bit 1001 get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8, 1002 const i915_reg_t *regs, const unsigned int num) 1003 { 1004 const unsigned int class = engine->class; 1005 struct reg_and_bit rb = { }; 1006 1007 if (drm_WARN_ON_ONCE(&engine->i915->drm, 1008 class >= num || !regs[class].reg)) 1009 return rb; 1010 1011 rb.reg = regs[class]; 1012 if (gen8 && class == VIDEO_DECODE_CLASS) 1013 rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */ 1014 else 1015 rb.bit = engine->instance; 1016 1017 rb.bit = BIT(rb.bit); 1018 1019 return rb; 1020 } 1021 1022 /* 1023 * HW architecture suggest typical invalidation time at 40us, 1024 * with pessimistic cases up to 100us and a recommendation to 1025 * cap at 1ms. We go a bit higher just in case. 1026 */ 1027 #define TLB_INVAL_TIMEOUT_US 100 1028 #define TLB_INVAL_TIMEOUT_MS 4 1029 1030 /* 1031 * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets 1032 * but are now considered MCR registers. Since they exist within a GAM range, 1033 * the primary instance of the register rolls up the status from each unit. 1034 */ 1035 static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb) 1036 { 1037 if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) 1038 return intel_gt_mcr_wait_for_reg_fw(gt, rb.mcr_reg, rb.bit, 0, 1039 TLB_INVAL_TIMEOUT_US, 1040 TLB_INVAL_TIMEOUT_MS); 1041 else 1042 return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0, 1043 TLB_INVAL_TIMEOUT_US, 1044 TLB_INVAL_TIMEOUT_MS, 1045 NULL); 1046 } 1047 1048 static void mmio_invalidate_full(struct intel_gt *gt) 1049 { 1050 static const i915_reg_t gen8_regs[] = { 1051 [RENDER_CLASS] = GEN8_RTCR, 1052 [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */ 1053 [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR, 1054 [COPY_ENGINE_CLASS] = GEN8_BTCR, 1055 }; 1056 static const i915_reg_t gen12_regs[] = { 1057 [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR, 1058 [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR, 1059 [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR, 1060 [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR, 1061 [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR, 1062 }; 1063 static const i915_mcr_reg_t xehp_regs[] = { 1064 [RENDER_CLASS] = XEHP_GFX_TLB_INV_CR, 1065 [VIDEO_DECODE_CLASS] = XEHP_VD_TLB_INV_CR, 1066 [VIDEO_ENHANCEMENT_CLASS] = XEHP_VE_TLB_INV_CR, 1067 [COPY_ENGINE_CLASS] = XEHP_BLT_TLB_INV_CR, 1068 [COMPUTE_CLASS] = XEHP_COMPCTX_TLB_INV_CR, 1069 }; 1070 struct drm_i915_private *i915 = gt->i915; 1071 struct intel_uncore *uncore = gt->uncore; 1072 struct intel_engine_cs *engine; 1073 intel_engine_mask_t awake, tmp; 1074 enum intel_engine_id id; 1075 const i915_reg_t *regs; 1076 unsigned int num = 0; 1077 1078 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 1079 regs = NULL; 1080 num = ARRAY_SIZE(xehp_regs); 1081 } else if (GRAPHICS_VER(i915) == 12) { 1082 regs = gen12_regs; 1083 num = ARRAY_SIZE(gen12_regs); 1084 } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) { 1085 regs = gen8_regs; 1086 num = ARRAY_SIZE(gen8_regs); 1087 } else if (GRAPHICS_VER(i915) < 8) { 1088 return; 1089 } 1090 1091 if (drm_WARN_ONCE(&i915->drm, !num, 1092 "Platform does not implement TLB invalidation!")) 1093 return; 1094 1095 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1096 1097 spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */ 1098 1099 awake = 0; 1100 for_each_engine(engine, gt, id) { 1101 struct reg_and_bit rb; 1102 1103 if (!intel_engine_pm_is_awake(engine)) 1104 continue; 1105 1106 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 1107 intel_gt_mcr_multicast_write_fw(gt, 1108 xehp_regs[engine->class], 1109 BIT(engine->instance)); 1110 } else { 1111 rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); 1112 if (!i915_mmio_reg_offset(rb.reg)) 1113 continue; 1114 1115 intel_uncore_write_fw(uncore, rb.reg, rb.bit); 1116 } 1117 awake |= engine->mask; 1118 } 1119 1120 GT_TRACE(gt, "invalidated engines %08x\n", awake); 1121 1122 /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */ 1123 if (awake && 1124 (IS_TIGERLAKE(i915) || 1125 IS_DG1(i915) || 1126 IS_ROCKETLAKE(i915) || 1127 IS_ALDERLAKE_S(i915) || 1128 IS_ALDERLAKE_P(i915))) 1129 intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1); 1130 1131 spin_unlock_irq(&uncore->lock); 1132 1133 for_each_engine_masked(engine, gt, awake, tmp) { 1134 struct reg_and_bit rb; 1135 1136 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 1137 rb.mcr_reg = xehp_regs[engine->class]; 1138 rb.bit = BIT(engine->instance); 1139 } else { 1140 rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); 1141 } 1142 1143 if (wait_for_invalidate(gt, rb)) 1144 drm_err_ratelimited(>->i915->drm, 1145 "%s TLB invalidation did not complete in %ums!\n", 1146 engine->name, TLB_INVAL_TIMEOUT_MS); 1147 } 1148 1149 /* 1150 * Use delayed put since a) we mostly expect a flurry of TLB 1151 * invalidations so it is good to avoid paying the forcewake cost and 1152 * b) it works around a bug in Icelake which cannot cope with too rapid 1153 * transitions. 1154 */ 1155 intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL); 1156 } 1157 1158 static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno) 1159 { 1160 u32 cur = intel_gt_tlb_seqno(gt); 1161 1162 /* Only skip if a *full* TLB invalidate barrier has passed */ 1163 return (s32)(cur - ALIGN(seqno, 2)) > 0; 1164 } 1165 1166 void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno) 1167 { 1168 intel_wakeref_t wakeref; 1169 1170 if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) 1171 return; 1172 1173 if (intel_gt_is_wedged(gt)) 1174 return; 1175 1176 if (tlb_seqno_passed(gt, seqno)) 1177 return; 1178 1179 with_intel_gt_pm_if_awake(gt, wakeref) { 1180 mutex_lock(>->tlb.invalidate_lock); 1181 if (tlb_seqno_passed(gt, seqno)) 1182 goto unlock; 1183 1184 mmio_invalidate_full(gt); 1185 1186 write_seqcount_invalidate(>->tlb.seqno); 1187 unlock: 1188 mutex_unlock(>->tlb.invalidate_lock); 1189 } 1190 } 1191