1 /* 2 * Copyright © 2013 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include "i915_drv.h" 25 #include "intel_drv.h" 26 #include "i915_vgpu.h" 27 28 #include <asm/iosf_mbi.h> 29 #include <linux/pm_runtime.h> 30 31 #define FORCEWAKE_ACK_TIMEOUT_MS 50 32 #define GT_FIFO_TIMEOUT_MS 10 33 34 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__)) 35 36 static const char * const forcewake_domain_names[] = { 37 "render", 38 "blitter", 39 "media", 40 }; 41 42 const char * 43 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) 44 { 45 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT); 46 47 if (id >= 0 && id < FW_DOMAIN_ID_COUNT) 48 return forcewake_domain_names[id]; 49 50 WARN_ON(id); 51 52 return "unknown"; 53 } 54 55 static inline void 56 fw_domain_reset(struct drm_i915_private *i915, 57 const struct intel_uncore_forcewake_domain *d) 58 { 59 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset); 60 } 61 62 static inline void 63 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) 64 { 65 d->wake_count++; 66 hrtimer_start_range_ns(&d->timer, 67 NSEC_PER_MSEC, 68 NSEC_PER_MSEC, 69 HRTIMER_MODE_REL); 70 } 71 72 static inline void 73 fw_domain_wait_ack_clear(const struct drm_i915_private *i915, 74 const struct intel_uncore_forcewake_domain *d) 75 { 76 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & 77 FORCEWAKE_KERNEL) == 0, 78 FORCEWAKE_ACK_TIMEOUT_MS)) 79 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", 80 intel_uncore_forcewake_domain_to_str(d->id)); 81 } 82 83 static inline void 84 fw_domain_get(struct drm_i915_private *i915, 85 const struct intel_uncore_forcewake_domain *d) 86 { 87 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set); 88 } 89 90 static inline void 91 fw_domain_wait_ack(const struct drm_i915_private *i915, 92 const struct intel_uncore_forcewake_domain *d) 93 { 94 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & 95 FORCEWAKE_KERNEL), 96 FORCEWAKE_ACK_TIMEOUT_MS)) 97 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", 98 intel_uncore_forcewake_domain_to_str(d->id)); 99 } 100 101 static inline void 102 fw_domain_put(const struct drm_i915_private *i915, 103 const struct intel_uncore_forcewake_domain *d) 104 { 105 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear); 106 } 107 108 static void 109 fw_domains_get(struct 
drm_i915_private *i915, enum forcewake_domains fw_domains) 110 { 111 struct intel_uncore_forcewake_domain *d; 112 unsigned int tmp; 113 114 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); 115 116 for_each_fw_domain_masked(d, fw_domains, i915, tmp) { 117 fw_domain_wait_ack_clear(i915, d); 118 fw_domain_get(i915, d); 119 } 120 121 for_each_fw_domain_masked(d, fw_domains, i915, tmp) 122 fw_domain_wait_ack(i915, d); 123 124 i915->uncore.fw_domains_active |= fw_domains; 125 } 126 127 static void 128 fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains) 129 { 130 struct intel_uncore_forcewake_domain *d; 131 unsigned int tmp; 132 133 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); 134 135 for_each_fw_domain_masked(d, fw_domains, i915, tmp) 136 fw_domain_put(i915, d); 137 138 i915->uncore.fw_domains_active &= ~fw_domains; 139 } 140 141 static void 142 fw_domains_reset(struct drm_i915_private *i915, 143 enum forcewake_domains fw_domains) 144 { 145 struct intel_uncore_forcewake_domain *d; 146 unsigned int tmp; 147 148 if (!fw_domains) 149 return; 150 151 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); 152 153 for_each_fw_domain_masked(d, fw_domains, i915, tmp) 154 fw_domain_reset(i915, d); 155 } 156 157 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) 158 { 159 /* w/a for a sporadic read returning 0 by waiting for the GT 160 * thread to wake up. 161 */ 162 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & 163 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500)) 164 DRM_ERROR("GT thread status wait timed out\n"); 165 } 166 167 static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv, 168 enum forcewake_domains fw_domains) 169 { 170 fw_domains_get(dev_priv, fw_domains); 171 172 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */ 173 __gen6_gt_wait_for_thread_c0(dev_priv); 174 } 175 176 static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv) 177 { 178 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL); 179 180 return count & GT_FIFO_FREE_ENTRIES_MASK; 181 } 182 183 static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 184 { 185 u32 n; 186 187 /* On VLV, FIFO will be shared by both SW and HW. 
188 * So, we need to read the FREE_ENTRIES everytime */ 189 if (IS_VALLEYVIEW(dev_priv)) 190 n = fifo_free_entries(dev_priv); 191 else 192 n = dev_priv->uncore.fifo_count; 193 194 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) { 195 if (wait_for_atomic((n = fifo_free_entries(dev_priv)) > 196 GT_FIFO_NUM_RESERVED_ENTRIES, 197 GT_FIFO_TIMEOUT_MS)) { 198 DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); 199 return; 200 } 201 } 202 203 dev_priv->uncore.fifo_count = n - 1; 204 } 205 206 static enum hrtimer_restart 207 intel_uncore_fw_release_timer(struct hrtimer *timer) 208 { 209 struct intel_uncore_forcewake_domain *domain = 210 container_of(timer, struct intel_uncore_forcewake_domain, timer); 211 struct drm_i915_private *dev_priv = 212 container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]); 213 unsigned long irqflags; 214 215 assert_rpm_device_not_suspended(dev_priv); 216 217 if (xchg(&domain->active, false)) 218 return HRTIMER_RESTART; 219 220 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 221 if (WARN_ON(domain->wake_count == 0)) 222 domain->wake_count++; 223 224 if (--domain->wake_count == 0) 225 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); 226 227 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 228 229 return HRTIMER_NORESTART; 230 } 231 232 static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, 233 bool restore) 234 { 235 unsigned long irqflags; 236 struct intel_uncore_forcewake_domain *domain; 237 int retry_count = 100; 238 enum forcewake_domains fw, active_domains; 239 240 /* Hold uncore.lock across reset to prevent any register access 241 * with forcewake not set correctly. Wait until all pending 242 * timers are run before holding. 243 */ 244 while (1) { 245 unsigned int tmp; 246 247 active_domains = 0; 248 249 for_each_fw_domain(domain, dev_priv, tmp) { 250 smp_store_mb(domain->active, false); 251 if (hrtimer_cancel(&domain->timer) == 0) 252 continue; 253 254 intel_uncore_fw_release_timer(&domain->timer); 255 } 256 257 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 258 259 for_each_fw_domain(domain, dev_priv, tmp) { 260 if (hrtimer_active(&domain->timer)) 261 active_domains |= domain->mask; 262 } 263 264 if (active_domains == 0) 265 break; 266 267 if (--retry_count == 0) { 268 DRM_ERROR("Timed out waiting for forcewake timers to finish\n"); 269 break; 270 } 271 272 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 273 cond_resched(); 274 } 275 276 WARN_ON(active_domains); 277 278 fw = dev_priv->uncore.fw_domains_active; 279 if (fw) 280 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); 281 282 fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains); 283 284 if (restore) { /* If reset with a user forcewake, try to restore */ 285 if (fw) 286 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 287 288 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) 289 dev_priv->uncore.fifo_count = 290 fifo_free_entries(dev_priv); 291 } 292 293 if (!restore) 294 assert_forcewakes_inactive(dev_priv); 295 296 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 297 } 298 299 static u64 gen9_edram_size(struct drm_i915_private *dev_priv) 300 { 301 const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; 302 const unsigned int sets[4] = { 1, 1, 2, 2 }; 303 const u32 cap = dev_priv->edram_cap; 304 305 return EDRAM_NUM_BANKS(cap) * 306 ways[EDRAM_WAYS_IDX(cap)] * 307 sets[EDRAM_SETS_IDX(cap)] * 308 1024 * 1024; 309 } 310 311 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv) 312 { 313 if 
(!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there on pre-gen9 platforms, so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915_modparams.enable_rc6 =
		sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
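 *
 * A minimal usage sketch (the debugfs-style caller shown here is
 * illustrative only, not taken from this file)::
 *
 *	intel_runtime_pm_get(dev_priv);
 *	intel_uncore_forcewake_user_get(dev_priv);
 *	... userspace performs raw mmio reads/writes ...
 *	intel_uncore_forcewake_user_put(dev_priv);
 *	intel_runtime_pm_put(dev_priv);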
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
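 *
 * A minimal sketch of the locked pattern (assuming the caller has already
 * computed @fw_domains, e.g. via intel_uncore_forcewake_for_reg())::
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
 *	... raw register accesses ...
 *	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
 *	spin_unlock_irq(&dev_priv->uncore.lock);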
612 */ 613 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 614 enum forcewake_domains fw_domains) 615 { 616 lockdep_assert_held(&dev_priv->uncore.lock); 617 618 if (!dev_priv->uncore.funcs.force_wake_put) 619 return; 620 621 __intel_uncore_forcewake_put(dev_priv, fw_domains); 622 } 623 624 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) 625 { 626 if (!dev_priv->uncore.funcs.force_wake_get) 627 return; 628 629 WARN(dev_priv->uncore.fw_domains_active, 630 "Expected all fw_domains to be inactive, but %08x are still on\n", 631 dev_priv->uncore.fw_domains_active); 632 } 633 634 void assert_forcewakes_active(struct drm_i915_private *dev_priv, 635 enum forcewake_domains fw_domains) 636 { 637 if (!dev_priv->uncore.funcs.force_wake_get) 638 return; 639 640 assert_rpm_wakelock_held(dev_priv); 641 642 fw_domains &= dev_priv->uncore.fw_domains; 643 WARN(fw_domains & ~dev_priv->uncore.fw_domains_active, 644 "Expected %08x fw_domains to be active, but %08x are off\n", 645 fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active); 646 } 647 648 /* We give fast paths for the really cool registers */ 649 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) 650 651 #define __gen6_reg_read_fw_domains(offset) \ 652 ({ \ 653 enum forcewake_domains __fwd; \ 654 if (NEEDS_FORCE_WAKE(offset)) \ 655 __fwd = FORCEWAKE_RENDER; \ 656 else \ 657 __fwd = 0; \ 658 __fwd; \ 659 }) 660 661 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry) 662 { 663 if (offset < entry->start) 664 return -1; 665 else if (offset > entry->end) 666 return 1; 667 else 668 return 0; 669 } 670 671 /* Copied and "macroized" from lib/bsearch.c */ 672 #define BSEARCH(key, base, num, cmp) ({ \ 673 unsigned int start__ = 0, end__ = (num); \ 674 typeof(base) result__ = NULL; \ 675 while (start__ < end__) { \ 676 unsigned int mid__ = start__ + (end__ - start__) / 2; \ 677 int ret__ = (cmp)((key), (base) + mid__); \ 678 if (ret__ < 0) { \ 679 end__ = mid__; \ 680 } else if (ret__ > 0) { \ 681 start__ = mid__ + 1; \ 682 } else { \ 683 result__ = (base) + mid__; \ 684 break; \ 685 } \ 686 } \ 687 result__; \ 688 }) 689 690 static enum forcewake_domains 691 find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) 692 { 693 const struct intel_forcewake_range *entry; 694 695 entry = BSEARCH(offset, 696 dev_priv->uncore.fw_domains_table, 697 dev_priv->uncore.fw_domains_table_entries, 698 fw_range_cmp); 699 700 if (!entry) 701 return 0; 702 703 WARN(entry->domains & ~dev_priv->uncore.fw_domains, 704 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", 705 entry->domains & ~dev_priv->uncore.fw_domains, offset); 706 707 return entry->domains; 708 } 709 710 #define GEN_FW_RANGE(s, e, d) \ 711 { .start = (s), .end = (e), .domains = (d) } 712 713 #define HAS_FWTABLE(dev_priv) \ 714 (INTEL_GEN(dev_priv) >= 9 || \ 715 IS_CHERRYVIEW(dev_priv) || \ 716 IS_VALLEYVIEW(dev_priv)) 717 718 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ 719 static const struct intel_forcewake_range __vlv_fw_ranges[] = { 720 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), 721 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER), 722 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER), 723 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), 724 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA), 725 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER), 726 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), 727 }; 728 729 #define __fwtable_reg_read_fw_domains(offset) \ 730 ({ \ 731 enum forcewake_domains __fwd = 0; \ 732 if (NEEDS_FORCE_WAKE((offset))) \ 733 __fwd = find_fw_domain(dev_priv, offset); \ 734 __fwd; \ 735 }) 736 737 /* *Must* be sorted by offset! See intel_shadow_table_check(). */ 738 static const i915_reg_t gen8_shadowed_regs[] = { 739 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ 740 GEN6_RPNSWREQ, /* 0xA008 */ 741 GEN6_RC_VIDEO_FREQ, /* 0xA00C */ 742 RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */ 743 RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */ 744 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ 745 /* TODO: Other registers are not yet used */ 746 }; 747 748 static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) 749 { 750 u32 offset = i915_mmio_reg_offset(*reg); 751 752 if (key < offset) 753 return -1; 754 else if (key > offset) 755 return 1; 756 else 757 return 0; 758 } 759 760 static bool is_gen8_shadowed(u32 offset) 761 { 762 const i915_reg_t *regs = gen8_shadowed_regs; 763 764 return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs), 765 mmio_reg_cmp); 766 } 767 768 #define __gen8_reg_write_fw_domains(offset) \ 769 ({ \ 770 enum forcewake_domains __fwd; \ 771 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ 772 __fwd = FORCEWAKE_RENDER; \ 773 else \ 774 __fwd = 0; \ 775 __fwd; \ 776 }) 777 778 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 779 static const struct intel_forcewake_range __chv_fw_ranges[] = { 780 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), 781 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 782 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 783 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 784 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 785 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 786 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA), 787 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 788 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 789 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), 790 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER), 791 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 792 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), 793 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA), 794 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA), 795 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA), 796 }; 797 798 #define __fwtable_reg_write_fw_domains(offset) \ 799 ({ \ 800 enum forcewake_domains __fwd = 0; \ 801 if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ 802 __fwd = find_fw_domain(dev_priv, offset); \ 803 __fwd; \ 804 }) 805 806 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ 807 static const struct intel_forcewake_range __gen9_fw_ranges[] = { 808 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), 809 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ 810 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), 811 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), 812 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), 813 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), 814 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 815 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER), 816 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA), 817 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), 818 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), 819 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 820 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), 821 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA), 822 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER), 823 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), 824 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), 825 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 826 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), 827 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 828 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER), 829 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), 830 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER), 831 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), 832 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER), 833 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), 834 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER), 835 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA), 836 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER), 837 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), 838 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER), 839 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), 840 }; 841 842 static void 843 ilk_dummy_write(struct drm_i915_private *dev_priv) 844 { 845 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up 846 * the chip from rc6 before touching it for real. MI_MODE is masked, 847 * hence harmless to write 0 into. */ 848 __raw_i915_write32(dev_priv, MI_MODE, 0); 849 } 850 851 static void 852 __unclaimed_reg_debug(struct drm_i915_private *dev_priv, 853 const i915_reg_t reg, 854 const bool read, 855 const bool before) 856 { 857 if (WARN(check_for_unclaimed_mmio(dev_priv) && !before, 858 "Unclaimed %s register 0x%x\n", 859 read ? 
"read from" : "write to", 860 i915_mmio_reg_offset(reg))) 861 /* Only report the first N failures */ 862 i915_modparams.mmio_debug--; 863 } 864 865 static inline void 866 unclaimed_reg_debug(struct drm_i915_private *dev_priv, 867 const i915_reg_t reg, 868 const bool read, 869 const bool before) 870 { 871 if (likely(!i915_modparams.mmio_debug)) 872 return; 873 874 __unclaimed_reg_debug(dev_priv, reg, read, before); 875 } 876 877 #define GEN2_READ_HEADER(x) \ 878 u##x val = 0; \ 879 assert_rpm_wakelock_held(dev_priv); 880 881 #define GEN2_READ_FOOTER \ 882 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 883 return val 884 885 #define __gen2_read(x) \ 886 static u##x \ 887 gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ 888 GEN2_READ_HEADER(x); \ 889 val = __raw_i915_read##x(dev_priv, reg); \ 890 GEN2_READ_FOOTER; \ 891 } 892 893 #define __gen5_read(x) \ 894 static u##x \ 895 gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ 896 GEN2_READ_HEADER(x); \ 897 ilk_dummy_write(dev_priv); \ 898 val = __raw_i915_read##x(dev_priv, reg); \ 899 GEN2_READ_FOOTER; \ 900 } 901 902 __gen5_read(8) 903 __gen5_read(16) 904 __gen5_read(32) 905 __gen5_read(64) 906 __gen2_read(8) 907 __gen2_read(16) 908 __gen2_read(32) 909 __gen2_read(64) 910 911 #undef __gen5_read 912 #undef __gen2_read 913 914 #undef GEN2_READ_FOOTER 915 #undef GEN2_READ_HEADER 916 917 #define GEN6_READ_HEADER(x) \ 918 u32 offset = i915_mmio_reg_offset(reg); \ 919 unsigned long irqflags; \ 920 u##x val = 0; \ 921 assert_rpm_wakelock_held(dev_priv); \ 922 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 923 unclaimed_reg_debug(dev_priv, reg, true, true) 924 925 #define GEN6_READ_FOOTER \ 926 unclaimed_reg_debug(dev_priv, reg, true, false); \ 927 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 928 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 929 return val 930 931 static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, 932 enum forcewake_domains fw_domains) 933 { 934 struct intel_uncore_forcewake_domain *domain; 935 unsigned int tmp; 936 937 GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains); 938 939 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) 940 fw_domain_arm_timer(domain); 941 942 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 943 } 944 945 static inline void __force_wake_auto(struct drm_i915_private *dev_priv, 946 enum forcewake_domains fw_domains) 947 { 948 if (WARN_ON(!fw_domains)) 949 return; 950 951 /* Turn on all requested but inactive supported forcewake domains. 
*/ 952 fw_domains &= dev_priv->uncore.fw_domains; 953 fw_domains &= ~dev_priv->uncore.fw_domains_active; 954 955 if (fw_domains) 956 ___force_wake_auto(dev_priv, fw_domains); 957 } 958 959 #define __gen_read(func, x) \ 960 static u##x \ 961 func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ 962 enum forcewake_domains fw_engine; \ 963 GEN6_READ_HEADER(x); \ 964 fw_engine = __##func##_reg_read_fw_domains(offset); \ 965 if (fw_engine) \ 966 __force_wake_auto(dev_priv, fw_engine); \ 967 val = __raw_i915_read##x(dev_priv, reg); \ 968 GEN6_READ_FOOTER; \ 969 } 970 #define __gen6_read(x) __gen_read(gen6, x) 971 #define __fwtable_read(x) __gen_read(fwtable, x) 972 973 __fwtable_read(8) 974 __fwtable_read(16) 975 __fwtable_read(32) 976 __fwtable_read(64) 977 __gen6_read(8) 978 __gen6_read(16) 979 __gen6_read(32) 980 __gen6_read(64) 981 982 #undef __fwtable_read 983 #undef __gen6_read 984 #undef GEN6_READ_FOOTER 985 #undef GEN6_READ_HEADER 986 987 #define GEN2_WRITE_HEADER \ 988 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 989 assert_rpm_wakelock_held(dev_priv); \ 990 991 #define GEN2_WRITE_FOOTER 992 993 #define __gen2_write(x) \ 994 static void \ 995 gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ 996 GEN2_WRITE_HEADER; \ 997 __raw_i915_write##x(dev_priv, reg, val); \ 998 GEN2_WRITE_FOOTER; \ 999 } 1000 1001 #define __gen5_write(x) \ 1002 static void \ 1003 gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ 1004 GEN2_WRITE_HEADER; \ 1005 ilk_dummy_write(dev_priv); \ 1006 __raw_i915_write##x(dev_priv, reg, val); \ 1007 GEN2_WRITE_FOOTER; \ 1008 } 1009 1010 __gen5_write(8) 1011 __gen5_write(16) 1012 __gen5_write(32) 1013 __gen2_write(8) 1014 __gen2_write(16) 1015 __gen2_write(32) 1016 1017 #undef __gen5_write 1018 #undef __gen2_write 1019 1020 #undef GEN2_WRITE_FOOTER 1021 #undef GEN2_WRITE_HEADER 1022 1023 #define GEN6_WRITE_HEADER \ 1024 u32 offset = i915_mmio_reg_offset(reg); \ 1025 unsigned long irqflags; \ 1026 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 1027 assert_rpm_wakelock_held(dev_priv); \ 1028 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 1029 unclaimed_reg_debug(dev_priv, reg, false, true) 1030 1031 #define GEN6_WRITE_FOOTER \ 1032 unclaimed_reg_debug(dev_priv, reg, false, false); \ 1033 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) 1034 1035 #define __gen6_write(x) \ 1036 static void \ 1037 gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ 1038 GEN6_WRITE_HEADER; \ 1039 if (NEEDS_FORCE_WAKE(offset)) \ 1040 __gen6_gt_wait_for_fifo(dev_priv); \ 1041 __raw_i915_write##x(dev_priv, reg, val); \ 1042 GEN6_WRITE_FOOTER; \ 1043 } 1044 1045 #define __gen_write(func, x) \ 1046 static void \ 1047 func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ 1048 enum forcewake_domains fw_engine; \ 1049 GEN6_WRITE_HEADER; \ 1050 fw_engine = __##func##_reg_write_fw_domains(offset); \ 1051 if (fw_engine) \ 1052 __force_wake_auto(dev_priv, fw_engine); \ 1053 __raw_i915_write##x(dev_priv, reg, val); \ 1054 GEN6_WRITE_FOOTER; \ 1055 } 1056 #define __gen8_write(x) __gen_write(gen8, x) 1057 #define __fwtable_write(x) __gen_write(fwtable, x) 1058 1059 __fwtable_write(8) 1060 __fwtable_write(16) 1061 __fwtable_write(32) 1062 __gen8_write(8) 1063 __gen8_write(16) 1064 __gen8_write(32) 1065 __gen6_write(8) 1066 __gen6_write(16) 1067 __gen6_write(32) 1068 1069 #undef 
__fwtable_write 1070 #undef __gen8_write 1071 #undef __gen6_write 1072 #undef GEN6_WRITE_FOOTER 1073 #undef GEN6_WRITE_HEADER 1074 1075 #define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \ 1076 do { \ 1077 (i915)->uncore.funcs.mmio_writeb = x##_write8; \ 1078 (i915)->uncore.funcs.mmio_writew = x##_write16; \ 1079 (i915)->uncore.funcs.mmio_writel = x##_write32; \ 1080 } while (0) 1081 1082 #define ASSIGN_READ_MMIO_VFUNCS(i915, x) \ 1083 do { \ 1084 (i915)->uncore.funcs.mmio_readb = x##_read8; \ 1085 (i915)->uncore.funcs.mmio_readw = x##_read16; \ 1086 (i915)->uncore.funcs.mmio_readl = x##_read32; \ 1087 (i915)->uncore.funcs.mmio_readq = x##_read64; \ 1088 } while (0) 1089 1090 1091 static void fw_domain_init(struct drm_i915_private *dev_priv, 1092 enum forcewake_domain_id domain_id, 1093 i915_reg_t reg_set, 1094 i915_reg_t reg_ack) 1095 { 1096 struct intel_uncore_forcewake_domain *d; 1097 1098 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) 1099 return; 1100 1101 d = &dev_priv->uncore.fw_domain[domain_id]; 1102 1103 WARN_ON(d->wake_count); 1104 1105 WARN_ON(!i915_mmio_reg_valid(reg_set)); 1106 WARN_ON(!i915_mmio_reg_valid(reg_ack)); 1107 1108 d->wake_count = 0; 1109 d->reg_set = reg_set; 1110 d->reg_ack = reg_ack; 1111 1112 d->id = domain_id; 1113 1114 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); 1115 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); 1116 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); 1117 1118 d->mask = BIT(domain_id); 1119 1120 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1121 d->timer.function = intel_uncore_fw_release_timer; 1122 1123 dev_priv->uncore.fw_domains |= BIT(domain_id); 1124 1125 fw_domain_reset(dev_priv, d); 1126 } 1127 1128 static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) 1129 { 1130 if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) 1131 return; 1132 1133 if (IS_GEN6(dev_priv)) { 1134 dev_priv->uncore.fw_reset = 0; 1135 dev_priv->uncore.fw_set = FORCEWAKE_KERNEL; 1136 dev_priv->uncore.fw_clear = 0; 1137 } else { 1138 /* WaRsClearFWBitsAtReset:bdw,skl */ 1139 dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff); 1140 dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); 1141 dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); 1142 } 1143 1144 if (INTEL_GEN(dev_priv) >= 9) { 1145 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1146 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1147 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1148 FORCEWAKE_RENDER_GEN9, 1149 FORCEWAKE_ACK_RENDER_GEN9); 1150 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, 1151 FORCEWAKE_BLITTER_GEN9, 1152 FORCEWAKE_ACK_BLITTER_GEN9); 1153 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1154 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); 1155 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1156 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1157 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1158 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1159 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); 1160 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1161 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); 1162 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1163 dev_priv->uncore.funcs.force_wake_get = 1164 fw_domains_get_with_thread_status; 1165 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1166 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1167 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1168 } else if (IS_IVYBRIDGE(dev_priv)) { 1169 u32 
ecobus; 1170 1171 /* IVB configs may use multi-threaded forcewake */ 1172 1173 /* A small trick here - if the bios hasn't configured 1174 * MT forcewake, and if the device is in RC6, then 1175 * force_wake_mt_get will not wake the device and the 1176 * ECOBUS read will return zero. Which will be 1177 * (correctly) interpreted by the test below as MT 1178 * forcewake being disabled. 1179 */ 1180 dev_priv->uncore.funcs.force_wake_get = 1181 fw_domains_get_with_thread_status; 1182 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1183 1184 /* We need to init first for ECOBUS access and then 1185 * determine later if we want to reinit, in case of MT access is 1186 * not working. In this stage we don't know which flavour this 1187 * ivb is, so it is better to reset also the gen6 fw registers 1188 * before the ecobus check. 1189 */ 1190 1191 __raw_i915_write32(dev_priv, FORCEWAKE, 0); 1192 __raw_posting_read(dev_priv, ECOBUS); 1193 1194 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1195 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1196 1197 spin_lock_irq(&dev_priv->uncore.lock); 1198 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER); 1199 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1200 fw_domains_put(dev_priv, FORCEWAKE_RENDER); 1201 spin_unlock_irq(&dev_priv->uncore.lock); 1202 1203 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1204 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1205 DRM_INFO("when using vblank-synced partial screen updates.\n"); 1206 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1207 FORCEWAKE, FORCEWAKE_ACK); 1208 } 1209 } else if (IS_GEN6(dev_priv)) { 1210 dev_priv->uncore.funcs.force_wake_get = 1211 fw_domains_get_with_thread_status; 1212 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1213 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1214 FORCEWAKE, FORCEWAKE_ACK); 1215 } 1216 1217 /* All future platforms are expected to require complex power gating */ 1218 WARN_ON(dev_priv->uncore.fw_domains == 0); 1219 } 1220 1221 #define ASSIGN_FW_DOMAINS_TABLE(d) \ 1222 { \ 1223 dev_priv->uncore.fw_domains_table = \ 1224 (struct intel_forcewake_range *)(d); \ 1225 dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \ 1226 } 1227 1228 static int i915_pmic_bus_access_notifier(struct notifier_block *nb, 1229 unsigned long action, void *data) 1230 { 1231 struct drm_i915_private *dev_priv = container_of(nb, 1232 struct drm_i915_private, uncore.pmic_bus_access_nb); 1233 1234 switch (action) { 1235 case MBI_PMIC_BUS_ACCESS_BEGIN: 1236 /* 1237 * forcewake all now to make sure that we don't need to do a 1238 * forcewake later which on systems where this notifier gets 1239 * called requires the punit to access to the shared pmic i2c 1240 * bus, which will be busy after this notification, leading to: 1241 * "render: timed out waiting for forcewake ack request." 1242 * errors. 
1243 */ 1244 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1245 break; 1246 case MBI_PMIC_BUS_ACCESS_END: 1247 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1248 break; 1249 } 1250 1251 return NOTIFY_OK; 1252 } 1253 1254 void intel_uncore_init(struct drm_i915_private *dev_priv) 1255 { 1256 i915_check_vgpu(dev_priv); 1257 1258 intel_uncore_edram_detect(dev_priv); 1259 intel_uncore_fw_domains_init(dev_priv); 1260 __intel_uncore_early_sanitize(dev_priv, false); 1261 1262 dev_priv->uncore.unclaimed_mmio_check = 1; 1263 dev_priv->uncore.pmic_bus_access_nb.notifier_call = 1264 i915_pmic_bus_access_notifier; 1265 1266 if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) { 1267 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2); 1268 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2); 1269 } else if (IS_GEN5(dev_priv)) { 1270 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5); 1271 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5); 1272 } else if (IS_GEN(dev_priv, 6, 7)) { 1273 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6); 1274 1275 if (IS_VALLEYVIEW(dev_priv)) { 1276 ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges); 1277 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); 1278 } else { 1279 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); 1280 } 1281 } else if (IS_GEN8(dev_priv)) { 1282 if (IS_CHERRYVIEW(dev_priv)) { 1283 ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges); 1284 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); 1285 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); 1286 1287 } else { 1288 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8); 1289 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); 1290 } 1291 } else { 1292 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); 1293 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); 1294 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); 1295 } 1296 1297 iosf_mbi_register_pmic_bus_access_notifier( 1298 &dev_priv->uncore.pmic_bus_access_nb); 1299 1300 i915_check_and_clear_faults(dev_priv); 1301 } 1302 1303 void intel_uncore_fini(struct drm_i915_private *dev_priv) 1304 { 1305 iosf_mbi_unregister_pmic_bus_access_notifier( 1306 &dev_priv->uncore.pmic_bus_access_nb); 1307 1308 /* Paranoia: make sure we have disabled everything before we exit. 
*/ 1309 intel_uncore_sanitize(dev_priv); 1310 intel_uncore_forcewake_reset(dev_priv, false); 1311 } 1312 1313 static const struct reg_whitelist { 1314 i915_reg_t offset_ldw; 1315 i915_reg_t offset_udw; 1316 u16 gen_mask; 1317 u8 size; 1318 } reg_read_whitelist[] = { { 1319 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), 1320 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), 1321 .gen_mask = INTEL_GEN_MASK(4, 10), 1322 .size = 8 1323 } }; 1324 1325 int i915_reg_read_ioctl(struct drm_device *dev, 1326 void *data, struct drm_file *file) 1327 { 1328 struct drm_i915_private *dev_priv = to_i915(dev); 1329 struct drm_i915_reg_read *reg = data; 1330 struct reg_whitelist const *entry; 1331 unsigned int flags; 1332 int remain; 1333 int ret = 0; 1334 1335 entry = reg_read_whitelist; 1336 remain = ARRAY_SIZE(reg_read_whitelist); 1337 while (remain) { 1338 u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); 1339 1340 GEM_BUG_ON(!is_power_of_2(entry->size)); 1341 GEM_BUG_ON(entry->size > 8); 1342 GEM_BUG_ON(entry_offset & (entry->size - 1)); 1343 1344 if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && 1345 entry_offset == (reg->offset & -entry->size)) 1346 break; 1347 entry++; 1348 remain--; 1349 } 1350 1351 if (!remain) 1352 return -EINVAL; 1353 1354 flags = reg->offset & (entry->size - 1); 1355 1356 intel_runtime_pm_get(dev_priv); 1357 if (entry->size == 8 && flags == I915_REG_READ_8B_WA) 1358 reg->val = I915_READ64_2x32(entry->offset_ldw, 1359 entry->offset_udw); 1360 else if (entry->size == 8 && flags == 0) 1361 reg->val = I915_READ64(entry->offset_ldw); 1362 else if (entry->size == 4 && flags == 0) 1363 reg->val = I915_READ(entry->offset_ldw); 1364 else if (entry->size == 2 && flags == 0) 1365 reg->val = I915_READ16(entry->offset_ldw); 1366 else if (entry->size == 1 && flags == 0) 1367 reg->val = I915_READ8(entry->offset_ldw); 1368 else 1369 ret = -EINVAL; 1370 intel_runtime_pm_put(dev_priv); 1371 1372 return ret; 1373 } 1374 1375 static void gen3_stop_engine(struct intel_engine_cs *engine) 1376 { 1377 struct drm_i915_private *dev_priv = engine->i915; 1378 const u32 base = engine->mmio_base; 1379 const i915_reg_t mode = RING_MI_MODE(base); 1380 1381 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); 1382 if (intel_wait_for_register_fw(dev_priv, 1383 mode, 1384 MODE_IDLE, 1385 MODE_IDLE, 1386 500)) 1387 DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", 1388 engine->name); 1389 1390 I915_WRITE_FW(RING_CTL(base), 0); 1391 I915_WRITE_FW(RING_HEAD(base), 0); 1392 I915_WRITE_FW(RING_TAIL(base), 0); 1393 1394 /* Check acts as a post */ 1395 if (I915_READ_FW(RING_HEAD(base)) != 0) 1396 DRM_DEBUG_DRIVER("%s: ring head not parked\n", 1397 engine->name); 1398 } 1399 1400 static void i915_stop_engines(struct drm_i915_private *dev_priv, 1401 unsigned engine_mask) 1402 { 1403 struct intel_engine_cs *engine; 1404 enum intel_engine_id id; 1405 1406 if (INTEL_GEN(dev_priv) < 3) 1407 return; 1408 1409 for_each_engine_masked(engine, dev_priv, engine_mask, id) 1410 gen3_stop_engine(engine); 1411 } 1412 1413 static bool i915_reset_complete(struct pci_dev *pdev) 1414 { 1415 u8 gdrst; 1416 1417 pci_read_config_byte(pdev, I915_GDRST, &gdrst); 1418 return (gdrst & GRDOM_RESET_STATUS) == 0; 1419 } 1420 1421 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1422 { 1423 struct pci_dev *pdev = dev_priv->drm.pdev; 1424 1425 /* assert reset for at least 20 usec */ 1426 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1427 usleep_range(50, 200); 1428 
pci_write_config_byte(pdev, I915_GDRST, 0); 1429 1430 return wait_for(i915_reset_complete(pdev), 500); 1431 } 1432 1433 static bool g4x_reset_complete(struct pci_dev *pdev) 1434 { 1435 u8 gdrst; 1436 1437 pci_read_config_byte(pdev, I915_GDRST, &gdrst); 1438 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1439 } 1440 1441 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1442 { 1443 struct pci_dev *pdev = dev_priv->drm.pdev; 1444 1445 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1446 return wait_for(g4x_reset_complete(pdev), 500); 1447 } 1448 1449 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1450 { 1451 struct pci_dev *pdev = dev_priv->drm.pdev; 1452 int ret; 1453 1454 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1455 I915_WRITE(VDECCLK_GATE_D, 1456 I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1457 POSTING_READ(VDECCLK_GATE_D); 1458 1459 pci_write_config_byte(pdev, I915_GDRST, 1460 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1461 ret = wait_for(g4x_reset_complete(pdev), 500); 1462 if (ret) { 1463 DRM_DEBUG_DRIVER("Wait for media reset failed\n"); 1464 goto out; 1465 } 1466 1467 pci_write_config_byte(pdev, I915_GDRST, 1468 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1469 ret = wait_for(g4x_reset_complete(pdev), 500); 1470 if (ret) { 1471 DRM_DEBUG_DRIVER("Wait for render reset failed\n"); 1472 goto out; 1473 } 1474 1475 out: 1476 pci_write_config_byte(pdev, I915_GDRST, 0); 1477 1478 I915_WRITE(VDECCLK_GATE_D, 1479 I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1480 POSTING_READ(VDECCLK_GATE_D); 1481 1482 return ret; 1483 } 1484 1485 static int ironlake_do_reset(struct drm_i915_private *dev_priv, 1486 unsigned engine_mask) 1487 { 1488 int ret; 1489 1490 I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1491 ret = intel_wait_for_register(dev_priv, 1492 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, 1493 500); 1494 if (ret) { 1495 DRM_DEBUG_DRIVER("Wait for render reset failed\n"); 1496 goto out; 1497 } 1498 1499 I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1500 ret = intel_wait_for_register(dev_priv, 1501 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, 1502 500); 1503 if (ret) { 1504 DRM_DEBUG_DRIVER("Wait for media reset failed\n"); 1505 goto out; 1506 } 1507 1508 out: 1509 I915_WRITE(ILK_GDSR, 0); 1510 POSTING_READ(ILK_GDSR); 1511 return ret; 1512 } 1513 1514 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ 1515 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, 1516 u32 hw_domain_mask) 1517 { 1518 int err; 1519 1520 /* GEN6_GDRST is not in the gt power well, no need to check 1521 * for fifo space for the write or forcewake the chip for 1522 * the read 1523 */ 1524 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); 1525 1526 /* Wait for the device to ack the reset requests */ 1527 err = intel_wait_for_register_fw(dev_priv, 1528 GEN6_GDRST, hw_domain_mask, 0, 1529 500); 1530 if (err) 1531 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", 1532 hw_domain_mask); 1533 1534 return err; 1535 } 1536 1537 /** 1538 * gen6_reset_engines - reset individual engines 1539 * @dev_priv: i915 device 1540 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset 1541 * 1542 * This function will reset the individual engines that are set in engine_mask. 1543 * If you provide ALL_ENGINES as mask, full global domain reset will be issued. 
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
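 *
 * A minimal usage sketch (the register, mask and timeout shown are
 * illustrative only)::
 *
 *	if (intel_wait_for_register(dev_priv,
 *				    RING_MI_MODE(RENDER_RING_BASE),
 *				    MODE_IDLE, MODE_IDLE,
 *				    500))
 *		DRM_DEBUG_DRIVER("render ring failed to idle\n");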
1645 * 1646 * Returns 0 if the register matches the desired condition, or -ETIMEOUT. 1647 */ 1648 int intel_wait_for_register(struct drm_i915_private *dev_priv, 1649 i915_reg_t reg, 1650 u32 mask, 1651 u32 value, 1652 unsigned int timeout_ms) 1653 { 1654 unsigned fw = 1655 intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); 1656 int ret; 1657 1658 might_sleep(); 1659 1660 spin_lock_irq(&dev_priv->uncore.lock); 1661 intel_uncore_forcewake_get__locked(dev_priv, fw); 1662 1663 ret = __intel_wait_for_register_fw(dev_priv, 1664 reg, mask, value, 1665 2, 0, NULL); 1666 1667 intel_uncore_forcewake_put__locked(dev_priv, fw); 1668 spin_unlock_irq(&dev_priv->uncore.lock); 1669 1670 if (ret) 1671 ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value, 1672 timeout_ms); 1673 1674 return ret; 1675 } 1676 1677 static int gen8_reset_engine_start(struct intel_engine_cs *engine) 1678 { 1679 struct drm_i915_private *dev_priv = engine->i915; 1680 int ret; 1681 1682 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1683 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); 1684 1685 ret = intel_wait_for_register_fw(dev_priv, 1686 RING_RESET_CTL(engine->mmio_base), 1687 RESET_CTL_READY_TO_RESET, 1688 RESET_CTL_READY_TO_RESET, 1689 700); 1690 if (ret) 1691 DRM_ERROR("%s: reset request timeout\n", engine->name); 1692 1693 return ret; 1694 } 1695 1696 static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) 1697 { 1698 struct drm_i915_private *dev_priv = engine->i915; 1699 1700 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1701 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); 1702 } 1703 1704 static int gen8_reset_engines(struct drm_i915_private *dev_priv, 1705 unsigned engine_mask) 1706 { 1707 struct intel_engine_cs *engine; 1708 unsigned int tmp; 1709 1710 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) 1711 if (gen8_reset_engine_start(engine)) 1712 goto not_ready; 1713 1714 return gen6_reset_engines(dev_priv, engine_mask); 1715 1716 not_ready: 1717 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) 1718 gen8_reset_engine_cancel(engine); 1719 1720 return -EIO; 1721 } 1722 1723 typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); 1724 1725 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) 1726 { 1727 if (!i915_modparams.reset) 1728 return NULL; 1729 1730 if (INTEL_INFO(dev_priv)->gen >= 8) 1731 return gen8_reset_engines; 1732 else if (INTEL_INFO(dev_priv)->gen >= 6) 1733 return gen6_reset_engines; 1734 else if (IS_GEN5(dev_priv)) 1735 return ironlake_do_reset; 1736 else if (IS_G4X(dev_priv)) 1737 return g4x_do_reset; 1738 else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 1739 return g33_do_reset; 1740 else if (INTEL_INFO(dev_priv)->gen >= 3) 1741 return i915_do_reset; 1742 else 1743 return NULL; 1744 } 1745 1746 int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1747 { 1748 reset_func reset = intel_get_gpu_reset(dev_priv); 1749 int retry; 1750 int ret; 1751 1752 might_sleep(); 1753 1754 /* If the power well sleeps during the reset, the reset 1755 * request may be dropped and never completes (causing -EIO). 1756 */ 1757 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1758 for (retry = 0; retry < 3; retry++) { 1759 1760 /* We stop engines, otherwise we might get failed reset and a 1761 * dead gpu (on elk). Also as modern gpu as kbl can suffer 1762 * from system hang if batchbuffer is progressing when 1763 * the reset is issued, regardless of READY_TO_RESET ack. 
1764 * Thus assume it is best to stop engines on all gens 1765 * where we have a gpu reset. 1766 * 1767 * WaMediaResetMainRingCleanup:ctg,elk (presumably) 1768 * 1769 * FIXME: Wa for more modern gens needs to be validated 1770 */ 1771 i915_stop_engines(dev_priv, engine_mask); 1772 1773 ret = -ENODEV; 1774 if (reset) 1775 ret = reset(dev_priv, engine_mask); 1776 if (ret != -ETIMEDOUT) 1777 break; 1778 1779 cond_resched(); 1780 } 1781 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1782 1783 return ret; 1784 } 1785 1786 bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) 1787 { 1788 return intel_get_gpu_reset(dev_priv) != NULL; 1789 } 1790 1791 /* 1792 * When GuC submission is enabled, GuC manages ELSP and can initiate the 1793 * engine reset too. For now, fall back to full GPU reset if it is enabled. 1794 */ 1795 bool intel_has_reset_engine(struct drm_i915_private *dev_priv) 1796 { 1797 return (dev_priv->info.has_reset_engine && 1798 !dev_priv->guc.execbuf_client && 1799 i915_modparams.reset >= 2); 1800 } 1801 1802 int intel_guc_reset(struct drm_i915_private *dev_priv) 1803 { 1804 int ret; 1805 1806 if (!HAS_GUC(dev_priv)) 1807 return -EINVAL; 1808 1809 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1810 ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC); 1811 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1812 1813 return ret; 1814 } 1815 1816 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) 1817 { 1818 return check_for_unclaimed_mmio(dev_priv); 1819 } 1820 1821 bool 1822 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) 1823 { 1824 if (unlikely(i915_modparams.mmio_debug || 1825 dev_priv->uncore.unclaimed_mmio_check <= 0)) 1826 return false; 1827 1828 if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { 1829 DRM_DEBUG("Unclaimed register detected, " 1830 "enabling oneshot unclaimed register reporting. 
" 1831 "Please use i915.mmio_debug=N for more information.\n"); 1832 i915_modparams.mmio_debug++; 1833 dev_priv->uncore.unclaimed_mmio_check--; 1834 return true; 1835 } 1836 1837 return false; 1838 } 1839 1840 static enum forcewake_domains 1841 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, 1842 i915_reg_t reg) 1843 { 1844 u32 offset = i915_mmio_reg_offset(reg); 1845 enum forcewake_domains fw_domains; 1846 1847 if (HAS_FWTABLE(dev_priv)) { 1848 fw_domains = __fwtable_reg_read_fw_domains(offset); 1849 } else if (INTEL_GEN(dev_priv) >= 6) { 1850 fw_domains = __gen6_reg_read_fw_domains(offset); 1851 } else { 1852 WARN_ON(!IS_GEN(dev_priv, 2, 5)); 1853 fw_domains = 0; 1854 } 1855 1856 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); 1857 1858 return fw_domains; 1859 } 1860 1861 static enum forcewake_domains 1862 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, 1863 i915_reg_t reg) 1864 { 1865 u32 offset = i915_mmio_reg_offset(reg); 1866 enum forcewake_domains fw_domains; 1867 1868 if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { 1869 fw_domains = __fwtable_reg_write_fw_domains(offset); 1870 } else if (IS_GEN8(dev_priv)) { 1871 fw_domains = __gen8_reg_write_fw_domains(offset); 1872 } else if (IS_GEN(dev_priv, 6, 7)) { 1873 fw_domains = FORCEWAKE_RENDER; 1874 } else { 1875 WARN_ON(!IS_GEN(dev_priv, 2, 5)); 1876 fw_domains = 0; 1877 } 1878 1879 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); 1880 1881 return fw_domains; 1882 } 1883 1884 /** 1885 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access 1886 * a register 1887 * @dev_priv: pointer to struct drm_i915_private 1888 * @reg: register in question 1889 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE 1890 * 1891 * Returns a set of forcewake domains required to be taken with for example 1892 * intel_uncore_forcewake_get for the specified register to be accessible in the 1893 * specified mode (read, write or read/write) with raw mmio accessors. 1894 * 1895 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the 1896 * callers to do FIFO management on their own or risk losing writes. 1897 */ 1898 enum forcewake_domains 1899 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, 1900 i915_reg_t reg, unsigned int op) 1901 { 1902 enum forcewake_domains fw_domains = 0; 1903 1904 WARN_ON(!op); 1905 1906 if (intel_vgpu_active(dev_priv)) 1907 return 0; 1908 1909 if (op & FW_REG_READ) 1910 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); 1911 1912 if (op & FW_REG_WRITE) 1913 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); 1914 1915 return fw_domains; 1916 } 1917 1918 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1919 #include "selftests/mock_uncore.c" 1920 #include "selftests/intel_uncore.c" 1921 #endif 1922