/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);

	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}

	dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
"read from" : "write to", 797 i915_mmio_reg_offset(reg))) 798 i915.mmio_debug--; /* Only report the first N failures */ 799 } 800 801 static inline void 802 unclaimed_reg_debug(struct drm_i915_private *dev_priv, 803 const i915_reg_t reg, 804 const bool read, 805 const bool before) 806 { 807 if (likely(!i915.mmio_debug)) 808 return; 809 810 __unclaimed_reg_debug(dev_priv, reg, read, before); 811 } 812 813 static const enum decoupled_power_domain fw2dpd_domain[] = { 814 GEN9_DECOUPLED_PD_RENDER, 815 GEN9_DECOUPLED_PD_BLITTER, 816 GEN9_DECOUPLED_PD_ALL, 817 GEN9_DECOUPLED_PD_MEDIA, 818 GEN9_DECOUPLED_PD_ALL, 819 GEN9_DECOUPLED_PD_ALL, 820 GEN9_DECOUPLED_PD_ALL 821 }; 822 823 /* 824 * Decoupled MMIO access for only 1 DWORD 825 */ 826 static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv, 827 u32 reg, 828 enum forcewake_domains fw_domain, 829 enum decoupled_ops operation) 830 { 831 enum decoupled_power_domain dp_domain; 832 u32 ctrl_reg_data = 0; 833 834 dp_domain = fw2dpd_domain[fw_domain - 1]; 835 836 ctrl_reg_data |= reg; 837 ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT); 838 ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT); 839 ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO; 840 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data); 841 842 if (wait_for_atomic((__raw_i915_read32(dev_priv, 843 GEN9_DECOUPLED_REG0_DW1) & 844 GEN9_DECOUPLED_DW1_GO) == 0, 845 FORCEWAKE_ACK_TIMEOUT_MS)) 846 DRM_ERROR("Decoupled MMIO wait timed out\n"); 847 } 848 849 static inline u32 850 __gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv, 851 u32 reg, 852 enum forcewake_domains fw_domain) 853 { 854 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain, 855 GEN9_DECOUPLED_OP_READ); 856 857 return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0); 858 } 859 860 static inline void 861 __gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv, 862 u32 reg, u32 data, 863 enum forcewake_domains fw_domain) 864 { 865 866 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data); 867 868 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain, 869 GEN9_DECOUPLED_OP_WRITE); 870 } 871 872 873 #define GEN2_READ_HEADER(x) \ 874 u##x val = 0; \ 875 assert_rpm_wakelock_held(dev_priv); 876 877 #define GEN2_READ_FOOTER \ 878 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 879 return val 880 881 #define __gen2_read(x) \ 882 static u##x \ 883 gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ 884 GEN2_READ_HEADER(x); \ 885 val = __raw_i915_read##x(dev_priv, reg); \ 886 GEN2_READ_FOOTER; \ 887 } 888 889 #define __gen5_read(x) \ 890 static u##x \ 891 gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ 892 GEN2_READ_HEADER(x); \ 893 ilk_dummy_write(dev_priv); \ 894 val = __raw_i915_read##x(dev_priv, reg); \ 895 GEN2_READ_FOOTER; \ 896 } 897 898 __gen5_read(8) 899 __gen5_read(16) 900 __gen5_read(32) 901 __gen5_read(64) 902 __gen2_read(8) 903 __gen2_read(16) 904 __gen2_read(32) 905 __gen2_read(64) 906 907 #undef __gen5_read 908 #undef __gen2_read 909 910 #undef GEN2_READ_FOOTER 911 #undef GEN2_READ_HEADER 912 913 #define GEN6_READ_HEADER(x) \ 914 u32 offset = i915_mmio_reg_offset(reg); \ 915 unsigned long irqflags; \ 916 u##x val = 0; \ 917 assert_rpm_wakelock_held(dev_priv); \ 918 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 919 unclaimed_reg_debug(dev_priv, reg, true, true) 920 921 #define GEN6_READ_FOOTER \ 922 unclaimed_reg_debug(dev_priv, reg, true, false); \ 923 
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
1295 */ 1296 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1297 break; 1298 case MBI_PMIC_BUS_ACCESS_END: 1299 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1300 break; 1301 } 1302 1303 return NOTIFY_OK; 1304 } 1305 1306 void intel_uncore_init(struct drm_i915_private *dev_priv) 1307 { 1308 i915_check_vgpu(dev_priv); 1309 1310 intel_uncore_edram_detect(dev_priv); 1311 intel_uncore_fw_domains_init(dev_priv); 1312 __intel_uncore_early_sanitize(dev_priv, false); 1313 1314 dev_priv->uncore.unclaimed_mmio_check = 1; 1315 dev_priv->uncore.pmic_bus_access_nb.notifier_call = 1316 i915_pmic_bus_access_notifier; 1317 1318 if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) { 1319 ASSIGN_WRITE_MMIO_VFUNCS(gen2); 1320 ASSIGN_READ_MMIO_VFUNCS(gen2); 1321 } else if (IS_GEN5(dev_priv)) { 1322 ASSIGN_WRITE_MMIO_VFUNCS(gen5); 1323 ASSIGN_READ_MMIO_VFUNCS(gen5); 1324 } else if (IS_GEN(dev_priv, 6, 7)) { 1325 ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1326 1327 if (IS_VALLEYVIEW(dev_priv)) { 1328 ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges); 1329 ASSIGN_READ_MMIO_VFUNCS(fwtable); 1330 } else { 1331 ASSIGN_READ_MMIO_VFUNCS(gen6); 1332 } 1333 } else if (IS_GEN8(dev_priv)) { 1334 if (IS_CHERRYVIEW(dev_priv)) { 1335 ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges); 1336 ASSIGN_WRITE_MMIO_VFUNCS(fwtable); 1337 ASSIGN_READ_MMIO_VFUNCS(fwtable); 1338 1339 } else { 1340 ASSIGN_WRITE_MMIO_VFUNCS(gen8); 1341 ASSIGN_READ_MMIO_VFUNCS(gen6); 1342 } 1343 } else { 1344 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); 1345 ASSIGN_WRITE_MMIO_VFUNCS(fwtable); 1346 ASSIGN_READ_MMIO_VFUNCS(fwtable); 1347 if (HAS_DECOUPLED_MMIO(dev_priv)) { 1348 dev_priv->uncore.funcs.mmio_readl = 1349 gen9_decoupled_read32; 1350 dev_priv->uncore.funcs.mmio_readq = 1351 gen9_decoupled_read64; 1352 dev_priv->uncore.funcs.mmio_writel = 1353 gen9_decoupled_write32; 1354 } 1355 } 1356 1357 iosf_mbi_register_pmic_bus_access_notifier( 1358 &dev_priv->uncore.pmic_bus_access_nb); 1359 1360 i915_check_and_clear_faults(dev_priv); 1361 } 1362 #undef ASSIGN_WRITE_MMIO_VFUNCS 1363 #undef ASSIGN_READ_MMIO_VFUNCS 1364 1365 void intel_uncore_fini(struct drm_i915_private *dev_priv) 1366 { 1367 iosf_mbi_unregister_pmic_bus_access_notifier( 1368 &dev_priv->uncore.pmic_bus_access_nb); 1369 1370 /* Paranoia: make sure we have disabled everything before we exit. */ 1371 intel_uncore_sanitize(dev_priv); 1372 intel_uncore_forcewake_reset(dev_priv, false); 1373 } 1374 1375 #define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) 1376 1377 static const struct register_whitelist { 1378 i915_reg_t offset_ldw, offset_udw; 1379 uint32_t size; 1380 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}

/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{

	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
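
/*
 * Illustrative usage sketch (not part of the driver, kept as a comment): how a
 * caller would hold a forcewake reference across a burst of raw MMIO accesses,
 * following the kernel-doc for intel_uncore_forcewake_get()/put() and
 * intel_uncore_forcewake_for_reg(). GEN6_RPNSWREQ is only an example register;
 * any register and access mode of interest could be substituted.
 *
 *	enum forcewake_domains fw;
 *	u32 val;
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, GEN6_RPNSWREQ,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	val = I915_READ_FW(GEN6_RPNSWREQ);
 *	I915_WRITE_FW(GEN6_RPNSWREQ, val);
 *	intel_uncore_forcewake_put(dev_priv, fw);
 */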

static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_uncore.c"
#endif