/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

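/*
 * Illustrative sketch (not part of the driver flow): the helpers above are
 * meant to be used pairwise per domain -- request the wake, wait for the
 * FORCEWAKE_KERNEL ack, do the MMIO work, then release.  Roughly:
 *
 *	fw_domain_wait_ack_clear(d);	// previous release has landed
 *	fw_domain_get(d);		// write d->val_set
 *	fw_domain_wait_ack(d);		// hardware acks FORCEWAKE_KERNEL
 *	...				// raw register access
 *	fw_domain_put(d);		// write d->val_clear
 *
 * fw_domains_get()/fw_domains_put() below implement this pattern across a
 * mask of domains.
 */
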
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0) {
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
		dev_priv->uncore.fw_domains_active &= ~domain->mask;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

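/*
 * Note on the deferred release above (illustrative summary): automatic,
 * register-access triggered forcewake references are not dropped
 * synchronously.  fw_domain_arm_timer() takes a wake_count reference and arms
 * a ~1ms hrtimer; when the timer fires, intel_uncore_fw_release_timer() drops
 * that reference and only then calls force_wake_put().  A rough sketch of the
 * lifetime, assuming a single automatic access:
 *
 *	__force_wake_auto()            ->  wake_count = 1, timer armed
 *	  ...raw MMIO access...
 *	intel_uncore_fw_release_timer  ->  wake_count = 0, hardware released
 *
 * This batches back-to-back register accesses under one hardware wake.
 */
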
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}

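/*
 * Worked example (illustrative): if the eDRAM capability register decodes to
 * 8 banks, ways index 1 (-> 8 ways) and sets index 2 (-> 2 sets), the size
 * reported by gen9_edram_size() is 8 * 8 * 2 * 1 MiB = 128 MiB.
 */
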
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
		dev_priv->uncore.fw_domains_active |= fw_domains;
	}
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains this function should be called at the beginning of the
 * sequence, and the references should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

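/*
 * Illustrative usage (a sketch, not code from this file): a caller that needs
 * a block of raw register accesses to execute without the GT power-gating the
 * relevant wells brackets the sequence with get/put, e.g.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... several I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * The *__locked variants do the same but expect the caller to already hold
 * dev_priv->uncore.lock, e.g. when mixing with other uncore state updates.
 */
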
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(IS_GEN9(dev_priv) &&
			     (prev + 1) != (s32)ranges->start);
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

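/*
 * Example of the table lookup (illustrative, numbers taken from the VLV table
 * below): a read of offset 0x12080 binary-searches the sorted ranges, lands
 * in GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA) and therefore needs only
 * the media forcewake domain; an offset that falls in none of the ranges
 * (e.g. 0x14000 on VLV) needs no forcewake at all.
 */
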
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

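/*
 * Example (illustrative): on gen8 a write to RING_TAIL(RENDER_RING_BASE)
 * (offset 0x2030, within the 0x2000 ring base) hits a shadowed register, so
 * __gen8_reg_write_fw_domains() returns 0 and the write is issued without
 * grabbing forcewake; a write to a non-shadowed register below 0x40000, say
 * GEN6_RP_CONTROL, still returns FORCEWAKE_RENDER.
 */
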
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into.
	 */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			     GEN9_DECOUPLED_REG0_DW1) &
			     GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{

	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}


#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

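/*
 * Illustrative note: each __genX_read(n)/__genX_write(n) invocation above and
 * below stamps out a full accessor.  For example __gen5_read(32) expands to
 * roughly:
 *
 *	static u32
 *	gen5_read32(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace)
 *	{
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		ilk_dummy_write(dev_priv);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 *
 * and is later wired into dev_priv->uncore.funcs via ASSIGN_READ_MMIO_VFUNCS().
 */
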
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

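/*
 * Aside on the values chosen above (illustrative): post-gen6 forcewake
 * registers are "masked" registers, where the upper 16 bits select which of
 * the lower 16 bits actually get written.  Assuming the usual i915 helpers,
 * _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL) writes the kernel wake bit together
 * with its mask bit, and _MASKED_BIT_DISABLE(0xffff) at reset clears every
 * low bit in one shot without a read-modify-write.
 */
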
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
						gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
						gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
						gen9_decoupled_write32;
		}
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit.
	 */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}

/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e.
 * it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{

	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}

static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

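/*
 * Illustrative usage of the polling helpers above (a sketch, not code from
 * this file): gen8_request_engine_reset() is itself the in-tree pattern --
 * issue a write under forcewake, then spin on a status bit, e.g.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	I915_WRITE_FW(reg, request_bit);
 *	err = intel_wait_for_register_fw(dev_priv, reg, ack_bit, ack_bit, 50);
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * where "reg", "request_bit" and "ack_bit" stand in for a real register and
 * bits.  intel_wait_for_register() does the same but takes and releases the
 * needed forcewake itself, so it suits slower, sleepable waits.
 */
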
static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
" 1888 "Please use i915.mmio_debug=N for more information.\n"); 1889 i915.mmio_debug++; 1890 dev_priv->uncore.unclaimed_mmio_check--; 1891 return true; 1892 } 1893 1894 return false; 1895 } 1896 1897 static enum forcewake_domains 1898 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, 1899 i915_reg_t reg) 1900 { 1901 u32 offset = i915_mmio_reg_offset(reg); 1902 enum forcewake_domains fw_domains; 1903 1904 if (HAS_FWTABLE(dev_priv)) { 1905 fw_domains = __fwtable_reg_read_fw_domains(offset); 1906 } else if (INTEL_GEN(dev_priv) >= 6) { 1907 fw_domains = __gen6_reg_read_fw_domains(offset); 1908 } else { 1909 WARN_ON(!IS_GEN(dev_priv, 2, 5)); 1910 fw_domains = 0; 1911 } 1912 1913 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); 1914 1915 return fw_domains; 1916 } 1917 1918 static enum forcewake_domains 1919 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, 1920 i915_reg_t reg) 1921 { 1922 u32 offset = i915_mmio_reg_offset(reg); 1923 enum forcewake_domains fw_domains; 1924 1925 if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { 1926 fw_domains = __fwtable_reg_write_fw_domains(offset); 1927 } else if (IS_GEN8(dev_priv)) { 1928 fw_domains = __gen8_reg_write_fw_domains(offset); 1929 } else if (IS_GEN(dev_priv, 6, 7)) { 1930 fw_domains = FORCEWAKE_RENDER; 1931 } else { 1932 WARN_ON(!IS_GEN(dev_priv, 2, 5)); 1933 fw_domains = 0; 1934 } 1935 1936 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); 1937 1938 return fw_domains; 1939 } 1940 1941 /** 1942 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access 1943 * a register 1944 * @dev_priv: pointer to struct drm_i915_private 1945 * @reg: register in question 1946 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE 1947 * 1948 * Returns a set of forcewake domains required to be taken with for example 1949 * intel_uncore_forcewake_get for the specified register to be accessible in the 1950 * specified mode (read, write or read/write) with raw mmio accessors. 1951 * 1952 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the 1953 * callers to do FIFO management on their own or risk losing writes. 1954 */ 1955 enum forcewake_domains 1956 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, 1957 i915_reg_t reg, unsigned int op) 1958 { 1959 enum forcewake_domains fw_domains = 0; 1960 1961 WARN_ON(!op); 1962 1963 if (intel_vgpu_active(dev_priv)) 1964 return 0; 1965 1966 if (op & FW_REG_READ) 1967 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); 1968 1969 if (op & FW_REG_WRITE) 1970 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); 1971 1972 return fw_domains; 1973 } 1974