/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
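
/*
 * A note on the masked-write convention used by fw_set()/fw_clear() (a
 * summary, not new behaviour): the FORCEWAKE registers are "masked"
 * registers, i.e. the upper 16 bits select which of the lower 16 bits a
 * write actually affects, letting independent agents (driver, firmware,
 * hardware) set or clear individual request bits without a
 * read-modify-write cycle. For example, assuming FORCEWAKE_KERNEL is
 * BIT(0), fw_set(d, FORCEWAKE_KERNEL) writes 0x00010001 (enable bit 0)
 * while fw_clear(d, FORCEWAKE_KERNEL) writes 0x00010000 (disable bit 0).
 */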

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know whether the powerwell for the forcewake domain
	 * we are trying to reset actually exists at this point (engines could
	 * be fused off in ICL+), so don't wait for acks.
	 */
	/* WaRsClearFWBitsAtReset */
	if (GRAPHICS_VER(d->uncore->i915) >= 12)
		fw_clear(d, 0xefff);
	else
		fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * The driver's wake request can collide with the hardware's own
	 * wake requests, and that collision can cause the hardware to not
	 * deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
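
/*
 * The wait helpers below escalate in three steps: a plain ack wait (the
 * fast path), then the fallback-bit toggle above, and only if that also
 * fails, the plain wait again so that its DRM_ERROR and CI taint fire.
 * In the worst case the fallback loop alone adds roughly
 * 10 + 20 + ... + 100 us = 550 us of udelay() on top of the ack timeouts.
 */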

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}
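
/*
 * Note the two-pass structure of the "get" routines below: the first loop
 * wakes every requested domain, and only then does a second loop wait for
 * the set-acks, so the per-domain ack latencies overlap rather than
 * accumulate.
 */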

static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * Workaround for a sporadic read returning 0: wait for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}
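
/*
 * Timer-based auto release: callers that take the delayed put path (see
 * __intel_uncore_forcewake_put()) and implicit wakes from register
 * accesses (see ___force_wake_auto() later in this file) hold an extra
 * wake_count reference and arm a ~1 ms hrtimer instead of dropping the
 * domain immediately. If the domain was used again in the meantime,
 * domain->active is true and the handler below just re-arms; otherwise it
 * drops the reference and, at zero, powers the domain down.
 */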

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers have run before taking the lock.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR. When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly. Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}
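
/*
 * Suspend/resume pairing (a summary of the flow implemented here):
 * intel_uncore_suspend() above stashes whatever domains were still held in
 * uncore->fw_domains_saved via intel_uncore_forcewake_reset(), and
 * intel_uncore_resume_early() fetches and re-acquires them, so forcewake
 * references held across a suspend cycle remain valid afterwards.
 */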

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence. Subsequently, the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
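
/*
 * Typical usage (an illustrative sketch only; the runtime-pm wakeref
 * handling shown is the caller's responsibility, not part of this
 * function):
 *
 *	wakeref = intel_runtime_pm_get(uncore->rpm);
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 *	... register sequence that must not lose the powerwell ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 *	intel_runtime_pm_put(uncore->rpm, wakeref);
 */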

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			drm_info(&uncore->i915->drm,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}
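
/*
 * Note that intel_uncore_forcewake_put_delayed() keeps the hardware awake
 * for roughly one extra timer period (~1 ms) after the last reference is
 * dropped; intel_uncore_forcewake_flush() above cancels any such pending
 * release and drops the reference immediately.
 */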

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

/*
 * Shadowed register tables describe special register ranges that i915 is
 * allowed to write to without acquiring forcewake. If these registers' power
 * wells are down, the hardware will save values written by i915 to a shadow
 * copy and automatically transfer them into the real register the next time
 * the power well is woken up. Shadowing only applies to writes; forcewake
 * must still be acquired when reading from registers in these ranges.
 *
 * The documentation for shadowed registers is somewhat spotty on older
 * platforms. However, a register missing from these lists is non-fatal; it
 * just means we'll wake up the hardware for some register accesses where we
 * didn't really need to.
 *
 * The ranges listed in these tables must be sorted by offset.
 *
 * When adding new tables here, please also add them to
 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */
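
/*
 * A worked example of the rule above (illustrative; it assumes 0x2030 is a
 * ring TAIL register, which is why it appears in every table below): a
 * write to 0x2030 can skip forcewake because the hardware latches the
 * value into the shadow copy even while the powerwell is down, but a read
 * of 0x2030 must still go through find_fw_domain() and potentially wake
 * the domain to observe the register's current contents.
 */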

static const struct i915_range gen8_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
};

static const struct i915_range gen11_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2550, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22230, .end = 0x22230 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};

static const struct i915_range gen12_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4D4, .end = 0xC4D4 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },

	/*
	 * The rest of these ranges are specific to Xe_HP and beyond, but
	 * are reserved/unused ranges on earlier gen12 platforms, so they can
	 * be safely added to the gen12 table.
	 */
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range dg2_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range pvc_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
	if (key < range->start)
		return -1;
	else if (key > range->end)
		return 1;
	else
		return 0;
}

static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{
	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
		return false;

	return BSEARCH(offset,
		       uncore->shadowed_reg_table,
		       uncore->shadowed_reg_table_entries,
		       mmio_range_cmp);
}

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with
 * the same forcewake domain, it is customary to combine them into a single
 * row in the tables below to keep the tables small and lookups fast.
 * Likewise, reserved/unused ranges may be combined with the preceding and/or
 * following ranges since the driver will never be making MMIO accesses in
 * those ranges.
 *
 * For example, if the bspec were to list:
 *
 *    ...
 *    0x1000 - 0x1fff: GT
 *    0x2000 - 0x2cff: GT
 *    0x2d00 - 0x2fff: unused/reserved
 *    0x3000 - 0xffff: GT
 *    ...
 *
 * these could all be represented by a single line in the code:
 *
 *    GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
 *
 * When adding new forcewake tables here, please also add them to
 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will
 * be scanned for obvious mistakes or typos by the selftests.
 */
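
/*
 * Putting the two lookups together (an illustrative walk-through using
 * values from the tables in this file): for a write to offset 0xA188 on a
 * Gen12 part, NEEDS_FORCE_WAKE() passes but is_shadowed() matches the
 * 0xA188 entry in gen12_shadowed_regs[], so
 * __fwtable_reg_write_fw_domains() evaluates to 0 and no powerwell is
 * woken. A read of the same offset skips the shadow check and resolves its
 * domain through find_fw_domain() instead.
 */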

static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};

static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0 -  0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};

/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain.
 */

#define XEHP_FWRANGES(FW_RANGE_D800) \
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
		0x0 - 0xaff: reserved \
		0xb00 - 0x1fff: always on */ \
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
		0x4b00 - 0x4fff: reserved \
		0x5000 - 0x51ff: always on */ \
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
		0x8160 - 0x817f: reserved \
		0x8180 - 0x81ff: always on */ \
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
		0x8500 - 0x87ff: gt \
		0x8800 - 0x8c7f: reserved \
		0x8c80 - 0x8cff: gt (DG2 only) */ \
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
		0x8d00 - 0x8dff: render (DG2 only) \
		0x8e00 - 0x8fff: reserved */ \
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
		0x9000 - 0x947f: gt \
		0x9480 - 0x94cf: reserved */ \
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
		0x9560 - 0x95ff: always on \
		0x9600 - 0x967f: reserved */ \
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
		0x9680 - 0x96ff: render (DG2 only) \
		0x9700 - 0x97ff: reserved */ \
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
		0x9800 - 0xb4ff: gt \
		0xb500 - 0xbfff: reserved \
		0xc000 - 0xcfff: gt */ \
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
		0xdd00 - 0xddff: gt \
		0xde00 - 0xde7f: reserved */ \
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
		0xde80 - 0xdfff: render \
		0xe000 - 0xe0ff: reserved \
		0xe100 - 0xe8ff: render */ \
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
		0xe900 - 0xe9ff: gt \
		0xea00 - 0xefff: reserved \
		0xf000 - 0xffff: gt */ \
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
		0x10000 - 0x11fff: reserved \
		0x12000 - 0x127ff: always on \
		0x12800 - 0x12fff: reserved */ \
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x13200 - 0x133ff: VD2 (DG2 only) \
		0x13400 - 0x13fff: reserved */ \
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
		0x15000 - 0x15fff: gt (DG2 only) \
		0x16000 - 0x16dff: reserved */ \
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x20000 - 0x20fff: VD0 (XEHPSDV only) \
		0x21000 - 0x21fff: reserved */ \
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
		0x24000 - 0x2407f: always on \
		0x24080 - 0x2417f: reserved */ \
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
		0x24180 - 0x241ff: gt \
		0x24200 - 0x249ff: reserved */ \
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
		0x24a00 - 0x24a7f: render \
		0x24a80 - 0x251ff: reserved */ \
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
		0x25200 - 0x252ff: gt \
		0x25300 - 0x25fff: reserved */ \
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
		0x26000 - 0x27fff: render \
		0x28000 - 0x29fff: reserved \
		0x2a000 - 0x2ffff: undocumented */ \
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x1c0000 - 0x1c2bff: VD0 \
		0x1c2c00 - 0x1c2cff: reserved \
		0x1c2d00 - 0x1c2dff: VD0 \
		0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
		0x1c3f00 - 0x1c3fff: VD0 */ \
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
		0x1c4000 - 0x1c6bff: VD1 \
		0x1c6c00 - 0x1c6cff: reserved \
		0x1c6d00 - 0x1c6dff: VD1 \
		0x1c6e00 - 0x1c7fff: reserved */ \
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
		0x1c8000 - 0x1ca0ff: VE0 \
		0x1ca100 - 0x1cbfff: reserved */ \
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x1d0000 - 0x1d2bff: VD2 \
		0x1d2c00 - 0x1d2cff: reserved \
		0x1d2d00 - 0x1d2dff: VD2 \
		0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
		0x1d3e00 - 0x1d3eff: reserved \
		0x1d3f00 - 0x1d3fff: VD2 */ \
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
		0x1d4000 - 0x1d6bff: VD3 \
		0x1d6c00 - 0x1d6cff: reserved \
		0x1d6d00 - 0x1d6dff: VD3 \
		0x1d6e00 - 0x1d7fff: reserved */ \
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
		0x1d8000 - 0x1da0ff: VE1 \
		0x1da100 - 0x1dffff: reserved */ \
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
		0x1e0000 - 0x1e2bff: VD4 \
		0x1e2c00 - 0x1e2cff: reserved \
		0x1e2d00 - 0x1e2dff: VD4 \
		0x1e2e00 - 0x1e3eff: reserved \
		0x1e3f00 - 0x1e3fff: VD4 */ \
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
		0x1e4000 - 0x1e6bff: VD5 \
		0x1e6c00 - 0x1e6cff: reserved \
		0x1e6d00 - 0x1e6dff: VD5 \
		0x1e6e00 - 0x1e7fff: reserved */ \
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
		0x1e8000 - 0x1ea0ff: VE2 \
		0x1ea100 - 0x1effff: reserved */ \
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
		0x1f0000 - 0x1f2bff: VD6 \
		0x1f2c00 - 0x1f2cff: reserved \
		0x1f2d00 - 0x1f2dff: VD6 \
		0x1f2e00 - 0x1f3eff: reserved \
		0x1f3f00 - 0x1f3fff: VD6 */ \
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
		0x1f4000 - 0x1f6bff: VD7 \
		0x1f6c00 - 0x1f6cff: reserved \
		0x1f6d00 - 0x1f6dff: VD7 \
		0x1f6e00 - 0x1f7fff: reserved */ \
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),

static const struct intel_forcewake_range __xehp_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_GT)
};

static const struct intel_forcewake_range __dg2_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_RENDER)
};

static const struct intel_forcewake_range __pvc_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
		0x4000 - 0x4aff: gt
		0x4b00 - 0x4fff: reserved
		0x5000 - 0x51ff: gt
		0x5200 - 0x52ff: reserved
		0x5300 - 0x53ff: gt
		0x5400 - 0x7fff: reserved
		0x8000 - 0x813f: gt */
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x82ff: gt
		0x8300 - 0x84ff: reserved
		0x8500 - 0x887f: gt
		0x8880 - 0x8a7f: reserved
		0x8a80 - 0x8aff: gt
		0x8b00 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd3ff, 0),
	GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdeff: render
		0xdf00 - 0xe1ff: reserved
		0xe200 - 0xe7ff: render
		0xe800 - 0xe8ff: reserved */
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
		0xe900 - 0xe9ff: gt
		0xea00 - 0xebff: reserved
		0xec00 - 0xffff: gt
		0x10000 - 0x11fff: reserved */
	GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
		0x12000 - 0x127ff: always on
		0x12800 - 0x12fff: reserved */
	GEN_FW_RANGE(0x13000, 0x23fff, FORCEWAKE_GT), /*
		0x13000 - 0x135ff: gt
		0x13600 - 0x147ff: reserved
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved
		0x1a000 - 0x1ffff: gt
		0x20000 - 0x21fff: reserved
		0x22000 - 0x23fff: gt */
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x3ffff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x251ff: reserved
		0x25200 - 0x252ff: gt
		0x25300 - 0x25fff: reserved
		0x26000 - 0x27fff: gt
		0x28000 - 0x2ffff: reserved
		0x30000 - 0x3ffff: gt */
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
		0x1c4000 - 0x1c6aff: VD1
		0x1c6b00 - 0x1c7eff: reserved
		0x1c7f00 - 0x1c7fff: VD1
		0x1c8000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2aff: VD2
		0x1d2b00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2
		0x1d4000 - 0x23ffff: reserved */
	GEN_FW_RANGE(0x240000, 0x3dffff, 0),
	GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/*
	 * WaIssueDummyWriteToWakeupFromRC6:ilk
	 * Issue a dummy write to wake up the chip from rc6 before touching it
	 * for real. MI_MODE is masked, hence harmless to write 0 into.
	 */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore),
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}

static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{
	if (check_for_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"Unclaimed access detected before %s register 0x%x\n",
			read ? "read from" : "write to",
			i915_mmio_reg_offset(reg));
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!uncore->i915->params.mmio_debug))
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before) {
		spin_lock(&uncore->debug->lock);
		__unclaimed_previous_reg_debug(uncore, reg, read);
	} else {
		__unclaimed_reg_debug(uncore, reg, read);
		spin_unlock(&uncore->debug->lock);
	}
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
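
/*
 * Editor's note -- an illustrative sketch only, not from the original file:
 * GEN6_READ_HEADER/GEN6_READ_FOOTER bracket every generated accessor so the
 * uncore lock is held and the unclaimed-access debug check runs both before
 * and after the raw access. __gen_fwtable_read(32) below effectively
 * expands to:
 *
 *	static u32
 *	fwtable_read32(struct intel_uncore *uncore, i915_reg_t reg, bool trace)
 *	{
 *		// lock + "before" unclaimed check (GEN6_READ_HEADER)
 *		// wake whichever domains the fw table demands for this offset
 *		// raw read, then "after" check + unlock (GEN6_READ_FOOTER)
 *	}
 */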
static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_fwtable_read(x) \
static u##x \
fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
{ \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
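
/*
 * Editor's note -- illustrative only, not from the original file: on
 * gen6/gen7 there is no per-register forcewake table for writes. Instead,
 * any write into the forcewake-protected range (roughly, offsets below
 * 0x40000, which is what NEEDS_FORCE_WAKE() tests) must first make sure
 * the GT FIFO has free slots, otherwise writes can be silently dropped:
 *
 *	if (NEEDS_FORCE_WAKE(offset))
 *		__gen6_gt_wait_for_fifo(uncore);	// wait for free entries
 *	__raw_uncore_write32(uncore, reg, val);
 */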
#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)
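
/*
 * Editor's note -- an illustrative expansion, not from the original file:
 * the ASSIGN_* macros paste their second argument onto the accessor names,
 * so e.g.
 *
 *	ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
 *
 * wires up
 *
 *	uncore->funcs.mmio_writeb = fwtable_write8;
 *	uncore->funcs.mmio_writew = fwtable_write16;
 *	uncore->funcs.mmio_writel = fwtable_write32;
 *	uncore->funcs.write_fw_domains = fwtable_reg_write_fw_domains;
 */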
static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};
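
/*
 * Editor's note -- a summarizing sketch inferred from the init code below,
 * not spelled out in the original: the three ops tables above select how a
 * forcewake request is acknowledged, e.g.
 *
 *	uncore->fw_get_funcs = &uncore_get_fallback;	  // gen9+: retry via
 *							  // a fallback toggle
 *	uncore->fw_get_funcs = &uncore_get_normal;	  // vlv/chv: plain ack
 *	uncore->fw_get_funcs = &uncore_get_thread_status; // gen6/7: also poll
 *							  // the thread status
 */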
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		/* we'll prune the domains of missing engines later */
		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
		int i;

		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}
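
/*
 * Editor's note -- illustrative only, not from the original file: the local
 * fw_domain_init() wrapper above expands to
 *
 *	(ret ?: (ret = __fw_domain_init(...)))
 *
 * so once any call fails, ret is non-zero and every later call in the chain
 * becomes a no-op; the single "if (ret)" check at the out: label then
 * unwinds whichever domains were successfully created.
 */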
request." 2207 * errors. 2208 * 2209 * The notifier is unregistered during intel_runtime_suspend(), 2210 * so it's ok to access the HW here without holding a RPM 2211 * wake reference -> disable wakeref asserts for the time of 2212 * the access. 2213 */ 2214 disable_rpm_wakeref_asserts(uncore->rpm); 2215 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 2216 enable_rpm_wakeref_asserts(uncore->rpm); 2217 break; 2218 case MBI_PMIC_BUS_ACCESS_END: 2219 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 2220 break; 2221 } 2222 2223 return NOTIFY_OK; 2224 } 2225 2226 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) 2227 { 2228 struct drm_i915_private *i915 = uncore->i915; 2229 int mmio_size; 2230 2231 /* 2232 * Before gen4, the registers and the GTT are behind different BARs. 2233 * However, from gen4 onwards, the registers and the GTT are shared 2234 * in the same BAR, so we want to restrict this ioremap from 2235 * clobbering the GTT which we want ioremap_wc instead. Fortunately, 2236 * the register BAR remains the same size for all the earlier 2237 * generations up to Ironlake. 2238 * For dgfx chips register range is expanded to 4MB, and this larger 2239 * range is also used for integrated gpus beginning with Meteor Lake. 2240 */ 2241 if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) 2242 mmio_size = 4 * 1024 * 1024; 2243 else if (GRAPHICS_VER(i915) >= 5) 2244 mmio_size = 2 * 1024 * 1024; 2245 else 2246 mmio_size = 512 * 1024; 2247 2248 uncore->regs = ioremap(phys_addr, mmio_size); 2249 if (uncore->regs == NULL) { 2250 drm_err(&i915->drm, "failed to map registers\n"); 2251 return -EIO; 2252 } 2253 2254 return 0; 2255 } 2256 2257 void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) 2258 { 2259 iounmap(uncore->regs); 2260 } 2261 2262 void intel_uncore_init_early(struct intel_uncore *uncore, 2263 struct intel_gt *gt) 2264 { 2265 spin_lock_init(&uncore->lock); 2266 uncore->i915 = gt->i915; 2267 uncore->gt = gt; 2268 uncore->rpm = >->i915->runtime_pm; 2269 uncore->debug = >->i915->mmio_debug; 2270 } 2271 2272 static void uncore_raw_init(struct intel_uncore *uncore) 2273 { 2274 GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); 2275 2276 if (intel_vgpu_active(uncore->i915)) { 2277 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu); 2278 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu); 2279 } else if (GRAPHICS_VER(uncore->i915) == 5) { 2280 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); 2281 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); 2282 } else { 2283 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); 2284 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); 2285 } 2286 } 2287 2288 static int uncore_forcewake_init(struct intel_uncore *uncore) 2289 { 2290 struct drm_i915_private *i915 = uncore->i915; 2291 int ret; 2292 2293 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); 2294 2295 ret = intel_uncore_fw_domains_init(uncore); 2296 if (ret) 2297 return ret; 2298 forcewake_early_sanitize(uncore, 0); 2299 2300 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); 2301 2302 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) { 2303 ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges); 2304 ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs); 2305 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); 2306 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { 2307 ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges); 2308 ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs); 2309 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); 2310 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 2311 ASSIGN_FW_DOMAINS_TABLE(uncore, 
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = gt->i915;
	uncore->gt = gt;
	uncore->rpm = &gt->i915->runtime_pm;
	uncore->debug = &gt->i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}
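
/*
 * Editor's note -- an illustrative ordering, assumed from the functions in
 * this file rather than spelled out in it:
 *
 *	intel_uncore_init_early(uncore, gt);
 *	intel_uncore_setup_mmio(uncore, phys_addr);
 *	intel_uncore_init_mmio(uncore);
 *	// ...engine/fuse discovery...
 *	intel_uncore_prune_engine_fw_domains(uncore, gt);
 *	// ...driver runs...
 *	intel_uncore_fini_mmio(uncore);
 *	intel_uncore_cleanup_mmio(uncore);
 */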
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
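 *
 * Example (editor's illustrative sketch, not from the original source; the
 * register, mask, expected value and timeout are hypothetical)::
 *
 *     u32 val;
 *     int err;
 *
 *     err = __intel_wait_for_register_fw(uncore, reg, mask, expected,
 *                                        500, 0, &val);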
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned int fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}
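
/*
 * Editor's sketch -- hypothetical debug usage, not from the original file:
 * detection is typically armed once and then polled after suspect MMIO
 * activity, e.g.
 *
 *	intel_uncore_arm_unclaimed_mmio_detection(uncore);
 *	... mmio traffic ...
 *	if (intel_uncore_unclaimed_mmio(uncore))
 *		drm_dbg(&i915->drm, "unclaimed access seen\n");
 */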
" 2602 "Please use i915.mmio_debug=N for more information.\n"); 2603 uncore->i915->params.mmio_debug++; 2604 } 2605 uncore->debug->unclaimed_mmio_check--; 2606 ret = true; 2607 } 2608 2609 out: 2610 spin_unlock_irq(&uncore->debug->lock); 2611 2612 return ret; 2613 } 2614 2615 /** 2616 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access 2617 * a register 2618 * @uncore: pointer to struct intel_uncore 2619 * @reg: register in question 2620 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE 2621 * 2622 * Returns a set of forcewake domains required to be taken with for example 2623 * intel_uncore_forcewake_get for the specified register to be accessible in the 2624 * specified mode (read, write or read/write) with raw mmio accessors. 2625 * 2626 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the 2627 * callers to do FIFO management on their own or risk losing writes. 2628 */ 2629 enum forcewake_domains 2630 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, 2631 i915_reg_t reg, unsigned int op) 2632 { 2633 enum forcewake_domains fw_domains = 0; 2634 2635 drm_WARN_ON(&uncore->i915->drm, !op); 2636 2637 if (!intel_uncore_has_forcewake(uncore)) 2638 return 0; 2639 2640 if (op & FW_REG_READ) 2641 fw_domains = uncore->funcs.read_fw_domains(uncore, reg); 2642 2643 if (op & FW_REG_WRITE) 2644 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); 2645 2646 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains); 2647 2648 return fw_domains; 2649 } 2650 2651 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2652 #include "selftests/mock_uncore.c" 2653 #include "selftests/intel_uncore.c" 2654 #endif 2655