/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}
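/*
 * Instead of dropping a forcewake reference as soon as an mmio access
 * completes, arm a ~1ms hrtimer per domain so that bursts of register
 * accesses don't pay the wake/ack handshake every time. The reference is
 * dropped from intel_uncore_fw_release_timer() once the domain goes idle.
 */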
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
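/*
 * On gen6/gen7, mmio writes are posted through a hardware FIFO with a
 * limited number of entries. Before issuing a write we make sure there is
 * room beyond the reserved entries, otherwise the write could be dropped;
 * see __gen6_write() below, which calls this helper for every register
 * write that needs forcewake.
 */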
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read the FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}
void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains is FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
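/*
 * Typical usage pattern for a multi-register sequence (an illustrative
 * sketch only): take an explicit wakeref for the duration of the sequence
 * and use the _fw accessors, which skip the per-access forcewake handling:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 *	intel_uncore_write_fw(uncore, reg0, val0);
 *	intel_uncore_write_fw(uncore, reg1, val1);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 */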
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			drm_info(&uncore->i915->drm,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
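/*
 * The user_get/user_put pair above is used, for example, by the
 * i915_forcewake_user debugfs interface, which holds the GT awake while
 * userspace pokes at registers directly.
 */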
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		uncore->funcs.force_wake_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->funcs.force_wake_put)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->funcs.force_wake_get)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
	find_fw_domain(uncore, offset)

#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
	find_fw_domain(uncore, offset)
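/*
 * Writes to "shadowed" registers are tracked by the hardware itself and
 * re-applied when the GT wakes, so the write paths below (see the
 * __gen*_reg_write_fw_domains macros) skip taking forcewake for them;
 * reads still go through the normal range tables.
 */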
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen12_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)
__is_genX_shadowed(12)

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (!is_gen11_shadowed(__offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (!is_gen12_shadowed(__offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
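/*
 * Worked example of the write-path lookup above, using the CHV table: a
 * write to 0xb100 falls in the 0xb000-0xb47f range and so takes
 * FORCEWAKE_RENDER, while a write to GEN6_RPNSWREQ (0xa008) is
 * gen8-shadowed and takes no forcewake at all.
 */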
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/*
	 * WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into.
	 */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}
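/*
 * The unclaimed-mmio checks below are gated by the i915.mmio_debug module
 * parameter so that the common fast path stays a single branch; the
 * parameter is decremented on each warning, limiting how many failures
 * get reported.
 */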
"read from" : "write to", 1186 i915_mmio_reg_offset(reg))) 1187 /* Only report the first N failures */ 1188 uncore->i915->params.mmio_debug--; 1189 } 1190 1191 static inline void 1192 unclaimed_reg_debug(struct intel_uncore *uncore, 1193 const i915_reg_t reg, 1194 const bool read, 1195 const bool before) 1196 { 1197 if (likely(!uncore->i915->params.mmio_debug)) 1198 return; 1199 1200 /* interrupts are disabled and re-enabled around uncore->lock usage */ 1201 lockdep_assert_held(&uncore->lock); 1202 1203 if (before) 1204 spin_lock(&uncore->debug->lock); 1205 1206 __unclaimed_reg_debug(uncore, reg, read, before); 1207 1208 if (!before) 1209 spin_unlock(&uncore->debug->lock); 1210 } 1211 1212 #define __vgpu_read(x) \ 1213 static u##x \ 1214 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1215 u##x val = __raw_uncore_read##x(uncore, reg); \ 1216 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1217 return val; \ 1218 } 1219 __vgpu_read(8) 1220 __vgpu_read(16) 1221 __vgpu_read(32) 1222 __vgpu_read(64) 1223 1224 #define GEN2_READ_HEADER(x) \ 1225 u##x val = 0; \ 1226 assert_rpm_wakelock_held(uncore->rpm); 1227 1228 #define GEN2_READ_FOOTER \ 1229 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1230 return val 1231 1232 #define __gen2_read(x) \ 1233 static u##x \ 1234 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1235 GEN2_READ_HEADER(x); \ 1236 val = __raw_uncore_read##x(uncore, reg); \ 1237 GEN2_READ_FOOTER; \ 1238 } 1239 1240 #define __gen5_read(x) \ 1241 static u##x \ 1242 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1243 GEN2_READ_HEADER(x); \ 1244 ilk_dummy_write(uncore); \ 1245 val = __raw_uncore_read##x(uncore, reg); \ 1246 GEN2_READ_FOOTER; \ 1247 } 1248 1249 __gen5_read(8) 1250 __gen5_read(16) 1251 __gen5_read(32) 1252 __gen5_read(64) 1253 __gen2_read(8) 1254 __gen2_read(16) 1255 __gen2_read(32) 1256 __gen2_read(64) 1257 1258 #undef __gen5_read 1259 #undef __gen2_read 1260 1261 #undef GEN2_READ_FOOTER 1262 #undef GEN2_READ_HEADER 1263 1264 #define GEN6_READ_HEADER(x) \ 1265 u32 offset = i915_mmio_reg_offset(reg); \ 1266 unsigned long irqflags; \ 1267 u##x val = 0; \ 1268 assert_rpm_wakelock_held(uncore->rpm); \ 1269 spin_lock_irqsave(&uncore->lock, irqflags); \ 1270 unclaimed_reg_debug(uncore, reg, true, true) 1271 1272 #define GEN6_READ_FOOTER \ 1273 unclaimed_reg_debug(uncore, reg, true, false); \ 1274 spin_unlock_irqrestore(&uncore->lock, irqflags); \ 1275 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1276 return val 1277 1278 static noinline void ___force_wake_auto(struct intel_uncore *uncore, 1279 enum forcewake_domains fw_domains) 1280 { 1281 struct intel_uncore_forcewake_domain *domain; 1282 unsigned int tmp; 1283 1284 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 1285 1286 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) 1287 fw_domain_arm_timer(domain); 1288 1289 uncore->funcs.force_wake_get(uncore, fw_domains); 1290 } 1291 1292 static inline void __force_wake_auto(struct intel_uncore *uncore, 1293 enum forcewake_domains fw_domains) 1294 { 1295 GEM_BUG_ON(!fw_domains); 1296 1297 /* Turn on all requested but inactive supported forcewake domains. 
static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen_reg_read_funcs(func) \
static enum forcewake_domains \
func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_read(func, 8) \
__gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)

__gen_reg_read_funcs(gen12_fwtable);
__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);

#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen_reg_write_funcs(func) \
static enum forcewake_domains \
func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)

__gen_reg_write_funcs(gen12_fwtable);
__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);

#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}
static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
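	/*
	 * The macro above chains the per-domain init calls through "ret":
	 * once one __fw_domain_init() call fails, "ret ?:" short-circuits and
	 * every subsequent fw_domain_init() becomes a no-op, so only the
	 * first error is kept and the cleanup at the end can unwind.
	 */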
	if (INTEL_GEN(i915) >= 11) {
		/* we'll prune the domains of missing engines later */
		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
		int i;

		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/*
		 * A small trick here - if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/*
		 * We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */
		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which, on systems where this notifier gets
		 * called, requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = uncore->i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
	uncore->debug = &i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (IS_GEN(uncore->i915, 5)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GEN(i915, 11)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GEN(i915, 11)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* Make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* Clear out the unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}
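/*
 * Probe/remove ordering sketch (illustrative): intel_uncore_init_mmio()
 * assumes intel_uncore_init_early() has already run, and pairs with
 * intel_uncore_fini_mmio() further below on teardown:
 *
 *	intel_uncore_init_early(uncore, i915);
 *	...
 *	ret = intel_uncore_init_mmio(uncore);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_uncore_fini_mmio(uncore);
 */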
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 12),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}
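/*
 * Userspace reaches i915_reg_read_ioctl() via DRM_IOCTL_I915_REG_READ.
 * A minimal sketch, assuming fd is an open i915 DRM file descriptor and
 * with error handling omitted (illustrative only):
 *
 *	struct drm_i915_reg_read rr = {
 *		// low bits of .offset carry flags such as I915_REG_READ_8B_WA
 *		.offset = 0x2358, // RING_TIMESTAMP(RENDER_RING_BASE)
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("render timestamp: 0x%llx\n",
 *		       (unsigned long long)rr.val);
 */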
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, and
 * it is not suitable for very long waits. See intel_wait_for_register() if
 * you wish to wait without holding forcewake for the duration (i.e. you
 * expect the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
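/*
 * A minimal usage sketch for __intel_wait_for_register_fw() (illustrative
 * only; reg, mask and value stand in for a real register and condition).
 * Forcewake must already be held across the wait:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 *	err = __intel_wait_for_register_fw(uncore, reg, mask, value,
 *					   500, 0, NULL);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 */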
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned int fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* Just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains required to be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * the callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif
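/*
 * The raw-access pattern built on intel_uncore_forcewake_for_reg() above,
 * as a minimal sketch (illustrative only; reg stands in for a real
 * register):
 *
 *	enum forcewake_domains fw;
 *	u32 val;
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
 *	intel_uncore_forcewake_get(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_forcewake_put(uncore, fw);
 */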