1 /* 2 * Copyright © 2013 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include <linux/pm_runtime.h> 25 #include <asm/iosf_mbi.h> 26 27 #include "i915_drv.h" 28 #include "i915_trace.h" 29 #include "i915_vgpu.h" 30 #include "intel_pm.h" 31 32 #define FORCEWAKE_ACK_TIMEOUT_MS 50 33 #define GT_FIFO_TIMEOUT_MS 10 34 35 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__)) 36 37 void 38 intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug) 39 { 40 spin_lock_init(&mmio_debug->lock); 41 mmio_debug->unclaimed_mmio_check = 1; 42 } 43 44 static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug) 45 { 46 lockdep_assert_held(&mmio_debug->lock); 47 48 /* Save and disable mmio debugging for the user bypass */ 49 if (!mmio_debug->suspend_count++) { 50 mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check; 51 mmio_debug->unclaimed_mmio_check = 0; 52 } 53 } 54 55 static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug) 56 { 57 lockdep_assert_held(&mmio_debug->lock); 58 59 if (!--mmio_debug->suspend_count) 60 mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check; 61 } 62 63 static const char * const forcewake_domain_names[] = { 64 "render", 65 "blitter", 66 "media", 67 "vdbox0", 68 "vdbox1", 69 "vdbox2", 70 "vdbox3", 71 "vebox0", 72 "vebox1", 73 }; 74 75 const char * 76 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) 77 { 78 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT); 79 80 if (id >= 0 && id < FW_DOMAIN_ID_COUNT) 81 return forcewake_domain_names[id]; 82 83 WARN_ON(id); 84 85 return "unknown"; 86 } 87 88 #define fw_ack(d) readl((d)->reg_ack) 89 #define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set) 90 #define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set) 91 92 static inline void 93 fw_domain_reset(const struct intel_uncore_forcewake_domain *d) 94 { 95 /* 96 * We don't really know if the powerwell for the forcewake domain we are 97 * trying to reset here does exist at this point (engines could be fused 98 * off in ICL+), so no waiting for acks 99 */ 100 /* WaRsClearFWBitsAtReset:bdw,skl */ 101 fw_clear(d, 0xffff); 102 } 103 104 static inline void 105 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) 106 { 107 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask); 108 d->uncore->fw_domains_timer |= d->mask; 109 d->wake_count++; 110 hrtimer_start_range_ns(&d->timer, 111 NSEC_PER_MSEC, 112 NSEC_PER_MSEC, 113 HRTIMER_MODE_REL); 114 } 115 116 static inline int 117 __wait_for_ack(const struct intel_uncore_forcewake_domain *d, 118 const u32 ack, 119 const u32 value) 120 { 121 return wait_for_atomic((fw_ack(d) & ack) == value, 122 FORCEWAKE_ACK_TIMEOUT_MS); 123 } 124 125 static inline int 126 wait_ack_clear(const struct intel_uncore_forcewake_domain *d, 127 const u32 ack) 128 { 129 return __wait_for_ack(d, ack, 0); 130 } 131 132 static inline int 133 wait_ack_set(const struct intel_uncore_forcewake_domain *d, 134 const u32 ack) 135 { 136 return __wait_for_ack(d, ack, ack); 137 } 138 139 static inline void 140 fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) 141 { 142 if (wait_ack_clear(d, FORCEWAKE_KERNEL)) { 143 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", 144 intel_uncore_forcewake_domain_to_str(d->id)); 145 add_taint_for_CI(TAINT_WARN); /* CI now unreliable */ 146 } 147 } 148 149 enum ack_type { 150 ACK_CLEAR = 0, 151 ACK_SET 152 }; 153 154 static int 155 fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d, 156 const enum ack_type type) 157 { 158 const u32 ack_bit = FORCEWAKE_KERNEL; 159 const u32 value = type == ACK_SET ? ack_bit : 0; 160 unsigned int pass; 161 bool ack_detected; 162 163 /* 164 * There is a possibility of driver's wake request colliding 165 * with hardware's own wake requests and that can cause 166 * hardware to not deliver the driver's ack message. 167 * 168 * Use a fallback bit toggle to kick the gpu state machine 169 * in the hope that the original ack will be delivered along with 170 * the fallback ack. 171 * 172 * This workaround is described in HSDES #1604254524 and it's known as: 173 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl 174 * although the name is a bit misleading. 175 */ 176 177 pass = 1; 178 do { 179 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK); 180 181 fw_set(d, FORCEWAKE_KERNEL_FALLBACK); 182 /* Give gt some time to relax before the polling frenzy */ 183 udelay(10 * pass); 184 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK); 185 186 ack_detected = (fw_ack(d) & ack_bit) == value; 187 188 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK); 189 } while (!ack_detected && pass++ < 10); 190 191 DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n", 192 intel_uncore_forcewake_domain_to_str(d->id), 193 type == ACK_SET ? "set" : "clear", 194 fw_ack(d), 195 pass); 196 197 return ack_detected ? 0 : -ETIMEDOUT; 198 } 199 200 static inline void 201 fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d) 202 { 203 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL))) 204 return; 205 206 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR)) 207 fw_domain_wait_ack_clear(d); 208 } 209 210 static inline void 211 fw_domain_get(const struct intel_uncore_forcewake_domain *d) 212 { 213 fw_set(d, FORCEWAKE_KERNEL); 214 } 215 216 static inline void 217 fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d) 218 { 219 if (wait_ack_set(d, FORCEWAKE_KERNEL)) { 220 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", 221 intel_uncore_forcewake_domain_to_str(d->id)); 222 add_taint_for_CI(TAINT_WARN); /* CI now unreliable */ 223 } 224 } 225 226 static inline void 227 fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d) 228 { 229 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL))) 230 return; 231 232 if (fw_domain_wait_ack_with_fallback(d, ACK_SET)) 233 fw_domain_wait_ack_set(d); 234 } 235 236 static inline void 237 fw_domain_put(const struct intel_uncore_forcewake_domain *d) 238 { 239 fw_clear(d, FORCEWAKE_KERNEL); 240 } 241 242 static void 243 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) 244 { 245 struct intel_uncore_forcewake_domain *d; 246 unsigned int tmp; 247 248 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 249 250 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) { 251 fw_domain_wait_ack_clear(d); 252 fw_domain_get(d); 253 } 254 255 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) 256 fw_domain_wait_ack_set(d); 257 258 uncore->fw_domains_active |= fw_domains; 259 } 260 261 static void 262 fw_domains_get_with_fallback(struct intel_uncore *uncore, 263 enum forcewake_domains fw_domains) 264 { 265 struct intel_uncore_forcewake_domain *d; 266 unsigned int tmp; 267 268 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 269 270 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) { 271 fw_domain_wait_ack_clear_fallback(d); 272 fw_domain_get(d); 273 } 274 275 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) 276 fw_domain_wait_ack_set_fallback(d); 277 278 uncore->fw_domains_active |= fw_domains; 279 } 280 281 static void 282 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) 283 { 284 struct intel_uncore_forcewake_domain *d; 285 unsigned int tmp; 286 287 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 288 289 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) 290 fw_domain_put(d); 291 292 uncore->fw_domains_active &= ~fw_domains; 293 } 294 295 static void 296 fw_domains_reset(struct intel_uncore *uncore, 297 enum forcewake_domains fw_domains) 298 { 299 struct intel_uncore_forcewake_domain *d; 300 unsigned int tmp; 301 302 if (!fw_domains) 303 return; 304 305 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 306 307 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) 308 fw_domain_reset(d); 309 } 310 311 static inline u32 gt_thread_status(struct intel_uncore *uncore) 312 { 313 u32 val; 314 315 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG); 316 val &= GEN6_GT_THREAD_STATUS_CORE_MASK; 317 318 return val; 319 } 320 321 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore) 322 { 323 /* 324 * w/a for a sporadic read returning 0 by waiting for the GT 325 * thread to wake up. 326 */ 327 WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000), 328 "GT thread status wait timed out\n"); 329 } 330 331 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore, 332 enum forcewake_domains fw_domains) 333 { 334 fw_domains_get(uncore, fw_domains); 335 336 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */ 337 __gen6_gt_wait_for_thread_c0(uncore); 338 } 339 340 static inline u32 fifo_free_entries(struct intel_uncore *uncore) 341 { 342 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL); 343 344 return count & GT_FIFO_FREE_ENTRIES_MASK; 345 } 346 347 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) 348 { 349 u32 n; 350 351 /* On VLV, FIFO will be shared by both SW and HW. 352 * So, we need to read the FREE_ENTRIES everytime */ 353 if (IS_VALLEYVIEW(uncore->i915)) 354 n = fifo_free_entries(uncore); 355 else 356 n = uncore->fifo_count; 357 358 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) { 359 if (wait_for_atomic((n = fifo_free_entries(uncore)) > 360 GT_FIFO_NUM_RESERVED_ENTRIES, 361 GT_FIFO_TIMEOUT_MS)) { 362 DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); 363 return; 364 } 365 } 366 367 uncore->fifo_count = n - 1; 368 } 369 370 static enum hrtimer_restart 371 intel_uncore_fw_release_timer(struct hrtimer *timer) 372 { 373 struct intel_uncore_forcewake_domain *domain = 374 container_of(timer, struct intel_uncore_forcewake_domain, timer); 375 struct intel_uncore *uncore = domain->uncore; 376 unsigned long irqflags; 377 378 assert_rpm_device_not_suspended(uncore->rpm); 379 380 if (xchg(&domain->active, false)) 381 return HRTIMER_RESTART; 382 383 spin_lock_irqsave(&uncore->lock, irqflags); 384 385 uncore->fw_domains_timer &= ~domain->mask; 386 387 GEM_BUG_ON(!domain->wake_count); 388 if (--domain->wake_count == 0) 389 uncore->funcs.force_wake_put(uncore, domain->mask); 390 391 spin_unlock_irqrestore(&uncore->lock, irqflags); 392 393 return HRTIMER_NORESTART; 394 } 395 396 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */ 397 static unsigned int 398 intel_uncore_forcewake_reset(struct intel_uncore *uncore) 399 { 400 unsigned long irqflags; 401 struct intel_uncore_forcewake_domain *domain; 402 int retry_count = 100; 403 enum forcewake_domains fw, active_domains; 404 405 iosf_mbi_assert_punit_acquired(); 406 407 /* Hold uncore.lock across reset to prevent any register access 408 * with forcewake not set correctly. Wait until all pending 409 * timers are run before holding. 410 */ 411 while (1) { 412 unsigned int tmp; 413 414 active_domains = 0; 415 416 for_each_fw_domain(domain, uncore, tmp) { 417 smp_store_mb(domain->active, false); 418 if (hrtimer_cancel(&domain->timer) == 0) 419 continue; 420 421 intel_uncore_fw_release_timer(&domain->timer); 422 } 423 424 spin_lock_irqsave(&uncore->lock, irqflags); 425 426 for_each_fw_domain(domain, uncore, tmp) { 427 if (hrtimer_active(&domain->timer)) 428 active_domains |= domain->mask; 429 } 430 431 if (active_domains == 0) 432 break; 433 434 if (--retry_count == 0) { 435 DRM_ERROR("Timed out waiting for forcewake timers to finish\n"); 436 break; 437 } 438 439 spin_unlock_irqrestore(&uncore->lock, irqflags); 440 cond_resched(); 441 } 442 443 WARN_ON(active_domains); 444 445 fw = uncore->fw_domains_active; 446 if (fw) 447 uncore->funcs.force_wake_put(uncore, fw); 448 449 fw_domains_reset(uncore, uncore->fw_domains); 450 assert_forcewakes_inactive(uncore); 451 452 spin_unlock_irqrestore(&uncore->lock, irqflags); 453 454 return fw; /* track the lost user forcewake domains */ 455 } 456 457 static bool 458 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore) 459 { 460 u32 dbg; 461 462 dbg = __raw_uncore_read32(uncore, FPGA_DBG); 463 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))) 464 return false; 465 466 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 467 468 return true; 469 } 470 471 static bool 472 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore) 473 { 474 u32 cer; 475 476 cer = __raw_uncore_read32(uncore, CLAIM_ER); 477 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK)))) 478 return false; 479 480 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR); 481 482 return true; 483 } 484 485 static bool 486 gen6_check_for_fifo_debug(struct intel_uncore *uncore) 487 { 488 u32 fifodbg; 489 490 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG); 491 492 if (unlikely(fifodbg)) { 493 DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg); 494 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg); 495 } 496 497 return fifodbg; 498 } 499 500 static bool 501 check_for_unclaimed_mmio(struct intel_uncore *uncore) 502 { 503 bool ret = false; 504 505 lockdep_assert_held(&uncore->debug->lock); 506 507 if (uncore->debug->suspend_count) 508 return false; 509 510 if (intel_uncore_has_fpga_dbg_unclaimed(uncore)) 511 ret |= fpga_check_for_unclaimed_mmio(uncore); 512 513 if (intel_uncore_has_dbg_unclaimed(uncore)) 514 ret |= vlv_check_for_unclaimed_mmio(uncore); 515 516 if (intel_uncore_has_fifo(uncore)) 517 ret |= gen6_check_for_fifo_debug(uncore); 518 519 return ret; 520 } 521 522 static void forcewake_early_sanitize(struct intel_uncore *uncore, 523 unsigned int restore_forcewake) 524 { 525 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); 526 527 /* WaDisableShadowRegForCpd:chv */ 528 if (IS_CHERRYVIEW(uncore->i915)) { 529 __raw_uncore_write32(uncore, GTFIFOCTL, 530 __raw_uncore_read32(uncore, GTFIFOCTL) | 531 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | 532 GT_FIFO_CTL_RC6_POLICY_STALL); 533 } 534 535 iosf_mbi_punit_acquire(); 536 intel_uncore_forcewake_reset(uncore); 537 if (restore_forcewake) { 538 spin_lock_irq(&uncore->lock); 539 uncore->funcs.force_wake_get(uncore, restore_forcewake); 540 541 if (intel_uncore_has_fifo(uncore)) 542 uncore->fifo_count = fifo_free_entries(uncore); 543 spin_unlock_irq(&uncore->lock); 544 } 545 iosf_mbi_punit_release(); 546 } 547 548 void intel_uncore_suspend(struct intel_uncore *uncore) 549 { 550 if (!intel_uncore_has_forcewake(uncore)) 551 return; 552 553 iosf_mbi_punit_acquire(); 554 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( 555 &uncore->pmic_bus_access_nb); 556 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore); 557 iosf_mbi_punit_release(); 558 } 559 560 void intel_uncore_resume_early(struct intel_uncore *uncore) 561 { 562 unsigned int restore_forcewake; 563 564 if (intel_uncore_unclaimed_mmio(uncore)) 565 DRM_DEBUG("unclaimed mmio detected on resume, clearing\n"); 566 567 if (!intel_uncore_has_forcewake(uncore)) 568 return; 569 570 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved); 571 forcewake_early_sanitize(uncore, restore_forcewake); 572 573 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); 574 } 575 576 void intel_uncore_runtime_resume(struct intel_uncore *uncore) 577 { 578 if (!intel_uncore_has_forcewake(uncore)) 579 return; 580 581 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); 582 } 583 584 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore, 585 enum forcewake_domains fw_domains) 586 { 587 struct intel_uncore_forcewake_domain *domain; 588 unsigned int tmp; 589 590 fw_domains &= uncore->fw_domains; 591 592 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { 593 if (domain->wake_count++) { 594 fw_domains &= ~domain->mask; 595 domain->active = true; 596 } 597 } 598 599 if (fw_domains) 600 uncore->funcs.force_wake_get(uncore, fw_domains); 601 } 602 603 /** 604 * intel_uncore_forcewake_get - grab forcewake domain references 605 * @uncore: the intel_uncore structure 606 * @fw_domains: forcewake domains to get reference on 607 * 608 * This function can be used get GT's forcewake domain references. 609 * Normal register access will handle the forcewake domains automatically. 610 * However if some sequence requires the GT to not power down a particular 611 * forcewake domains this function should be called at the beginning of the 612 * sequence. And subsequently the reference should be dropped by symmetric 613 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains 614 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL. 615 */ 616 void intel_uncore_forcewake_get(struct intel_uncore *uncore, 617 enum forcewake_domains fw_domains) 618 { 619 unsigned long irqflags; 620 621 if (!uncore->funcs.force_wake_get) 622 return; 623 624 assert_rpm_wakelock_held(uncore->rpm); 625 626 spin_lock_irqsave(&uncore->lock, irqflags); 627 __intel_uncore_forcewake_get(uncore, fw_domains); 628 spin_unlock_irqrestore(&uncore->lock, irqflags); 629 } 630 631 /** 632 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace 633 * @uncore: the intel_uncore structure 634 * 635 * This function is a wrapper around intel_uncore_forcewake_get() to acquire 636 * the GT powerwell and in the process disable our debugging for the 637 * duration of userspace's bypass. 638 */ 639 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore) 640 { 641 spin_lock_irq(&uncore->lock); 642 if (!uncore->user_forcewake_count++) { 643 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL); 644 spin_lock(&uncore->debug->lock); 645 mmio_debug_suspend(uncore->debug); 646 spin_unlock(&uncore->debug->lock); 647 } 648 spin_unlock_irq(&uncore->lock); 649 } 650 651 /** 652 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace 653 * @uncore: the intel_uncore structure 654 * 655 * This function complements intel_uncore_forcewake_user_get() and releases 656 * the GT powerwell taken on behalf of the userspace bypass. 657 */ 658 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) 659 { 660 spin_lock_irq(&uncore->lock); 661 if (!--uncore->user_forcewake_count) { 662 spin_lock(&uncore->debug->lock); 663 mmio_debug_resume(uncore->debug); 664 665 if (check_for_unclaimed_mmio(uncore)) 666 dev_info(uncore->i915->drm.dev, 667 "Invalid mmio detected during user access\n"); 668 spin_unlock(&uncore->debug->lock); 669 670 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL); 671 } 672 spin_unlock_irq(&uncore->lock); 673 } 674 675 /** 676 * intel_uncore_forcewake_get__locked - grab forcewake domain references 677 * @uncore: the intel_uncore structure 678 * @fw_domains: forcewake domains to get reference on 679 * 680 * See intel_uncore_forcewake_get(). This variant places the onus 681 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. 682 */ 683 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, 684 enum forcewake_domains fw_domains) 685 { 686 lockdep_assert_held(&uncore->lock); 687 688 if (!uncore->funcs.force_wake_get) 689 return; 690 691 __intel_uncore_forcewake_get(uncore, fw_domains); 692 } 693 694 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, 695 enum forcewake_domains fw_domains) 696 { 697 struct intel_uncore_forcewake_domain *domain; 698 unsigned int tmp; 699 700 fw_domains &= uncore->fw_domains; 701 702 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { 703 GEM_BUG_ON(!domain->wake_count); 704 705 if (--domain->wake_count) { 706 domain->active = true; 707 continue; 708 } 709 710 fw_domain_arm_timer(domain); 711 } 712 } 713 714 /** 715 * intel_uncore_forcewake_put - release a forcewake domain reference 716 * @uncore: the intel_uncore structure 717 * @fw_domains: forcewake domains to put references 718 * 719 * This function drops the device-level forcewakes for specified 720 * domains obtained by intel_uncore_forcewake_get(). 721 */ 722 void intel_uncore_forcewake_put(struct intel_uncore *uncore, 723 enum forcewake_domains fw_domains) 724 { 725 unsigned long irqflags; 726 727 if (!uncore->funcs.force_wake_put) 728 return; 729 730 spin_lock_irqsave(&uncore->lock, irqflags); 731 __intel_uncore_forcewake_put(uncore, fw_domains); 732 spin_unlock_irqrestore(&uncore->lock, irqflags); 733 } 734 735 /** 736 * intel_uncore_forcewake_put__locked - grab forcewake domain references 737 * @uncore: the intel_uncore structure 738 * @fw_domains: forcewake domains to get reference on 739 * 740 * See intel_uncore_forcewake_put(). This variant places the onus 741 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. 742 */ 743 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, 744 enum forcewake_domains fw_domains) 745 { 746 lockdep_assert_held(&uncore->lock); 747 748 if (!uncore->funcs.force_wake_put) 749 return; 750 751 __intel_uncore_forcewake_put(uncore, fw_domains); 752 } 753 754 void assert_forcewakes_inactive(struct intel_uncore *uncore) 755 { 756 if (!uncore->funcs.force_wake_get) 757 return; 758 759 WARN(uncore->fw_domains_active, 760 "Expected all fw_domains to be inactive, but %08x are still on\n", 761 uncore->fw_domains_active); 762 } 763 764 void assert_forcewakes_active(struct intel_uncore *uncore, 765 enum forcewake_domains fw_domains) 766 { 767 struct intel_uncore_forcewake_domain *domain; 768 unsigned int tmp; 769 770 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) 771 return; 772 773 if (!uncore->funcs.force_wake_get) 774 return; 775 776 spin_lock_irq(&uncore->lock); 777 778 assert_rpm_wakelock_held(uncore->rpm); 779 780 fw_domains &= uncore->fw_domains; 781 WARN(fw_domains & ~uncore->fw_domains_active, 782 "Expected %08x fw_domains to be active, but %08x are off\n", 783 fw_domains, fw_domains & ~uncore->fw_domains_active); 784 785 /* 786 * Check that the caller has an explicit wakeref and we don't mistake 787 * it for the auto wakeref. 788 */ 789 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { 790 unsigned int actual = READ_ONCE(domain->wake_count); 791 unsigned int expect = 1; 792 793 if (uncore->fw_domains_timer & domain->mask) 794 expect++; /* pending automatic release */ 795 796 if (WARN(actual < expect, 797 "Expected domain %d to be held awake by caller, count=%d\n", 798 domain->id, actual)) 799 break; 800 } 801 802 spin_unlock_irq(&uncore->lock); 803 } 804 805 /* We give fast paths for the really cool registers */ 806 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) 807 808 #define __gen6_reg_read_fw_domains(uncore, offset) \ 809 ({ \ 810 enum forcewake_domains __fwd; \ 811 if (NEEDS_FORCE_WAKE(offset)) \ 812 __fwd = FORCEWAKE_RENDER; \ 813 else \ 814 __fwd = 0; \ 815 __fwd; \ 816 }) 817 818 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry) 819 { 820 if (offset < entry->start) 821 return -1; 822 else if (offset > entry->end) 823 return 1; 824 else 825 return 0; 826 } 827 828 /* Copied and "macroized" from lib/bsearch.c */ 829 #define BSEARCH(key, base, num, cmp) ({ \ 830 unsigned int start__ = 0, end__ = (num); \ 831 typeof(base) result__ = NULL; \ 832 while (start__ < end__) { \ 833 unsigned int mid__ = start__ + (end__ - start__) / 2; \ 834 int ret__ = (cmp)((key), (base) + mid__); \ 835 if (ret__ < 0) { \ 836 end__ = mid__; \ 837 } else if (ret__ > 0) { \ 838 start__ = mid__ + 1; \ 839 } else { \ 840 result__ = (base) + mid__; \ 841 break; \ 842 } \ 843 } \ 844 result__; \ 845 }) 846 847 static enum forcewake_domains 848 find_fw_domain(struct intel_uncore *uncore, u32 offset) 849 { 850 const struct intel_forcewake_range *entry; 851 852 entry = BSEARCH(offset, 853 uncore->fw_domains_table, 854 uncore->fw_domains_table_entries, 855 fw_range_cmp); 856 857 if (!entry) 858 return 0; 859 860 /* 861 * The list of FW domains depends on the SKU in gen11+ so we 862 * can't determine it statically. We use FORCEWAKE_ALL and 863 * translate it here to the list of available domains. 864 */ 865 if (entry->domains == FORCEWAKE_ALL) 866 return uncore->fw_domains; 867 868 WARN(entry->domains & ~uncore->fw_domains, 869 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", 870 entry->domains & ~uncore->fw_domains, offset); 871 872 return entry->domains; 873 } 874 875 #define GEN_FW_RANGE(s, e, d) \ 876 { .start = (s), .end = (e), .domains = (d) } 877 878 #define HAS_FWTABLE(dev_priv) \ 879 (INTEL_GEN(dev_priv) >= 9 || \ 880 IS_CHERRYVIEW(dev_priv) || \ 881 IS_VALLEYVIEW(dev_priv)) 882 883 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 884 static const struct intel_forcewake_range __vlv_fw_ranges[] = { 885 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), 886 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER), 887 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER), 888 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), 889 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA), 890 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER), 891 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), 892 }; 893 894 #define __fwtable_reg_read_fw_domains(uncore, offset) \ 895 ({ \ 896 enum forcewake_domains __fwd = 0; \ 897 if (NEEDS_FORCE_WAKE((offset))) \ 898 __fwd = find_fw_domain(uncore, offset); \ 899 __fwd; \ 900 }) 901 902 #define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ 903 find_fw_domain(uncore, offset) 904 905 #define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \ 906 find_fw_domain(uncore, offset) 907 908 /* *Must* be sorted by offset! See intel_shadow_table_check(). */ 909 static const i915_reg_t gen8_shadowed_regs[] = { 910 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ 911 GEN6_RPNSWREQ, /* 0xA008 */ 912 GEN6_RC_VIDEO_FREQ, /* 0xA00C */ 913 RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */ 914 RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */ 915 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ 916 /* TODO: Other registers are not yet used */ 917 }; 918 919 static const i915_reg_t gen11_shadowed_regs[] = { 920 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ 921 GEN6_RPNSWREQ, /* 0xA008 */ 922 GEN6_RC_VIDEO_FREQ, /* 0xA00C */ 923 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ 924 RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ 925 RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ 926 RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ 927 RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ 928 RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ 929 RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ 930 /* TODO: Other registers are not yet used */ 931 }; 932 933 static const i915_reg_t gen12_shadowed_regs[] = { 934 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ 935 GEN6_RPNSWREQ, /* 0xA008 */ 936 GEN6_RC_VIDEO_FREQ, /* 0xA00C */ 937 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ 938 RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ 939 RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ 940 RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ 941 RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ 942 RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ 943 RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ 944 /* TODO: Other registers are not yet used */ 945 }; 946 947 static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) 948 { 949 u32 offset = i915_mmio_reg_offset(*reg); 950 951 if (key < offset) 952 return -1; 953 else if (key > offset) 954 return 1; 955 else 956 return 0; 957 } 958 959 #define __is_genX_shadowed(x) \ 960 static bool is_gen##x##_shadowed(u32 offset) \ 961 { \ 962 const i915_reg_t *regs = gen##x##_shadowed_regs; \ 963 return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \ 964 mmio_reg_cmp); \ 965 } 966 967 __is_genX_shadowed(8) 968 __is_genX_shadowed(11) 969 __is_genX_shadowed(12) 970 971 static enum forcewake_domains 972 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) 973 { 974 return FORCEWAKE_RENDER; 975 } 976 977 #define __gen8_reg_write_fw_domains(uncore, offset) \ 978 ({ \ 979 enum forcewake_domains __fwd; \ 980 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ 981 __fwd = FORCEWAKE_RENDER; \ 982 else \ 983 __fwd = 0; \ 984 __fwd; \ 985 }) 986 987 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 988 static const struct intel_forcewake_range __chv_fw_ranges[] = { 989 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), 990 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 991 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 992 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 993 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 994 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 995 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA), 996 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 997 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 998 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), 999 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER), 1000 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 1001 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), 1002 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA), 1003 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA), 1004 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA), 1005 }; 1006 1007 #define __fwtable_reg_write_fw_domains(uncore, offset) \ 1008 ({ \ 1009 enum forcewake_domains __fwd = 0; \ 1010 if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ 1011 __fwd = find_fw_domain(uncore, offset); \ 1012 __fwd; \ 1013 }) 1014 1015 #define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ 1016 ({ \ 1017 enum forcewake_domains __fwd = 0; \ 1018 const u32 __offset = (offset); \ 1019 if (!is_gen11_shadowed(__offset)) \ 1020 __fwd = find_fw_domain(uncore, __offset); \ 1021 __fwd; \ 1022 }) 1023 1024 #define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \ 1025 ({ \ 1026 enum forcewake_domains __fwd = 0; \ 1027 const u32 __offset = (offset); \ 1028 if (!is_gen12_shadowed(__offset)) \ 1029 __fwd = find_fw_domain(uncore, __offset); \ 1030 __fwd; \ 1031 }) 1032 1033 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 1034 static const struct intel_forcewake_range __gen9_fw_ranges[] = { 1035 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), 1036 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ 1037 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), 1038 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), 1039 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), 1040 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), 1041 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 1042 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER), 1043 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA), 1044 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), 1045 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), 1046 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 1047 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), 1048 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA), 1049 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER), 1050 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), 1051 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), 1052 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), 1053 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), 1054 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 1055 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER), 1056 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), 1057 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER), 1058 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), 1059 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER), 1060 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), 1061 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER), 1062 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA), 1063 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER), 1064 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), 1065 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER), 1066 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), 1067 }; 1068 1069 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 1070 static const struct intel_forcewake_range __gen11_fw_ranges[] = { 1071 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), 1072 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ 1073 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), 1074 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), 1075 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), 1076 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), 1077 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 1078 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), 1079 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), 1080 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), 1081 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 1082 GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), 1083 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), 1084 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), 1085 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), 1086 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), 1087 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 1088 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER), 1089 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER), 1090 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER), 1091 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER), 1092 GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER), 1093 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), 1094 GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), 1095 GEN_FW_RANGE(0x40000, 0x1bffff, 0), 1096 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), 1097 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), 1098 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), 1099 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), 1100 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), 1101 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), 1102 GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) 1103 }; 1104 1105 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ 1106 static const struct intel_forcewake_range __gen12_fw_ranges[] = { 1107 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), 1108 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ 1109 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), 1110 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), 1111 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), 1112 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), 1113 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), 1114 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), 1115 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), 1116 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), 1117 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), 1118 GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), 1119 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), 1120 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), 1121 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), 1122 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), 1123 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), 1124 GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), 1125 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), 1126 GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER), 1127 GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER), 1128 GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER), 1129 GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER), 1130 GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER), 1131 GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER), 1132 GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER), 1133 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), 1134 GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), 1135 GEN_FW_RANGE(0x40000, 0x1bffff, 0), 1136 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), 1137 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), 1138 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), 1139 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), 1140 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), 1141 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), 1142 GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) 1143 }; 1144 1145 static void 1146 ilk_dummy_write(struct intel_uncore *uncore) 1147 { 1148 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up 1149 * the chip from rc6 before touching it for real. MI_MODE is masked, 1150 * hence harmless to write 0 into. */ 1151 __raw_uncore_write32(uncore, MI_MODE, 0); 1152 } 1153 1154 static void 1155 __unclaimed_reg_debug(struct intel_uncore *uncore, 1156 const i915_reg_t reg, 1157 const bool read, 1158 const bool before) 1159 { 1160 if (WARN(check_for_unclaimed_mmio(uncore) && !before, 1161 "Unclaimed %s register 0x%x\n", 1162 read ? "read from" : "write to", 1163 i915_mmio_reg_offset(reg))) 1164 /* Only report the first N failures */ 1165 i915_modparams.mmio_debug--; 1166 } 1167 1168 static inline void 1169 unclaimed_reg_debug(struct intel_uncore *uncore, 1170 const i915_reg_t reg, 1171 const bool read, 1172 const bool before) 1173 { 1174 if (likely(!i915_modparams.mmio_debug)) 1175 return; 1176 1177 /* interrupts are disabled and re-enabled around uncore->lock usage */ 1178 lockdep_assert_held(&uncore->lock); 1179 1180 if (before) 1181 spin_lock(&uncore->debug->lock); 1182 1183 __unclaimed_reg_debug(uncore, reg, read, before); 1184 1185 if (!before) 1186 spin_unlock(&uncore->debug->lock); 1187 } 1188 1189 #define GEN2_READ_HEADER(x) \ 1190 u##x val = 0; \ 1191 assert_rpm_wakelock_held(uncore->rpm); 1192 1193 #define GEN2_READ_FOOTER \ 1194 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1195 return val 1196 1197 #define __gen2_read(x) \ 1198 static u##x \ 1199 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1200 GEN2_READ_HEADER(x); \ 1201 val = __raw_uncore_read##x(uncore, reg); \ 1202 GEN2_READ_FOOTER; \ 1203 } 1204 1205 #define __gen5_read(x) \ 1206 static u##x \ 1207 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1208 GEN2_READ_HEADER(x); \ 1209 ilk_dummy_write(uncore); \ 1210 val = __raw_uncore_read##x(uncore, reg); \ 1211 GEN2_READ_FOOTER; \ 1212 } 1213 1214 __gen5_read(8) 1215 __gen5_read(16) 1216 __gen5_read(32) 1217 __gen5_read(64) 1218 __gen2_read(8) 1219 __gen2_read(16) 1220 __gen2_read(32) 1221 __gen2_read(64) 1222 1223 #undef __gen5_read 1224 #undef __gen2_read 1225 1226 #undef GEN2_READ_FOOTER 1227 #undef GEN2_READ_HEADER 1228 1229 #define GEN6_READ_HEADER(x) \ 1230 u32 offset = i915_mmio_reg_offset(reg); \ 1231 unsigned long irqflags; \ 1232 u##x val = 0; \ 1233 assert_rpm_wakelock_held(uncore->rpm); \ 1234 spin_lock_irqsave(&uncore->lock, irqflags); \ 1235 unclaimed_reg_debug(uncore, reg, true, true) 1236 1237 #define GEN6_READ_FOOTER \ 1238 unclaimed_reg_debug(uncore, reg, true, false); \ 1239 spin_unlock_irqrestore(&uncore->lock, irqflags); \ 1240 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1241 return val 1242 1243 static noinline void ___force_wake_auto(struct intel_uncore *uncore, 1244 enum forcewake_domains fw_domains) 1245 { 1246 struct intel_uncore_forcewake_domain *domain; 1247 unsigned int tmp; 1248 1249 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 1250 1251 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) 1252 fw_domain_arm_timer(domain); 1253 1254 uncore->funcs.force_wake_get(uncore, fw_domains); 1255 } 1256 1257 static inline void __force_wake_auto(struct intel_uncore *uncore, 1258 enum forcewake_domains fw_domains) 1259 { 1260 GEM_BUG_ON(!fw_domains); 1261 1262 /* Turn on all requested but inactive supported forcewake domains. */ 1263 fw_domains &= uncore->fw_domains; 1264 fw_domains &= ~uncore->fw_domains_active; 1265 1266 if (fw_domains) 1267 ___force_wake_auto(uncore, fw_domains); 1268 } 1269 1270 #define __gen_read(func, x) \ 1271 static u##x \ 1272 func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1273 enum forcewake_domains fw_engine; \ 1274 GEN6_READ_HEADER(x); \ 1275 fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \ 1276 if (fw_engine) \ 1277 __force_wake_auto(uncore, fw_engine); \ 1278 val = __raw_uncore_read##x(uncore, reg); \ 1279 GEN6_READ_FOOTER; \ 1280 } 1281 1282 #define __gen_reg_read_funcs(func) \ 1283 static enum forcewake_domains \ 1284 func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ 1285 return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ 1286 } \ 1287 \ 1288 __gen_read(func, 8) \ 1289 __gen_read(func, 16) \ 1290 __gen_read(func, 32) \ 1291 __gen_read(func, 64) 1292 1293 __gen_reg_read_funcs(gen12_fwtable); 1294 __gen_reg_read_funcs(gen11_fwtable); 1295 __gen_reg_read_funcs(fwtable); 1296 __gen_reg_read_funcs(gen6); 1297 1298 #undef __gen_reg_read_funcs 1299 #undef GEN6_READ_FOOTER 1300 #undef GEN6_READ_HEADER 1301 1302 #define GEN2_WRITE_HEADER \ 1303 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 1304 assert_rpm_wakelock_held(uncore->rpm); \ 1305 1306 #define GEN2_WRITE_FOOTER 1307 1308 #define __gen2_write(x) \ 1309 static void \ 1310 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ 1311 GEN2_WRITE_HEADER; \ 1312 __raw_uncore_write##x(uncore, reg, val); \ 1313 GEN2_WRITE_FOOTER; \ 1314 } 1315 1316 #define __gen5_write(x) \ 1317 static void \ 1318 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ 1319 GEN2_WRITE_HEADER; \ 1320 ilk_dummy_write(uncore); \ 1321 __raw_uncore_write##x(uncore, reg, val); \ 1322 GEN2_WRITE_FOOTER; \ 1323 } 1324 1325 __gen5_write(8) 1326 __gen5_write(16) 1327 __gen5_write(32) 1328 __gen2_write(8) 1329 __gen2_write(16) 1330 __gen2_write(32) 1331 1332 #undef __gen5_write 1333 #undef __gen2_write 1334 1335 #undef GEN2_WRITE_FOOTER 1336 #undef GEN2_WRITE_HEADER 1337 1338 #define GEN6_WRITE_HEADER \ 1339 u32 offset = i915_mmio_reg_offset(reg); \ 1340 unsigned long irqflags; \ 1341 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 1342 assert_rpm_wakelock_held(uncore->rpm); \ 1343 spin_lock_irqsave(&uncore->lock, irqflags); \ 1344 unclaimed_reg_debug(uncore, reg, false, true) 1345 1346 #define GEN6_WRITE_FOOTER \ 1347 unclaimed_reg_debug(uncore, reg, false, false); \ 1348 spin_unlock_irqrestore(&uncore->lock, irqflags) 1349 1350 #define __gen6_write(x) \ 1351 static void \ 1352 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ 1353 GEN6_WRITE_HEADER; \ 1354 if (NEEDS_FORCE_WAKE(offset)) \ 1355 __gen6_gt_wait_for_fifo(uncore); \ 1356 __raw_uncore_write##x(uncore, reg, val); \ 1357 GEN6_WRITE_FOOTER; \ 1358 } 1359 __gen6_write(8) 1360 __gen6_write(16) 1361 __gen6_write(32) 1362 1363 #define __gen_write(func, x) \ 1364 static void \ 1365 func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ 1366 enum forcewake_domains fw_engine; \ 1367 GEN6_WRITE_HEADER; \ 1368 fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \ 1369 if (fw_engine) \ 1370 __force_wake_auto(uncore, fw_engine); \ 1371 __raw_uncore_write##x(uncore, reg, val); \ 1372 GEN6_WRITE_FOOTER; \ 1373 } 1374 1375 #define __gen_reg_write_funcs(func) \ 1376 static enum forcewake_domains \ 1377 func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ 1378 return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ 1379 } \ 1380 \ 1381 __gen_write(func, 8) \ 1382 __gen_write(func, 16) \ 1383 __gen_write(func, 32) 1384 1385 __gen_reg_write_funcs(gen12_fwtable); 1386 __gen_reg_write_funcs(gen11_fwtable); 1387 __gen_reg_write_funcs(fwtable); 1388 __gen_reg_write_funcs(gen8); 1389 1390 #undef __gen_reg_write_funcs 1391 #undef GEN6_WRITE_FOOTER 1392 #undef GEN6_WRITE_HEADER 1393 1394 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ 1395 do { \ 1396 (uncore)->funcs.mmio_writeb = x##_write8; \ 1397 (uncore)->funcs.mmio_writew = x##_write16; \ 1398 (uncore)->funcs.mmio_writel = x##_write32; \ 1399 } while (0) 1400 1401 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \ 1402 do { \ 1403 (uncore)->funcs.mmio_readb = x##_read8; \ 1404 (uncore)->funcs.mmio_readw = x##_read16; \ 1405 (uncore)->funcs.mmio_readl = x##_read32; \ 1406 (uncore)->funcs.mmio_readq = x##_read64; \ 1407 } while (0) 1408 1409 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ 1410 do { \ 1411 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \ 1412 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \ 1413 } while (0) 1414 1415 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ 1416 do { \ 1417 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \ 1418 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \ 1419 } while (0) 1420 1421 static int __fw_domain_init(struct intel_uncore *uncore, 1422 enum forcewake_domain_id domain_id, 1423 i915_reg_t reg_set, 1424 i915_reg_t reg_ack) 1425 { 1426 struct intel_uncore_forcewake_domain *d; 1427 1428 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); 1429 GEM_BUG_ON(uncore->fw_domain[domain_id]); 1430 1431 if (i915_inject_probe_failure(uncore->i915)) 1432 return -ENOMEM; 1433 1434 d = kzalloc(sizeof(*d), GFP_KERNEL); 1435 if (!d) 1436 return -ENOMEM; 1437 1438 WARN_ON(!i915_mmio_reg_valid(reg_set)); 1439 WARN_ON(!i915_mmio_reg_valid(reg_ack)); 1440 1441 d->uncore = uncore; 1442 d->wake_count = 0; 1443 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); 1444 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); 1445 1446 d->id = domain_id; 1447 1448 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); 1449 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); 1450 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); 1451 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0)); 1452 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1)); 1453 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2)); 1454 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3)); 1455 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); 1456 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); 1457 1458 d->mask = BIT(domain_id); 1459 1460 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1461 d->timer.function = intel_uncore_fw_release_timer; 1462 1463 uncore->fw_domains |= BIT(domain_id); 1464 1465 fw_domain_reset(d); 1466 1467 uncore->fw_domain[domain_id] = d; 1468 1469 return 0; 1470 } 1471 1472 static void fw_domain_fini(struct intel_uncore *uncore, 1473 enum forcewake_domain_id domain_id) 1474 { 1475 struct intel_uncore_forcewake_domain *d; 1476 1477 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); 1478 1479 d = fetch_and_zero(&uncore->fw_domain[domain_id]); 1480 if (!d) 1481 return; 1482 1483 uncore->fw_domains &= ~BIT(domain_id); 1484 WARN_ON(d->wake_count); 1485 WARN_ON(hrtimer_cancel(&d->timer)); 1486 kfree(d); 1487 } 1488 1489 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore) 1490 { 1491 struct intel_uncore_forcewake_domain *d; 1492 int tmp; 1493 1494 for_each_fw_domain(d, uncore, tmp) 1495 fw_domain_fini(uncore, d->id); 1496 } 1497 1498 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) 1499 { 1500 struct drm_i915_private *i915 = uncore->i915; 1501 int ret = 0; 1502 1503 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); 1504 1505 #define fw_domain_init(uncore__, id__, set__, ack__) \ 1506 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) 1507 1508 if (INTEL_GEN(i915) >= 11) { 1509 int i; 1510 1511 uncore->funcs.force_wake_get = fw_domains_get_with_fallback; 1512 uncore->funcs.force_wake_put = fw_domains_put; 1513 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1514 FORCEWAKE_RENDER_GEN9, 1515 FORCEWAKE_ACK_RENDER_GEN9); 1516 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, 1517 FORCEWAKE_BLITTER_GEN9, 1518 FORCEWAKE_ACK_BLITTER_GEN9); 1519 1520 for (i = 0; i < I915_MAX_VCS; i++) { 1521 if (!HAS_ENGINE(i915, _VCS(i))) 1522 continue; 1523 1524 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, 1525 FORCEWAKE_MEDIA_VDBOX_GEN11(i), 1526 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); 1527 } 1528 for (i = 0; i < I915_MAX_VECS; i++) { 1529 if (!HAS_ENGINE(i915, _VECS(i))) 1530 continue; 1531 1532 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, 1533 FORCEWAKE_MEDIA_VEBOX_GEN11(i), 1534 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); 1535 } 1536 } else if (IS_GEN_RANGE(i915, 9, 10)) { 1537 uncore->funcs.force_wake_get = fw_domains_get_with_fallback; 1538 uncore->funcs.force_wake_put = fw_domains_put; 1539 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1540 FORCEWAKE_RENDER_GEN9, 1541 FORCEWAKE_ACK_RENDER_GEN9); 1542 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, 1543 FORCEWAKE_BLITTER_GEN9, 1544 FORCEWAKE_ACK_BLITTER_GEN9); 1545 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, 1546 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); 1547 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 1548 uncore->funcs.force_wake_get = fw_domains_get; 1549 uncore->funcs.force_wake_put = fw_domains_put; 1550 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1551 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); 1552 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, 1553 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); 1554 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 1555 uncore->funcs.force_wake_get = 1556 fw_domains_get_with_thread_status; 1557 uncore->funcs.force_wake_put = fw_domains_put; 1558 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1559 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1560 } else if (IS_IVYBRIDGE(i915)) { 1561 u32 ecobus; 1562 1563 /* IVB configs may use multi-threaded forcewake */ 1564 1565 /* A small trick here - if the bios hasn't configured 1566 * MT forcewake, and if the device is in RC6, then 1567 * force_wake_mt_get will not wake the device and the 1568 * ECOBUS read will return zero. Which will be 1569 * (correctly) interpreted by the test below as MT 1570 * forcewake being disabled. 1571 */ 1572 uncore->funcs.force_wake_get = 1573 fw_domains_get_with_thread_status; 1574 uncore->funcs.force_wake_put = fw_domains_put; 1575 1576 /* We need to init first for ECOBUS access and then 1577 * determine later if we want to reinit, in case of MT access is 1578 * not working. In this stage we don't know which flavour this 1579 * ivb is, so it is better to reset also the gen6 fw registers 1580 * before the ecobus check. 1581 */ 1582 1583 __raw_uncore_write32(uncore, FORCEWAKE, 0); 1584 __raw_posting_read(uncore, ECOBUS); 1585 1586 ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1587 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1588 if (ret) 1589 goto out; 1590 1591 spin_lock_irq(&uncore->lock); 1592 fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); 1593 ecobus = __raw_uncore_read32(uncore, ECOBUS); 1594 fw_domains_put(uncore, FORCEWAKE_RENDER); 1595 spin_unlock_irq(&uncore->lock); 1596 1597 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1598 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1599 DRM_INFO("when using vblank-synced partial screen updates.\n"); 1600 fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); 1601 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1602 FORCEWAKE, FORCEWAKE_ACK); 1603 } 1604 } else if (IS_GEN(i915, 6)) { 1605 uncore->funcs.force_wake_get = 1606 fw_domains_get_with_thread_status; 1607 uncore->funcs.force_wake_put = fw_domains_put; 1608 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, 1609 FORCEWAKE, FORCEWAKE_ACK); 1610 } 1611 1612 #undef fw_domain_init 1613 1614 /* All future platforms are expected to require complex power gating */ 1615 WARN_ON(!ret && uncore->fw_domains == 0); 1616 1617 out: 1618 if (ret) 1619 intel_uncore_fw_domains_fini(uncore); 1620 1621 return ret; 1622 } 1623 1624 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ 1625 { \ 1626 (uncore)->fw_domains_table = \ 1627 (struct intel_forcewake_range *)(d); \ 1628 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \ 1629 } 1630 1631 static int i915_pmic_bus_access_notifier(struct notifier_block *nb, 1632 unsigned long action, void *data) 1633 { 1634 struct intel_uncore *uncore = container_of(nb, 1635 struct intel_uncore, pmic_bus_access_nb); 1636 1637 switch (action) { 1638 case MBI_PMIC_BUS_ACCESS_BEGIN: 1639 /* 1640 * forcewake all now to make sure that we don't need to do a 1641 * forcewake later which on systems where this notifier gets 1642 * called requires the punit to access to the shared pmic i2c 1643 * bus, which will be busy after this notification, leading to: 1644 * "render: timed out waiting for forcewake ack request." 1645 * errors. 1646 * 1647 * The notifier is unregistered during intel_runtime_suspend(), 1648 * so it's ok to access the HW here without holding a RPM 1649 * wake reference -> disable wakeref asserts for the time of 1650 * the access. 1651 */ 1652 disable_rpm_wakeref_asserts(uncore->rpm); 1653 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1654 enable_rpm_wakeref_asserts(uncore->rpm); 1655 break; 1656 case MBI_PMIC_BUS_ACCESS_END: 1657 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 1658 break; 1659 } 1660 1661 return NOTIFY_OK; 1662 } 1663 1664 static int uncore_mmio_setup(struct intel_uncore *uncore) 1665 { 1666 struct drm_i915_private *i915 = uncore->i915; 1667 struct pci_dev *pdev = i915->drm.pdev; 1668 int mmio_bar; 1669 int mmio_size; 1670 1671 mmio_bar = IS_GEN(i915, 2) ? 1 : 0; 1672 /* 1673 * Before gen4, the registers and the GTT are behind different BARs. 1674 * However, from gen4 onwards, the registers and the GTT are shared 1675 * in the same BAR, so we want to restrict this ioremap from 1676 * clobbering the GTT which we want ioremap_wc instead. Fortunately, 1677 * the register BAR remains the same size for all the earlier 1678 * generations up to Ironlake. 1679 */ 1680 if (INTEL_GEN(i915) < 5) 1681 mmio_size = 512 * 1024; 1682 else 1683 mmio_size = 2 * 1024 * 1024; 1684 uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); 1685 if (uncore->regs == NULL) { 1686 DRM_ERROR("failed to map registers\n"); 1687 1688 return -EIO; 1689 } 1690 1691 return 0; 1692 } 1693 1694 static void uncore_mmio_cleanup(struct intel_uncore *uncore) 1695 { 1696 struct pci_dev *pdev = uncore->i915->drm.pdev; 1697 1698 pci_iounmap(pdev, uncore->regs); 1699 } 1700 1701 void intel_uncore_init_early(struct intel_uncore *uncore, 1702 struct drm_i915_private *i915) 1703 { 1704 spin_lock_init(&uncore->lock); 1705 uncore->i915 = i915; 1706 uncore->rpm = &i915->runtime_pm; 1707 uncore->debug = &i915->mmio_debug; 1708 } 1709 1710 static void uncore_raw_init(struct intel_uncore *uncore) 1711 { 1712 GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); 1713 1714 if (IS_GEN(uncore->i915, 5)) { 1715 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); 1716 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); 1717 } else { 1718 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); 1719 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); 1720 } 1721 } 1722 1723 static int uncore_forcewake_init(struct intel_uncore *uncore) 1724 { 1725 struct drm_i915_private *i915 = uncore->i915; 1726 int ret; 1727 1728 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); 1729 1730 ret = intel_uncore_fw_domains_init(uncore); 1731 if (ret) 1732 return ret; 1733 forcewake_early_sanitize(uncore, 0); 1734 1735 if (IS_GEN_RANGE(i915, 6, 7)) { 1736 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); 1737 1738 if (IS_VALLEYVIEW(i915)) { 1739 ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges); 1740 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); 1741 } else { 1742 ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); 1743 } 1744 } else if (IS_GEN(i915, 8)) { 1745 if (IS_CHERRYVIEW(i915)) { 1746 ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); 1747 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); 1748 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); 1749 } else { 1750 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); 1751 ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); 1752 } 1753 } else if (IS_GEN_RANGE(i915, 9, 10)) { 1754 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); 1755 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); 1756 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); 1757 } else if (IS_GEN(i915, 11)) { 1758 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); 1759 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); 1760 ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); 1761 } else { 1762 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); 1763 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable); 1764 ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable); 1765 } 1766 1767 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; 1768 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); 1769 1770 return 0; 1771 } 1772 1773 int intel_uncore_init_mmio(struct intel_uncore *uncore) 1774 { 1775 struct drm_i915_private *i915 = uncore->i915; 1776 int ret; 1777 1778 ret = uncore_mmio_setup(uncore); 1779 if (ret) 1780 return ret; 1781 1782 if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) 1783 uncore->flags |= UNCORE_HAS_FORCEWAKE; 1784 1785 if (!intel_uncore_has_forcewake(uncore)) { 1786 uncore_raw_init(uncore); 1787 } else { 1788 ret = uncore_forcewake_init(uncore); 1789 if (ret) 1790 goto out_mmio_cleanup; 1791 } 1792 1793 /* make sure fw funcs are set if and only if we have fw*/ 1794 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); 1795 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); 1796 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); 1797 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); 1798 1799 if (HAS_FPGA_DBG_UNCLAIMED(i915)) 1800 uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; 1801 1802 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 1803 uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED; 1804 1805 if (IS_GEN_RANGE(i915, 6, 7)) 1806 uncore->flags |= UNCORE_HAS_FIFO; 1807 1808 /* clear out unclaimed reg detection bit */ 1809 if (intel_uncore_unclaimed_mmio(uncore)) 1810 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); 1811 1812 return 0; 1813 1814 out_mmio_cleanup: 1815 uncore_mmio_cleanup(uncore); 1816 1817 return ret; 1818 } 1819 1820 /* 1821 * We might have detected that some engines are fused off after we initialized 1822 * the forcewake domains. Prune them, to make sure they only reference existing 1823 * engines. 1824 */ 1825 void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) 1826 { 1827 struct drm_i915_private *i915 = uncore->i915; 1828 enum forcewake_domains fw_domains = uncore->fw_domains; 1829 enum forcewake_domain_id domain_id; 1830 int i; 1831 1832 if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11) 1833 return; 1834 1835 for (i = 0; i < I915_MAX_VCS; i++) { 1836 domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; 1837 1838 if (HAS_ENGINE(i915, _VCS(i))) 1839 continue; 1840 1841 if (fw_domains & BIT(domain_id)) 1842 fw_domain_fini(uncore, domain_id); 1843 } 1844 1845 for (i = 0; i < I915_MAX_VECS; i++) { 1846 domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; 1847 1848 if (HAS_ENGINE(i915, _VECS(i))) 1849 continue; 1850 1851 if (fw_domains & BIT(domain_id)) 1852 fw_domain_fini(uncore, domain_id); 1853 } 1854 } 1855 1856 void intel_uncore_fini_mmio(struct intel_uncore *uncore) 1857 { 1858 if (intel_uncore_has_forcewake(uncore)) { 1859 iosf_mbi_punit_acquire(); 1860 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( 1861 &uncore->pmic_bus_access_nb); 1862 intel_uncore_forcewake_reset(uncore); 1863 intel_uncore_fw_domains_fini(uncore); 1864 iosf_mbi_punit_release(); 1865 } 1866 1867 uncore_mmio_cleanup(uncore); 1868 } 1869 1870 static const struct reg_whitelist { 1871 i915_reg_t offset_ldw; 1872 i915_reg_t offset_udw; 1873 u16 gen_mask; 1874 u8 size; 1875 } reg_read_whitelist[] = { { 1876 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), 1877 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), 1878 .gen_mask = INTEL_GEN_MASK(4, 12), 1879 .size = 8 1880 } }; 1881 1882 int i915_reg_read_ioctl(struct drm_device *dev, 1883 void *data, struct drm_file *file) 1884 { 1885 struct drm_i915_private *i915 = to_i915(dev); 1886 struct intel_uncore *uncore = &i915->uncore; 1887 struct drm_i915_reg_read *reg = data; 1888 struct reg_whitelist const *entry; 1889 intel_wakeref_t wakeref; 1890 unsigned int flags; 1891 int remain; 1892 int ret = 0; 1893 1894 entry = reg_read_whitelist; 1895 remain = ARRAY_SIZE(reg_read_whitelist); 1896 while (remain) { 1897 u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); 1898 1899 GEM_BUG_ON(!is_power_of_2(entry->size)); 1900 GEM_BUG_ON(entry->size > 8); 1901 GEM_BUG_ON(entry_offset & (entry->size - 1)); 1902 1903 if (INTEL_INFO(i915)->gen_mask & entry->gen_mask && 1904 entry_offset == (reg->offset & -entry->size)) 1905 break; 1906 entry++; 1907 remain--; 1908 } 1909 1910 if (!remain) 1911 return -EINVAL; 1912 1913 flags = reg->offset & (entry->size - 1); 1914 1915 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 1916 if (entry->size == 8 && flags == I915_REG_READ_8B_WA) 1917 reg->val = intel_uncore_read64_2x32(uncore, 1918 entry->offset_ldw, 1919 entry->offset_udw); 1920 else if (entry->size == 8 && flags == 0) 1921 reg->val = intel_uncore_read64(uncore, 1922 entry->offset_ldw); 1923 else if (entry->size == 4 && flags == 0) 1924 reg->val = intel_uncore_read(uncore, entry->offset_ldw); 1925 else if (entry->size == 2 && flags == 0) 1926 reg->val = intel_uncore_read16(uncore, 1927 entry->offset_ldw); 1928 else if (entry->size == 1 && flags == 0) 1929 reg->val = intel_uncore_read8(uncore, 1930 entry->offset_ldw); 1931 else 1932 ret = -EINVAL; 1933 } 1934 1935 return ret; 1936 } 1937 1938 /** 1939 * __intel_wait_for_register_fw - wait until register matches expected state 1940 * @uncore: the struct intel_uncore 1941 * @reg: the register to read 1942 * @mask: mask to apply to register value 1943 * @value: expected value 1944 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait 1945 * @slow_timeout_ms: slow timeout in millisecond 1946 * @out_value: optional placeholder to hold registry value 1947 * 1948 * This routine waits until the target register @reg contains the expected 1949 * @value after applying the @mask, i.e. it waits until :: 1950 * 1951 * (I915_READ_FW(reg) & mask) == value 1952 * 1953 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds. 1954 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us 1955 * must be not larger than 20,0000 microseconds. 1956 * 1957 * Note that this routine assumes the caller holds forcewake asserted, it is 1958 * not suitable for very long waits. See intel_wait_for_register() if you 1959 * wish to wait without holding forcewake for the duration (i.e. you expect 1960 * the wait to be slow). 1961 * 1962 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. 1963 */ 1964 int __intel_wait_for_register_fw(struct intel_uncore *uncore, 1965 i915_reg_t reg, 1966 u32 mask, 1967 u32 value, 1968 unsigned int fast_timeout_us, 1969 unsigned int slow_timeout_ms, 1970 u32 *out_value) 1971 { 1972 u32 uninitialized_var(reg_value); 1973 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) 1974 int ret; 1975 1976 /* Catch any overuse of this function */ 1977 might_sleep_if(slow_timeout_ms); 1978 GEM_BUG_ON(fast_timeout_us > 20000); 1979 1980 ret = -ETIMEDOUT; 1981 if (fast_timeout_us && fast_timeout_us <= 20000) 1982 ret = _wait_for_atomic(done, fast_timeout_us, 0); 1983 if (ret && slow_timeout_ms) 1984 ret = wait_for(done, slow_timeout_ms); 1985 1986 if (out_value) 1987 *out_value = reg_value; 1988 1989 return ret; 1990 #undef done 1991 } 1992 1993 /** 1994 * __intel_wait_for_register - wait until register matches expected state 1995 * @uncore: the struct intel_uncore 1996 * @reg: the register to read 1997 * @mask: mask to apply to register value 1998 * @value: expected value 1999 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait 2000 * @slow_timeout_ms: slow timeout in millisecond 2001 * @out_value: optional placeholder to hold registry value 2002 * 2003 * This routine waits until the target register @reg contains the expected 2004 * @value after applying the @mask, i.e. it waits until :: 2005 * 2006 * (I915_READ(reg) & mask) == value 2007 * 2008 * Otherwise, the wait will timeout after @timeout_ms milliseconds. 2009 * 2010 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. 2011 */ 2012 int __intel_wait_for_register(struct intel_uncore *uncore, 2013 i915_reg_t reg, 2014 u32 mask, 2015 u32 value, 2016 unsigned int fast_timeout_us, 2017 unsigned int slow_timeout_ms, 2018 u32 *out_value) 2019 { 2020 unsigned fw = 2021 intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); 2022 u32 reg_value; 2023 int ret; 2024 2025 might_sleep_if(slow_timeout_ms); 2026 2027 spin_lock_irq(&uncore->lock); 2028 intel_uncore_forcewake_get__locked(uncore, fw); 2029 2030 ret = __intel_wait_for_register_fw(uncore, 2031 reg, mask, value, 2032 fast_timeout_us, 0, ®_value); 2033 2034 intel_uncore_forcewake_put__locked(uncore, fw); 2035 spin_unlock_irq(&uncore->lock); 2036 2037 if (ret && slow_timeout_ms) 2038 ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore, 2039 reg), 2040 (reg_value & mask) == value, 2041 slow_timeout_ms * 1000, 10, 1000); 2042 2043 /* just trace the final value */ 2044 trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); 2045 2046 if (out_value) 2047 *out_value = reg_value; 2048 2049 return ret; 2050 } 2051 2052 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) 2053 { 2054 bool ret; 2055 2056 spin_lock_irq(&uncore->debug->lock); 2057 ret = check_for_unclaimed_mmio(uncore); 2058 spin_unlock_irq(&uncore->debug->lock); 2059 2060 return ret; 2061 } 2062 2063 bool 2064 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) 2065 { 2066 bool ret = false; 2067 2068 spin_lock_irq(&uncore->debug->lock); 2069 2070 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0)) 2071 goto out; 2072 2073 if (unlikely(check_for_unclaimed_mmio(uncore))) { 2074 if (!i915_modparams.mmio_debug) { 2075 DRM_DEBUG("Unclaimed register detected, " 2076 "enabling oneshot unclaimed register reporting. " 2077 "Please use i915.mmio_debug=N for more information.\n"); 2078 i915_modparams.mmio_debug++; 2079 } 2080 uncore->debug->unclaimed_mmio_check--; 2081 ret = true; 2082 } 2083 2084 out: 2085 spin_unlock_irq(&uncore->debug->lock); 2086 2087 return ret; 2088 } 2089 2090 /** 2091 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access 2092 * a register 2093 * @uncore: pointer to struct intel_uncore 2094 * @reg: register in question 2095 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE 2096 * 2097 * Returns a set of forcewake domains required to be taken with for example 2098 * intel_uncore_forcewake_get for the specified register to be accessible in the 2099 * specified mode (read, write or read/write) with raw mmio accessors. 2100 * 2101 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the 2102 * callers to do FIFO management on their own or risk losing writes. 2103 */ 2104 enum forcewake_domains 2105 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, 2106 i915_reg_t reg, unsigned int op) 2107 { 2108 enum forcewake_domains fw_domains = 0; 2109 2110 WARN_ON(!op); 2111 2112 if (!intel_uncore_has_forcewake(uncore)) 2113 return 0; 2114 2115 if (op & FW_REG_READ) 2116 fw_domains = uncore->funcs.read_fw_domains(uncore, reg); 2117 2118 if (op & FW_REG_WRITE) 2119 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); 2120 2121 WARN_ON(fw_domains & ~uncore->fw_domains); 2122 2123 return fw_domains; 2124 } 2125 2126 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2127 #include "selftests/mock_uncore.c" 2128 #include "selftests/intel_uncore.c" 2129 #endif 2130