/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		  "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;
	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		uncore->user_forcewake.saved_mmio_check =
			uncore->unclaimed_mmio_check;
		uncore->user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		uncore->unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(uncore))
			dev_info(uncore->i915->drm.dev,
				 "Invalid mmio detected during user access\n");

		uncore->unclaimed_mmio_check =
			uncore->user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			uncore->user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
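
/*
 * Illustrative sketch of the usual get/put pairing (SOME_RENDER_REG and the
 * surrounding sequence are hypothetical, not taken from this file): keep the
 * render well awake across a burst of raw accesses, then release it.
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	intel_uncore_write_fw(uncore, SOME_RENDER_REG, val);
 *	...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */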
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->funcs.force_wake_get)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (WARN(actual < expect,
			 "Expected domain %d to be held awake by caller, count=%d\n",
			 domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
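
/*
 * Illustrative example of how the tables below are consumed (derived from
 * the table contents, not from bspec): with __vlv_fw_ranges, an access to
 * offset 0x12080 falls in the 0x12000-0x13fff range and therefore needs
 * FORCEWAKE_MEDIA, while 0x2100 falls in 0x2000-0x3fff and needs
 * FORCEWAKE_RENDER. find_fw_domain() resolves the entry with the BSEARCH()
 * helper above, which is why every table must stay sorted by offset.
 */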
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
"read from" : "write to", 1077 i915_mmio_reg_offset(reg))) 1078 /* Only report the first N failures */ 1079 i915_modparams.mmio_debug--; 1080 } 1081 1082 static inline void 1083 unclaimed_reg_debug(struct intel_uncore *uncore, 1084 const i915_reg_t reg, 1085 const bool read, 1086 const bool before) 1087 { 1088 if (likely(!i915_modparams.mmio_debug)) 1089 return; 1090 1091 __unclaimed_reg_debug(uncore, reg, read, before); 1092 } 1093 1094 #define GEN2_READ_HEADER(x) \ 1095 u##x val = 0; \ 1096 assert_rpm_wakelock_held(uncore->rpm); 1097 1098 #define GEN2_READ_FOOTER \ 1099 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1100 return val 1101 1102 #define __gen2_read(x) \ 1103 static u##x \ 1104 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1105 GEN2_READ_HEADER(x); \ 1106 val = __raw_uncore_read##x(uncore, reg); \ 1107 GEN2_READ_FOOTER; \ 1108 } 1109 1110 #define __gen5_read(x) \ 1111 static u##x \ 1112 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ 1113 GEN2_READ_HEADER(x); \ 1114 ilk_dummy_write(uncore); \ 1115 val = __raw_uncore_read##x(uncore, reg); \ 1116 GEN2_READ_FOOTER; \ 1117 } 1118 1119 __gen5_read(8) 1120 __gen5_read(16) 1121 __gen5_read(32) 1122 __gen5_read(64) 1123 __gen2_read(8) 1124 __gen2_read(16) 1125 __gen2_read(32) 1126 __gen2_read(64) 1127 1128 #undef __gen5_read 1129 #undef __gen2_read 1130 1131 #undef GEN2_READ_FOOTER 1132 #undef GEN2_READ_HEADER 1133 1134 #define GEN6_READ_HEADER(x) \ 1135 u32 offset = i915_mmio_reg_offset(reg); \ 1136 unsigned long irqflags; \ 1137 u##x val = 0; \ 1138 assert_rpm_wakelock_held(uncore->rpm); \ 1139 spin_lock_irqsave(&uncore->lock, irqflags); \ 1140 unclaimed_reg_debug(uncore, reg, true, true) 1141 1142 #define GEN6_READ_FOOTER \ 1143 unclaimed_reg_debug(uncore, reg, true, false); \ 1144 spin_unlock_irqrestore(&uncore->lock, irqflags); \ 1145 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 1146 return val 1147 1148 static noinline void ___force_wake_auto(struct intel_uncore *uncore, 1149 enum forcewake_domains fw_domains) 1150 { 1151 struct intel_uncore_forcewake_domain *domain; 1152 unsigned int tmp; 1153 1154 GEM_BUG_ON(fw_domains & ~uncore->fw_domains); 1155 1156 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) 1157 fw_domain_arm_timer(domain); 1158 1159 uncore->funcs.force_wake_get(uncore, fw_domains); 1160 } 1161 1162 static inline void __force_wake_auto(struct intel_uncore *uncore, 1163 enum forcewake_domains fw_domains) 1164 { 1165 GEM_BUG_ON(!fw_domains); 1166 1167 /* Turn on all requested but inactive supported forcewake domains. 
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen_reg_read_funcs(func) \
static enum forcewake_domains \
func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_read(func, 8) \
__gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)

__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);

#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_reg_write_funcs(func) \
static enum forcewake_domains \
func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)

__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);

#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure())
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	WARN_ON(d->wake_count);
	WARN_ON(hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (INTEL_GEN(i915) >= 11) {
		int i;

		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	WARN_ON(!ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = uncore->i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (IS_GEN(uncore->i915, 5)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;

	forcewake_early_sanitize(uncore, 0);

	if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	uncore->unclaimed_mmio_check = 1;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(i915, _VCS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(i915, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 11),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
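
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the calling convention the kernel-doc above describes. The caller looks up
 * and holds the needed forcewake domains itself, then polls with the _fw
 * variant using only the atomic fast timeout. A runtime PM wakeref is assumed
 * to be held by the caller, and the 500us budget is an arbitrary example.
 */
static int __maybe_unused
example_poll_register_fw(struct intel_uncore *uncore,
			 i915_reg_t reg, u32 mask, u32 value)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	int err;

	intel_uncore_forcewake_get(uncore, fw);

	/* Atomic-safe: no sleeping slow phase (slow_timeout_ms == 0). */
	err = __intel_wait_for_register_fw(uncore, reg, mask, value,
					   500, 0, NULL);

	intel_uncore_forcewake_put(uncore, fw);

	return err;
}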

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	return check_for_unclaimed_mmio(uncore);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->lock);

	if (unlikely(uncore->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken (with, for example,
 * intel_uncore_forcewake_get()) for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif
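
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the pattern intel_uncore_forcewake_for_reg() above is meant for. Look up
 * the domains needed to write @reg raw, hold them across
 * intel_uncore_write_fw(), then release them. A runtime PM wakeref is assumed
 * to be held by the caller; on gen6/7 the FIFO management mentioned in the
 * NOTE above is also the caller's responsibility.
 */
static void __maybe_unused
example_raw_write(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_WRITE);

	intel_uncore_forcewake_get(uncore, fw);
	intel_uncore_write_fw(uncore, reg, val);
	intel_uncore_forcewake_put(uncore, fw);
}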