/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

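/*
 * Waking a domain is a three step handshake, applied per domain by
 * fw_domains_get() below: wait for the ack bit to clear, write the "set"
 * value into the forcewake register, then wait for the ack bit to be
 * asserted again before touching any register behind that power well.
 */
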
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read the free entries every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

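/*
 * Forcewake references taken implicitly by the register accessors are not
 * dropped synchronously: __force_wake_get() and __intel_uncore_forcewake_put()
 * arm a one-jiffy timer via fw_domain_arm_timer() instead, and this callback
 * releases the domain once the timer fires. Back-to-back accesses therefore
 * reuse an already awake power well instead of repeating the ack handshake.
 */
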
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

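/*
 * Illustrative call pattern (a sketch, not code taken from this file): a
 * caller that needs a power well to stay up across a sequence of dependent
 * accesses brackets the sequence with an explicit reference instead of
 * relying on the per-access handling:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	I915_WRITE(reg_a, val_a);
 *	I915_WRITE(reg_b, val_b);
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * (reg_a/reg_b/val_a/val_b are placeholders.) The __locked variants below
 * serve callers that already hold dev_priv->uncore.lock.
 */
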
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence, and the references should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

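/*
 * The range macros above map an MMIO offset to the power well(s) that must
 * be awake before the register behind it responds. For example (offset is
 * illustrative): on CHV a register at 0x12080 falls in the 0x12000-0x14000
 * media range, so chv_read32() below wakes only FORCEWAKE_MEDIA for it,
 * while an offset in a "common" range wakes both render and media.
 */
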
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

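/*
 * The per-generation readers below all follow the same shape: GEN6_READ_HEADER
 * takes uncore.lock, __force_wake_get() wakes whichever domains the offset
 * needs (taking a reference that the release timer later drops), the raw read
 * is performed, and GEN6_READ_FOOTER unlocks and emits the tracepoint.
 */
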
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

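/*
 * On gen8+ a handful of registers are treated as "shadowed": writes to them
 * do not need the forcewake handshake, so gen8_write*() and chv_write*()
 * below skip it for them. For instance, a write to RING_TAIL(RENDER_RING_BASE)
 * is issued with __raw_i915_write32() without first waking the render well.
 */
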
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
	      bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

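/*
 * A short note on the values programmed below (a sketch, assuming
 * FORCEWAKE_KERNEL is bit 0 as defined in i915_reg.h): gen6 uses a plain
 * register, so set/clear are simply FORCEWAKE_KERNEL and 0. Later gens use
 * "masked" forcewake registers where the upper 16 bits select which of the
 * lower 16 bits the write affects, hence _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)
 * (0x00010001) to set the bit and _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)
 * (0x00010000) to clear it, letting multiple agents flip their own bits
 * without read-modify-write races.
 */
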
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which will be (correctly)
		 * interpreted by the test below as MT forcewake being
		 * disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

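/*
 * Worked example for the whitelist matching below (a sketch): GEN_RANGE(4, 9)
 * expands to GENMASK(9, 4) == 0x3f0, i.e. bits 4..9 set, so the entry matches
 * when (1 << INTEL_INFO(dev)->gen) lands on one of those bits - gen 4 through
 * gen 9 inclusive.
 */
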
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     const u32 reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

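/*
 * Pick the reset routine for the running platform, newest first. Returns
 * NULL when reset has been disabled via the i915.reset module parameter or
 * when the platform has no supported reset mechanism, which callers treat
 * as "no GPU reset available".
 */
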
static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}