/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
		     FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
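/*
 * Per-domain handshake helpers: fw_domain_get() writes FORCEWAKE_KERNEL to
 * the domain's set register and fw_domain_wait_ack() polls the ack register
 * until the hardware reports the domain awake (fw_domain_wait_ack_clear()
 * and fw_domain_put() handle the inverse), bounded by
 * FORCEWAKE_ACK_TIMEOUT_MS.
 */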
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
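/*
 * Deferred release: forcewake "puts" coming from the register accessors and
 * from intel_uncore_forcewake_put() arm a one-jiffy timer instead of dropping
 * the domain immediately; the actual release happens in the timer callback
 * below, which keeps back-to-back register accesses from bouncing forcewake
 * on and off.
 */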
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
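/*
 * A minimal usage sketch (illustrative only, not lifted from an actual
 * caller): wrap a register sequence that must not see the GT power down in a
 * get/put pair, typically with FORCEWAKE_ALL:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... several I915_READ()/I915_WRITE() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */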
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}
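/*
 * On gen6/hsw, writes that need forcewake also go through a small GT write
 * FIFO: __gen6_gt_wait_for_fifo() keeps at least GT_FIFO_NUM_RESERVED_ENTRIES
 * free before the write, and a non-zero return triggers a GTFIFODBG check
 * afterwards, as in __gen6_write() above and __hsw_write() below.
 */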
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}
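/*
 * Shadowed registers (gen8_shadowed_regs/gen9_shadowed_regs above) are the
 * ones the gen8, chv and gen9 write paths may touch without grabbing
 * forcewake first; non-shadowed registers still go through __force_wake_get()
 * for the domain(s) their offset falls in.
 */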
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
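/*
 * fw_domain_init() records, per forcewake domain, the set/ack register pair
 * and the reset/set/clear values to write (plain bits on gen6, masked writes
 * elsewhere), sets up the release timer and marks the domain as present in
 * uncore.fw_domains.
 */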
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
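/*
 * Top-level uncore init: detect eLLC, set up the forcewake domains, run the
 * early sanitize pass, then select the per-generation (and vGPU) MMIO
 * read/write vfuncs before clearing any stale GT faults.
 */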
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		MISSING_CASE(entry->size);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
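/*
 * Userspace reaches i915_reg_read_ioctl() via DRM_IOCTL_I915_REG_READ with a
 * struct drm_i915_reg_read. A rough sketch of a caller (illustrative only,
 * assuming a libdrm-style drmIoctl() wrapper):
 *
 *	struct drm_i915_reg_read rd = { .offset = 0x2358 };
 *	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd);
 *	printf("render ring timestamp: %llu\n", (unsigned long long)rd.val);
 *
 * 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE), the only whitelisted offset
 * above.
 */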
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}
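/*
 * intel_uncore_check_errors() is evidently meant for the interrupt path
 * (note the "before interrupt" wording): it flags and clears an unclaimed
 * register access that happened before the interrupt was raised.
 */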
void
intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}