1 /* 2 * Copyright © 2013 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

/* How long we wait for the hardware to ack a forcewake request. */
#define FORCEWAKE_ACK_TIMEOUT_MS 2

/*
 * Raw MMIO accessors: operate directly on the mapped register BAR and
 * bypass all forcewake/tracing logic. Only for use by the uncore code
 * itself (everything else goes through the mmio_read/write vfuncs).
 */
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

/* Read-and-discard, used to flush posted writes. */
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

/* Index order must match enum forcewake_domain_id (see BUILD_BUG_ON below). */
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

/*
 * intel_uncore_forcewake_domain_to_str - map a domain id to a printable name.
 * Returns "unknown" (and warns) for out-of-range ids.
 */
const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
		     FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

/* Warn (once) if we are touching registers while runtime-suspended. */
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

/* Write the domain's reset value to its forcewake request register. */
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

/*
 * Schedule the deferred forcewake release for the next jiffy, pinned to
 * the current CPU (release happens in intel_uncore_fw_release_timer()).
 */
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

/* Wait for the hardware to drop the kernel forcewake ack bit. */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

/* Request forcewake for the domain (does not wait for the ack). */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

/* Wait for the hardware to assert the kernel forcewake ack bit. */
static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

/* Release forcewake for the domain (no ack wait on release). */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

/*
 * Grab forcewake for every domain in the mask. Ordering is mandated by
 * the hardware handshake: wait for the previous ack to clear, request
 * the wake, then wait for the new ack.
 */
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

/* Release forcewake for every domain in the mask, flushing each write. */
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

/* Flush outstanding posted writes via any one initialized domain. */
static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

/* Write the reset value to every domain in the mask, then flush. */
static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* Nothing to do before the domains have been initialized. */
	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

/* fw_domains_get() plus the gen6+ GT-thread wakeup workaround. */
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

/* Report and clear any accumulated GT wake FIFO errors. */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

/* Release variant for platforms that also need the FIFO error check. */
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

/* Current number of free entries in the GT write FIFO. */
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

/*
 * Reserve a GT FIFO entry before a write, polling until the free count
 * rises above the reserved threshold. Returns the number of timeouts
 * encountered (0 on the happy path) so callers can run the debug check.
 */
static int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

/*
 * Deferred forcewake release, run from the per-domain timer armed by
 * fw_domain_arm_timer(). Drops the reference and, when it hits zero,
 * releases the hardware forcewake for this domain.
 */
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	/* Underflow guard: re-bump so the decrement below cannot wrap. */
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

/*
 * intel_uncore_forcewake_reset - bring forcewake state back to a known
 * baseline (e.g. after GPU reset / resume). With @restore, re-acquire
 * any user-held forcewake afterwards. NOTE: returns with uncore.lock
 * held released only at the end; the loop below first drains all
 * pending release timers.
 */
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		/* Flush every pending release timer on this iteration. */
		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		/* A timer may have re-armed while we were flushing. */
		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		/* Drop the lock so the timers can make progress. */
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	/* Collect the domains still held by explicit user references. */
	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Detect embedded DRAM (eLLC) on HSW/BDW/gen9+ and record its size. */
static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

/* Clear stale unclaimed-register and FIFO errors, then reset forcewake. */
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually caller wants all the domains
 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	/* Ignore domains this platform does not have. */
	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Only wake domains whose refcount was previously zero. */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		/* Last reference: keep one and hand it to the release timer
		 * so the hardware put is deferred slightly. */
		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Debug aid: warn if any domain still holds a forcewake reference. */
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

/*
 * Per-platform register ranges that require a specific forcewake domain
 * to be awake before access. Ranges are [start, end).
 */
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

/* Gen9 blitter domain is the catch-all for everything below 0x40000. */
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 &&\
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real.
	 * MI_MODE is masked, hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

/*
 * With i915.mmio_debug enabled, check FPGA_DBG around every access and
 * warn if something (kernel or otherwise) touched an unclaimed register.
 * Called both before and after the access (@before distinguishes them).
 */
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

/*
 * One-shot detector used when mmio_debug is off: if an unclaimed access
 * is ever seen, turn debugging on for a single round of reporting.
 */
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		/* Enables mmio_debug once, then latches mmio_debug_once off. */
		i915.mmio_debug = mmio_debug_once--;
	}
}

/*
 * Register read/write vfunc generators. The HEADER/FOOTER macros carry
 * the shared prologue/epilogue (tracing, suspend assert, locking).
 */
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

/*
 * Take references on the given domains for the duration of one access;
 * the hardware wake is only issued for domains not already held, and
 * the release is deferred to the per-domain timer. Caller must hold
 * uncore.lock.
 */
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would be constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/* vGPU reads bypass forcewake entirely (host handles the hardware). */
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

/* VLV/CHV/gen9 readers pick the forcewake domain(s) by register range. */
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

/* gen6/hsw writes must reserve a GT FIFO entry before the write. */
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

/* Registers the hardware shadows while in RC6; safe to write while asleep. */
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

/* Linear scan; the table is tiny so this is not a hot-path concern. */
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

/* Install one family of read/write vfuncs into dev_priv->uncore.funcs. */
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


/*
 * Initialize one forcewake domain: record its set/ack registers, pick
 * the per-generation request values, set up the release timer and reset
 * the domain to a known state.
 */
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		/* Gen6 uses plain bit writes to the forcewake register. */
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	/* Register on the same cacheline used to flush posted writes. */
	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

/*
 * Pick the forcewake get/put implementation and the set of forcewake
 * domains for this platform generation. Gen5 and earlier have no
 * forcewake at all.
 */
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		/* VLV (not CHV) additionally needs the FIFO error check. */
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			/* Fall back to the single-threaded gen6 registers. */
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1161 } 1162 1163 if (IS_VALLEYVIEW(dev)) { 1164 ASSIGN_READ_MMIO_VFUNCS(vlv); 1165 } else { 1166 ASSIGN_READ_MMIO_VFUNCS(gen6); 1167 } 1168 break; 1169 case 5: 1170 ASSIGN_WRITE_MMIO_VFUNCS(gen5); 1171 ASSIGN_READ_MMIO_VFUNCS(gen5); 1172 break; 1173 case 4: 1174 case 3: 1175 case 2: 1176 ASSIGN_WRITE_MMIO_VFUNCS(gen2); 1177 ASSIGN_READ_MMIO_VFUNCS(gen2); 1178 break; 1179 } 1180 1181 if (intel_vgpu_active(dev)) { 1182 ASSIGN_WRITE_MMIO_VFUNCS(vgpu); 1183 ASSIGN_READ_MMIO_VFUNCS(vgpu); 1184 } 1185 1186 i915_check_and_clear_faults(dev); 1187 } 1188 #undef ASSIGN_WRITE_MMIO_VFUNCS 1189 #undef ASSIGN_READ_MMIO_VFUNCS 1190 1191 void intel_uncore_fini(struct drm_device *dev) 1192 { 1193 /* Paranoia: make sure we have disabled everything before we exit. */ 1194 intel_uncore_sanitize(dev); 1195 intel_uncore_forcewake_reset(dev, false); 1196 } 1197 1198 #define GEN_RANGE(l, h) GENMASK(h, l) 1199 1200 static const struct register_whitelist { 1201 uint64_t offset; 1202 uint32_t size; 1203 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
*/ 1204 uint32_t gen_bitmask; 1205 } whitelist[] = { 1206 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) }, 1207 }; 1208 1209 int i915_reg_read_ioctl(struct drm_device *dev, 1210 void *data, struct drm_file *file) 1211 { 1212 struct drm_i915_private *dev_priv = dev->dev_private; 1213 struct drm_i915_reg_read *reg = data; 1214 struct register_whitelist const *entry = whitelist; 1215 int i, ret = 0; 1216 1217 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1218 if (entry->offset == reg->offset && 1219 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1220 break; 1221 } 1222 1223 if (i == ARRAY_SIZE(whitelist)) 1224 return -EINVAL; 1225 1226 intel_runtime_pm_get(dev_priv); 1227 1228 switch (entry->size) { 1229 case 8: 1230 reg->val = I915_READ64(reg->offset); 1231 break; 1232 case 4: 1233 reg->val = I915_READ(reg->offset); 1234 break; 1235 case 2: 1236 reg->val = I915_READ16(reg->offset); 1237 break; 1238 case 1: 1239 reg->val = I915_READ8(reg->offset); 1240 break; 1241 default: 1242 MISSING_CASE(entry->size); 1243 ret = -EINVAL; 1244 goto out; 1245 } 1246 1247 out: 1248 intel_runtime_pm_put(dev_priv); 1249 return ret; 1250 } 1251 1252 int i915_get_reset_stats_ioctl(struct drm_device *dev, 1253 void *data, struct drm_file *file) 1254 { 1255 struct drm_i915_private *dev_priv = dev->dev_private; 1256 struct drm_i915_reset_stats *args = data; 1257 struct i915_ctx_hang_stats *hs; 1258 struct intel_context *ctx; 1259 int ret; 1260 1261 if (args->flags || args->pad) 1262 return -EINVAL; 1263 1264 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) 1265 return -EPERM; 1266 1267 ret = mutex_lock_interruptible(&dev->struct_mutex); 1268 if (ret) 1269 return ret; 1270 1271 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); 1272 if (IS_ERR(ctx)) { 1273 mutex_unlock(&dev->struct_mutex); 1274 return PTR_ERR(ctx); 1275 } 1276 hs = &ctx->hang_stats; 1277 1278 if (capable(CAP_SYS_ADMIN)) 1279 args->reset_count = 
i915_reset_count(&dev_priv->gpu_error); 1280 else 1281 args->reset_count = 0; 1282 1283 args->batch_active = hs->batch_active; 1284 args->batch_pending = hs->batch_pending; 1285 1286 mutex_unlock(&dev->struct_mutex); 1287 1288 return 0; 1289 } 1290 1291 static int i915_reset_complete(struct drm_device *dev) 1292 { 1293 u8 gdrst; 1294 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1295 return (gdrst & GRDOM_RESET_STATUS) == 0; 1296 } 1297 1298 static int i915_do_reset(struct drm_device *dev) 1299 { 1300 /* assert reset for at least 20 usec */ 1301 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1302 udelay(20); 1303 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1304 1305 return wait_for(i915_reset_complete(dev), 500); 1306 } 1307 1308 static int g4x_reset_complete(struct drm_device *dev) 1309 { 1310 u8 gdrst; 1311 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1312 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1313 } 1314 1315 static int g33_do_reset(struct drm_device *dev) 1316 { 1317 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1318 return wait_for(g4x_reset_complete(dev), 500); 1319 } 1320 1321 static int g4x_do_reset(struct drm_device *dev) 1322 { 1323 struct drm_i915_private *dev_priv = dev->dev_private; 1324 int ret; 1325 1326 pci_write_config_byte(dev->pdev, I915_GDRST, 1327 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1328 ret = wait_for(g4x_reset_complete(dev), 500); 1329 if (ret) 1330 return ret; 1331 1332 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1333 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1334 POSTING_READ(VDECCLK_GATE_D); 1335 1336 pci_write_config_byte(dev->pdev, I915_GDRST, 1337 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1338 ret = wait_for(g4x_reset_complete(dev), 500); 1339 if (ret) 1340 return ret; 1341 1342 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1343 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1344 
POSTING_READ(VDECCLK_GATE_D); 1345 1346 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1347 1348 return 0; 1349 } 1350 1351 static int ironlake_do_reset(struct drm_device *dev) 1352 { 1353 struct drm_i915_private *dev_priv = dev->dev_private; 1354 int ret; 1355 1356 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1357 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1358 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1359 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1360 if (ret) 1361 return ret; 1362 1363 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1364 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1365 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1366 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1367 if (ret) 1368 return ret; 1369 1370 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0); 1371 1372 return 0; 1373 } 1374 1375 static int gen6_do_reset(struct drm_device *dev) 1376 { 1377 struct drm_i915_private *dev_priv = dev->dev_private; 1378 int ret; 1379 1380 /* Reset the chip */ 1381 1382 /* GEN6_GDRST is not in the gt power well, no need to check 1383 * for fifo space for the write or forcewake the chip for 1384 * the read 1385 */ 1386 __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL); 1387 1388 /* Spin waiting for the device to ack the reset request */ 1389 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 1390 1391 intel_uncore_forcewake_reset(dev, true); 1392 1393 return ret; 1394 } 1395 1396 int intel_gpu_reset(struct drm_device *dev) 1397 { 1398 if (INTEL_INFO(dev)->gen >= 6) 1399 return gen6_do_reset(dev); 1400 else if (IS_GEN5(dev)) 1401 return ironlake_do_reset(dev); 1402 else if (IS_G4X(dev)) 1403 return g4x_do_reset(dev); 1404 else if (IS_G33(dev)) 1405 return g33_do_reset(dev); 1406 else if (INTEL_INFO(dev)->gen >= 3) 1407 return i915_do_reset(dev); 1408 else 1409 return -ENODEV; 1410 } 1411 1412 void intel_uncore_check_errors(struct drm_device *dev) 1413 { 1414 struct drm_i915_private *dev_priv = dev->dev_private; 
1415 1416 if (HAS_FPGA_DBG_UNCLAIMED(dev) && 1417 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1418 DRM_ERROR("Unclaimed register before interrupt\n"); 1419 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1420 } 1421 } 1422