/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
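/*
 * Note on the __raw_* helpers above: they go straight to the mmio BAR with
 * no forcewake, FIFO or tracing logic, so they are only safe from code that
 * has already arranged for the hardware to be awake (as the uncore code
 * below does for itself).
 */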
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
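/*
 * FORCEWAKE_MT and friends are "masked" registers: the high 16 bits of a
 * write select which of the low 16 bits actually take effect, which is what
 * the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers encode. That lets
 * each forcewake user toggle its own bit without a read-modify-write cycle.
 */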
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
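/*
 * Writes to GT registers go through a hardware FIFO with a limited number of
 * entries, some of which are reserved for hardware use. The helper below
 * spins until there is room beyond the reserved entries, so a burst of
 * writes cannot overflow the FIFO while the GT is powered down.
 */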
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO will be shared by both SW and HW,
	 * so we need to read the FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

void vlv_force_wake_get(struct drm_i915_private *dev_priv,
			int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void vlv_force_wake_put(struct drm_i915_private *dev_priv,
			int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
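/*
 * Dropping the last forcewake reference does not release the hardware
 * immediately: gen6_gt_force_wake_put() further below re-arms a one-jiffy
 * timer and lets gen6_force_wake_timer() do the final put, so back-to-back
 * get/put sequences collapse into a single wake/sleep transition.
 */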
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	del_timer_sync(&dev_priv->uncore.force_wake_timer);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
					GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up. */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_val;

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);

	/* Turn off power gating; required especially for BIOS-less systems */
	if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

		if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
			       PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
			       PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
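/*
 * A minimal usage sketch for the get/put pair below (callers elsewhere in
 * the driver follow roughly this shape):
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... a register sequence that must not race RC6 entry ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */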
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
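/*
 * Everything below this point generates the per-generation mmio accessors;
 * each read/write variant differs only in how it handles forcewake, the GT
 * FIFO and unclaimed-register detection.
 */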
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
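/*
 * The write side differs from the read side: a read must hold forcewake for
 * the duration of the access, while a write only needs space in the GT FIFO
 * (gen6/7) or forcewake around non-shadowed registers (gen8).
 */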
#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
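/*
 * intel_uncore_init() selects the forcewake implementation and fills in the
 * mmio accessor vtable once at load time, so the hot register paths never
 * need to re-check which platform they are running on.
 */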
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
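/*
 * i915_reg_read_ioctl() only exposes registers on the whitelist below;
 * gen_bitmask has bit N set when generation N may read the entry (e.g. 0x10
 * for gen4 only, 0x1F0 for gens 4-8).
 */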
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct i915_hw_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}