/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}
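
/*
 * Forcewake in brief: on GEN6+ the GT can power itself down while the CPU
 * keeps running, and MMIO reads of GT registers may then return zero.
 * Before touching such registers the driver writes FORCEWAKE (or one of the
 * per-generation / per-engine variants below) and waits for the matching
 * ACK bit, which keeps the GT awake until the reference is dropped again.
 */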
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
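
/*
 * Writes that go through the GT wake FIFO (see gen6_write*() below) must
 * leave at least GT_FIFO_NUM_RESERVED_ENTRIES free slots.  Wait here until
 * the free-entry count read from GTFIFOCTL is above that threshold; a
 * non-zero return value means the wait timed out and the caller should
 * check GTFIFODBG for errors.
 */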
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
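
/*
 * The vlv_force_wake_get()/put() wrappers below keep separate render and
 * media reference counts under uncore.lock, so the hardware handshake in
 * __vlv_force_wake_get()/__vlv_force_wake_put() only happens on the first
 * get and the last put for each engine.
 */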
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	del_timer_sync(&dev_priv->uncore.force_wake_timer);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

 out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
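
/*
 * Typical usage of the pair above, as described in the comment on
 * gen6_gt_force_wake_get(): take the reference before a sequence that must
 * not see the GT power down, and drop it afterwards, e.g.
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... read/write the GT registers of interest ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */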
void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) || \
	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
	 ((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(((reg) >= 0x12000 && (reg) < 0x14000) || \
	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
	 ((reg) >= 0x30000 && (reg) < 0x40000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk
	 * Issue a dummy write to wake up the chip from rc6 before touching it
	 * for real. MI_MODE is masked, hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
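
/*
 * Each __*_read(x) macro below expands into a static accessor such as
 * gen6_read32(), which intel_uncore_init() later installs as the matching
 * uncore.funcs.mmio_read* vfunc.  The gen6 variant additionally grabs and
 * releases FORCEWAKE_ALL around the raw read when no forcewake reference
 * is held and the register lies in a range that needs it.
 */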
#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
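
/*
 * GEN8 "shadowed" registers: writes to the registers listed above do not
 * take the forcewake path (the gen8 write macro below only grabs forcewake
 * for non-shadowed registers under 0x40000), presumably because the
 * hardware shadows their values across GT power transitions.
 */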
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
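
/*
 * i915_reg_read_ioctl() above backs DRM_IOCTL_I915_REG_READ.  Roughly, a
 * userspace caller fills struct drm_i915_reg_read with a whitelisted offset
 * and reads the result back from ->val, along the lines of:
 *
 *	struct drm_i915_reg_read rd = { .offset = 0x2358 };
 *	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd);
 *	// rd.val now holds the 64-bit register value
 *
 * (the offset is shown for illustration only; the authoritative list of
 * readable registers is the whitelist[] table above.)
 */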
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4:
		if (IS_G4X(dev))
			return g4x_do_reset(dev);
		else
			return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}