1 /* 2 * Copyright © 2013 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include "i915_drv.h" 25 #include "intel_drv.h" 26 27 #define FORCEWAKE_ACK_TIMEOUT_MS 2 28 29 #define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) 30 #define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) 31 32 #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) 33 #define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__)) 34 35 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) 36 #define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__)) 37 38 #define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__)) 39 #define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__)) 40 41 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) 42 43 static void 44 assert_device_not_suspended(struct drm_i915_private *dev_priv) 45 { 46 WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, 47 "Device suspended\n"); 48 } 49 50 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) 51 { 52 /* w/a for a sporadic read returning 0 by waiting for the GT 53 * thread to wake up. 54 */ 55 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & 56 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500)) 57 DRM_ERROR("GT thread status wait timed out\n"); 58 } 59 60 static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) 61 { 62 __raw_i915_write32(dev_priv, FORCEWAKE, 0); 63 /* something from same cacheline, but !FORCEWAKE */ 64 __raw_posting_read(dev_priv, ECOBUS); 65 } 66 67 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, 68 int fw_engine) 69 { 70 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, 71 FORCEWAKE_ACK_TIMEOUT_MS)) 72 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 73 74 __raw_i915_write32(dev_priv, FORCEWAKE, 1); 75 /* something from same cacheline, but !FORCEWAKE */ 76 __raw_posting_read(dev_priv, ECOBUS); 77 78 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1), 79 FORCEWAKE_ACK_TIMEOUT_MS)) 80 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 81 82 /* WaRsForcewakeWaitTC0:snb */ 83 __gen6_gt_wait_for_thread_c0(dev_priv); 84 } 85 86 static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) 87 { 88 __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); 89 /* something from same cacheline, but !FORCEWAKE_MT */ 90 __raw_posting_read(dev_priv, ECOBUS); 91 } 92 93 static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, 94 int fw_engine) 95 { 96 u32 forcewake_ack; 97 98 if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev)) 99 forcewake_ack = FORCEWAKE_ACK_HSW; 100 else 101 forcewake_ack = FORCEWAKE_MT_ACK; 102 103 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0, 104 FORCEWAKE_ACK_TIMEOUT_MS)) 105 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 106 107 __raw_i915_write32(dev_priv, FORCEWAKE_MT, 108 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 109 /* something from same cacheline, but !FORCEWAKE_MT */ 110 __raw_posting_read(dev_priv, ECOBUS); 111 112 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL), 113 FORCEWAKE_ACK_TIMEOUT_MS)) 114 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 115 116 /* WaRsForcewakeWaitTC0:ivb,hsw */ 117 __gen6_gt_wait_for_thread_c0(dev_priv); 118 } 119 120 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) 121 { 122 u32 gtfifodbg; 123 124 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); 125 if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg)) 126 __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg); 127 } 128 129 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, 130 int fw_engine) 131 { 132 __raw_i915_write32(dev_priv, FORCEWAKE, 0); 133 /* something from same cacheline, but !FORCEWAKE */ 134 __raw_posting_read(dev_priv, ECOBUS); 135 gen6_gt_check_fifodbg(dev_priv); 136 } 137 138 static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv, 139 int fw_engine) 140 { 141 __raw_i915_write32(dev_priv, FORCEWAKE_MT, 142 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 143 /* something from same cacheline, but !FORCEWAKE_MT */ 144 __raw_posting_read(dev_priv, ECOBUS); 145 146 if (IS_GEN7(dev_priv->dev)) 147 gen6_gt_check_fifodbg(dev_priv); 148 } 149 150 static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 151 { 152 int ret = 0; 153 154 /* On VLV, FIFO will be shared by both SW and HW. 155 * So, we need to read the FREE_ENTRIES everytime */ 156 if (IS_VALLEYVIEW(dev_priv->dev)) 157 dev_priv->uncore.fifo_count = 158 __raw_i915_read32(dev_priv, GTFIFOCTL) & 159 GT_FIFO_FREE_ENTRIES_MASK; 160 161 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { 162 int loop = 500; 163 u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; 164 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { 165 udelay(10); 166 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; 167 } 168 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) 169 ++ret; 170 dev_priv->uncore.fifo_count = fifo; 171 } 172 dev_priv->uncore.fifo_count--; 173 174 return ret; 175 } 176 177 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) 178 { 179 __raw_i915_write32(dev_priv, FORCEWAKE_VLV, 180 _MASKED_BIT_DISABLE(0xffff)); 181 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, 182 _MASKED_BIT_DISABLE(0xffff)); 183 /* something from same cacheline, but !FORCEWAKE_VLV */ 184 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); 185 } 186 187 static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, 188 int fw_engine) 189 { 190 /* Check for Render Engine */ 191 if (FORCEWAKE_RENDER & fw_engine) { 192 if (wait_for_atomic((__raw_i915_read32(dev_priv, 193 FORCEWAKE_ACK_VLV) & 194 FORCEWAKE_KERNEL) == 0, 195 FORCEWAKE_ACK_TIMEOUT_MS)) 196 DRM_ERROR("Timed out: Render forcewake old ack to clear.\n"); 197 198 __raw_i915_write32(dev_priv, FORCEWAKE_VLV, 199 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 200 201 if (wait_for_atomic((__raw_i915_read32(dev_priv, 202 FORCEWAKE_ACK_VLV) & 203 FORCEWAKE_KERNEL), 204 FORCEWAKE_ACK_TIMEOUT_MS)) 205 DRM_ERROR("Timed out: waiting for Render to ack.\n"); 206 } 207 208 /* Check for Media Engine */ 209 if (FORCEWAKE_MEDIA & fw_engine) { 210 if (wait_for_atomic((__raw_i915_read32(dev_priv, 211 FORCEWAKE_ACK_MEDIA_VLV) & 212 FORCEWAKE_KERNEL) == 0, 213 FORCEWAKE_ACK_TIMEOUT_MS)) 214 DRM_ERROR("Timed out: Media forcewake old ack to clear.\n"); 215 216 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, 217 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 218 219 if (wait_for_atomic((__raw_i915_read32(dev_priv, 220 FORCEWAKE_ACK_MEDIA_VLV) & 221 FORCEWAKE_KERNEL), 222 FORCEWAKE_ACK_TIMEOUT_MS)) 223 DRM_ERROR("Timed out: waiting for media to ack.\n"); 224 } 225 } 226 227 static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, 228 int fw_engine) 229 { 230 231 /* Check for Render Engine */ 232 if (FORCEWAKE_RENDER & fw_engine) 233 __raw_i915_write32(dev_priv, FORCEWAKE_VLV, 234 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 235 236 237 /* Check for Media Engine */ 238 if (FORCEWAKE_MEDIA & fw_engine) 239 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, 240 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 241 242 /* something from same cacheline, but !FORCEWAKE_VLV */ 243 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); 244 if (!IS_CHERRYVIEW(dev_priv->dev)) 245 gen6_gt_check_fifodbg(dev_priv); 246 } 247 248 static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) 249 { 250 unsigned long irqflags; 251 252 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 253 254 if (fw_engine & FORCEWAKE_RENDER && 255 dev_priv->uncore.fw_rendercount++ != 0) 256 fw_engine &= ~FORCEWAKE_RENDER; 257 if (fw_engine & FORCEWAKE_MEDIA && 258 dev_priv->uncore.fw_mediacount++ != 0) 259 fw_engine &= ~FORCEWAKE_MEDIA; 260 261 if (fw_engine) 262 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine); 263 264 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 265 } 266 267 static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) 268 { 269 unsigned long irqflags; 270 271 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 272 273 if (fw_engine & FORCEWAKE_RENDER) { 274 WARN_ON(!dev_priv->uncore.fw_rendercount); 275 if (--dev_priv->uncore.fw_rendercount != 0) 276 fw_engine &= ~FORCEWAKE_RENDER; 277 } 278 279 if (fw_engine & FORCEWAKE_MEDIA) { 280 WARN_ON(!dev_priv->uncore.fw_mediacount); 281 if (--dev_priv->uncore.fw_mediacount != 0) 282 fw_engine &= ~FORCEWAKE_MEDIA; 283 } 284 285 if (fw_engine) 286 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine); 287 288 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 289 } 290 291 static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) 292 { 293 __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9, 294 _MASKED_BIT_DISABLE(0xffff)); 295 296 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9, 297 _MASKED_BIT_DISABLE(0xffff)); 298 299 __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9, 300 _MASKED_BIT_DISABLE(0xffff)); 301 } 302 303 static void 304 __gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) 305 { 306 /* Check for Render Engine */ 307 if (FORCEWAKE_RENDER & fw_engine) { 308 if (wait_for_atomic((__raw_i915_read32(dev_priv, 309 FORCEWAKE_ACK_RENDER_GEN9) & 310 FORCEWAKE_KERNEL) == 0, 311 FORCEWAKE_ACK_TIMEOUT_MS)) 312 DRM_ERROR("Timed out: Render forcewake old ack to clear.\n"); 313 314 __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9, 315 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 316 317 if (wait_for_atomic((__raw_i915_read32(dev_priv, 318 FORCEWAKE_ACK_RENDER_GEN9) & 319 FORCEWAKE_KERNEL), 320 FORCEWAKE_ACK_TIMEOUT_MS)) 321 DRM_ERROR("Timed out: waiting for Render to ack.\n"); 322 } 323 324 /* Check for Media Engine */ 325 if (FORCEWAKE_MEDIA & fw_engine) { 326 if (wait_for_atomic((__raw_i915_read32(dev_priv, 327 FORCEWAKE_ACK_MEDIA_GEN9) & 328 FORCEWAKE_KERNEL) == 0, 329 FORCEWAKE_ACK_TIMEOUT_MS)) 330 DRM_ERROR("Timed out: Media forcewake old ack to clear.\n"); 331 332 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9, 333 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 334 335 if (wait_for_atomic((__raw_i915_read32(dev_priv, 336 FORCEWAKE_ACK_MEDIA_GEN9) & 337 FORCEWAKE_KERNEL), 338 FORCEWAKE_ACK_TIMEOUT_MS)) 339 DRM_ERROR("Timed out: waiting for Media to ack.\n"); 340 } 341 342 /* Check for Blitter Engine */ 343 if (FORCEWAKE_BLITTER & fw_engine) { 344 if (wait_for_atomic((__raw_i915_read32(dev_priv, 345 FORCEWAKE_ACK_BLITTER_GEN9) & 346 FORCEWAKE_KERNEL) == 0, 347 FORCEWAKE_ACK_TIMEOUT_MS)) 348 DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n"); 349 350 __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9, 351 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 352 353 if (wait_for_atomic((__raw_i915_read32(dev_priv, 354 FORCEWAKE_ACK_BLITTER_GEN9) & 355 FORCEWAKE_KERNEL), 356 FORCEWAKE_ACK_TIMEOUT_MS)) 357 DRM_ERROR("Timed out: waiting for Blitter to ack.\n"); 358 } 359 } 360 361 static void 362 __gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) 363 { 364 /* Check for Render Engine */ 365 if (FORCEWAKE_RENDER & fw_engine) 366 __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9, 367 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 368 369 /* Check for Media Engine */ 370 if (FORCEWAKE_MEDIA & fw_engine) 371 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9, 372 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 373 374 /* Check for Blitter Engine */ 375 if (FORCEWAKE_BLITTER & fw_engine) 376 __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9, 377 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 378 } 379 380 static void 381 gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) 382 { 383 unsigned long irqflags; 384 385 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 386 387 if (FORCEWAKE_RENDER & fw_engine) { 388 if (dev_priv->uncore.fw_rendercount++ == 0) 389 dev_priv->uncore.funcs.force_wake_get(dev_priv, 390 FORCEWAKE_RENDER); 391 } 392 393 if (FORCEWAKE_MEDIA & fw_engine) { 394 if (dev_priv->uncore.fw_mediacount++ == 0) 395 dev_priv->uncore.funcs.force_wake_get(dev_priv, 396 FORCEWAKE_MEDIA); 397 } 398 399 if (FORCEWAKE_BLITTER & fw_engine) { 400 if (dev_priv->uncore.fw_blittercount++ == 0) 401 dev_priv->uncore.funcs.force_wake_get(dev_priv, 402 FORCEWAKE_BLITTER); 403 } 404 405 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 406 } 407 408 static void 409 gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) 410 { 411 unsigned long irqflags; 412 413 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 414 415 if (FORCEWAKE_RENDER & fw_engine) { 416 WARN_ON(dev_priv->uncore.fw_rendercount == 0); 417 if (--dev_priv->uncore.fw_rendercount == 0) 418 dev_priv->uncore.funcs.force_wake_put(dev_priv, 419 FORCEWAKE_RENDER); 420 } 421 422 if (FORCEWAKE_MEDIA & fw_engine) { 423 WARN_ON(dev_priv->uncore.fw_mediacount == 0); 424 if (--dev_priv->uncore.fw_mediacount == 0) 425 dev_priv->uncore.funcs.force_wake_put(dev_priv, 426 FORCEWAKE_MEDIA); 427 } 428 429 if (FORCEWAKE_BLITTER & fw_engine) { 430 WARN_ON(dev_priv->uncore.fw_blittercount == 0); 431 if (--dev_priv->uncore.fw_blittercount == 0) 432 dev_priv->uncore.funcs.force_wake_put(dev_priv, 433 FORCEWAKE_BLITTER); 434 } 435 436 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 437 } 438 439 static void gen6_force_wake_timer(unsigned long arg) 440 { 441 struct drm_i915_private *dev_priv = (void *)arg; 442 unsigned long irqflags; 443 444 assert_device_not_suspended(dev_priv); 445 446 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 447 WARN_ON(!dev_priv->uncore.forcewake_count); 448 449 if (--dev_priv->uncore.forcewake_count == 0) 450 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); 451 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 452 453 intel_runtime_pm_put(dev_priv); 454 } 455 456 void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 457 { 458 struct drm_i915_private *dev_priv = dev->dev_private; 459 unsigned long irqflags; 460 461 if (del_timer_sync(&dev_priv->uncore.force_wake_timer)) 462 gen6_force_wake_timer((unsigned long)dev_priv); 463 464 /* Hold uncore.lock across reset to prevent any register access 465 * with forcewake not set correctly 466 */ 467 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 468 469 if (IS_VALLEYVIEW(dev)) 470 vlv_force_wake_reset(dev_priv); 471 else if (IS_GEN6(dev) || IS_GEN7(dev)) 472 __gen6_gt_force_wake_reset(dev_priv); 473 474 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) 475 __gen7_gt_force_wake_mt_reset(dev_priv); 476 477 if (IS_GEN9(dev)) 478 __gen9_gt_force_wake_mt_reset(dev_priv); 479 480 if (restore) { /* If reset with a user forcewake, try to restore */ 481 unsigned fw = 0; 482 483 if (IS_VALLEYVIEW(dev)) { 484 if (dev_priv->uncore.fw_rendercount) 485 fw |= FORCEWAKE_RENDER; 486 487 if (dev_priv->uncore.fw_mediacount) 488 fw |= FORCEWAKE_MEDIA; 489 } else if (IS_GEN9(dev)) { 490 if (dev_priv->uncore.fw_rendercount) 491 fw |= FORCEWAKE_RENDER; 492 493 if (dev_priv->uncore.fw_mediacount) 494 fw |= FORCEWAKE_MEDIA; 495 496 if (dev_priv->uncore.fw_blittercount) 497 fw |= FORCEWAKE_BLITTER; 498 } else { 499 if (dev_priv->uncore.forcewake_count) 500 fw = FORCEWAKE_ALL; 501 } 502 503 if (fw) 504 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 505 506 if (IS_GEN6(dev) || IS_GEN7(dev)) 507 dev_priv->uncore.fifo_count = 508 __raw_i915_read32(dev_priv, GTFIFOCTL) & 509 GT_FIFO_FREE_ENTRIES_MASK; 510 } 511 512 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 513 } 514 515 static void __intel_uncore_early_sanitize(struct drm_device *dev, 516 bool restore_forcewake) 517 { 518 struct drm_i915_private *dev_priv = dev->dev_private; 519 520 if (HAS_FPGA_DBG_UNCLAIMED(dev)) 521 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 522 523 if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && 524 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) { 525 /* The docs do not explain exactly how the calculation can be 526 * made. It is somewhat guessable, but for now, it's always 527 * 128MB. 528 * NB: We can't write IDICR yet because we do not have gt funcs 529 * set up */ 530 dev_priv->ellc_size = 128; 531 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); 532 } 533 534 /* clear out old GT FIFO errors */ 535 if (IS_GEN6(dev) || IS_GEN7(dev)) 536 __raw_i915_write32(dev_priv, GTFIFODBG, 537 __raw_i915_read32(dev_priv, GTFIFODBG)); 538 539 intel_uncore_forcewake_reset(dev, restore_forcewake); 540 } 541 542 void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 543 { 544 __intel_uncore_early_sanitize(dev, restore_forcewake); 545 i915_check_and_clear_faults(dev); 546 } 547 548 void intel_uncore_sanitize(struct drm_device *dev) 549 { 550 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 551 intel_disable_gt_powersave(dev); 552 } 553 554 /* 555 * Generally this is called implicitly by the register read function. However, 556 * if some sequence requires the GT to not power down then this function should 557 * be called at the beginning of the sequence followed by a call to 558 * gen6_gt_force_wake_put() at the end of the sequence. 559 */ 560 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) 561 { 562 unsigned long irqflags; 563 564 if (!dev_priv->uncore.funcs.force_wake_get) 565 return; 566 567 intel_runtime_pm_get(dev_priv); 568 569 /* Redirect to Gen9 specific routine */ 570 if (IS_GEN9(dev_priv->dev)) 571 return gen9_force_wake_get(dev_priv, fw_engine); 572 573 /* Redirect to VLV specific routine */ 574 if (IS_VALLEYVIEW(dev_priv->dev)) 575 return vlv_force_wake_get(dev_priv, fw_engine); 576 577 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 578 if (dev_priv->uncore.forcewake_count++ == 0) 579 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); 580 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 581 } 582 583 /* 584 * see gen6_gt_force_wake_get() 585 */ 586 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) 587 { 588 unsigned long irqflags; 589 bool delayed = false; 590 591 if (!dev_priv->uncore.funcs.force_wake_put) 592 return; 593 594 /* Redirect to Gen9 specific routine */ 595 if (IS_GEN9(dev_priv->dev)) { 596 gen9_force_wake_put(dev_priv, fw_engine); 597 goto out; 598 } 599 600 /* Redirect to VLV specific routine */ 601 if (IS_VALLEYVIEW(dev_priv->dev)) { 602 vlv_force_wake_put(dev_priv, fw_engine); 603 goto out; 604 } 605 606 607 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 608 WARN_ON(!dev_priv->uncore.forcewake_count); 609 610 if (--dev_priv->uncore.forcewake_count == 0) { 611 dev_priv->uncore.forcewake_count++; 612 delayed = true; 613 mod_timer_pinned(&dev_priv->uncore.force_wake_timer, 614 jiffies + 1); 615 } 616 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 617 618 out: 619 if (!delayed) 620 intel_runtime_pm_put(dev_priv); 621 } 622 623 void assert_force_wake_inactive(struct drm_i915_private *dev_priv) 624 { 625 if (!dev_priv->uncore.funcs.force_wake_get) 626 return; 627 628 WARN_ON(dev_priv->uncore.forcewake_count > 0); 629 } 630 631 /* We give fast paths for the really cool registers */ 632 #define NEEDS_FORCE_WAKE(dev_priv, reg) \ 633 ((reg) < 0x40000 && (reg) != FORCEWAKE) 634 635 #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) 636 637 #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ 638 (REG_RANGE((reg), 0x2000, 0x4000) || \ 639 REG_RANGE((reg), 0x5000, 0x8000) || \ 640 REG_RANGE((reg), 0xB000, 0x12000) || \ 641 REG_RANGE((reg), 0x2E000, 0x30000)) 642 643 #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \ 644 (REG_RANGE((reg), 0x12000, 0x14000) || \ 645 REG_RANGE((reg), 0x22000, 0x24000) || \ 646 REG_RANGE((reg), 0x30000, 0x40000)) 647 648 #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ 649 (REG_RANGE((reg), 0x2000, 0x4000) || \ 650 REG_RANGE((reg), 0x5200, 0x8000) || \ 651 REG_RANGE((reg), 0x8300, 0x8500) || \ 652 REG_RANGE((reg), 0xB000, 0xB480) || \ 653 REG_RANGE((reg), 0xE000, 0xE800)) 654 655 #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ 656 (REG_RANGE((reg), 0x8800, 0x8900) || \ 657 REG_RANGE((reg), 0xD000, 0xD800) || \ 658 REG_RANGE((reg), 0x12000, 0x14000) || \ 659 REG_RANGE((reg), 0x1A000, 0x1C000) || \ 660 REG_RANGE((reg), 0x1E800, 0x1EA00) || \ 661 REG_RANGE((reg), 0x30000, 0x38000)) 662 663 #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ 664 (REG_RANGE((reg), 0x4000, 0x5000) || \ 665 REG_RANGE((reg), 0x8000, 0x8300) || \ 666 REG_RANGE((reg), 0x8500, 0x8600) || \ 667 REG_RANGE((reg), 0x9000, 0xB000) || \ 668 REG_RANGE((reg), 0xF000, 0x10000)) 669 670 #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ 671 REG_RANGE((reg), 0xB00, 0x2000) 672 673 #define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \ 674 (REG_RANGE((reg), 0x2000, 0x2700) || \ 675 REG_RANGE((reg), 0x3000, 0x4000) || \ 676 REG_RANGE((reg), 0x5200, 0x8000) || \ 677 REG_RANGE((reg), 0x8140, 0x8160) || \ 678 REG_RANGE((reg), 0x8300, 0x8500) || \ 679 REG_RANGE((reg), 0x8C00, 0x8D00) || \ 680 REG_RANGE((reg), 0xB000, 0xB480) || \ 681 REG_RANGE((reg), 0xE000, 0xE900) || \ 682 REG_RANGE((reg), 0x24400, 0x24800)) 683 684 #define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \ 685 (REG_RANGE((reg), 0x8130, 0x8140) || \ 686 REG_RANGE((reg), 0x8800, 0x8A00) || \ 687 REG_RANGE((reg), 0xD000, 0xD800) || \ 688 REG_RANGE((reg), 0x12000, 0x14000) || \ 689 REG_RANGE((reg), 0x1A000, 0x1EA00) || \ 690 REG_RANGE((reg), 0x30000, 0x40000)) 691 692 #define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \ 693 REG_RANGE((reg), 0x9400, 0x9800) 694 695 #define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \ 696 ((reg) < 0x40000 &&\ 697 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \ 698 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \ 699 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ 700 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) 701 702 static void 703 ilk_dummy_write(struct drm_i915_private *dev_priv) 704 { 705 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up 706 * the chip from rc6 before touching it for real. MI_MODE is masked, 707 * hence harmless to write 0 into. */ 708 __raw_i915_write32(dev_priv, MI_MODE, 0); 709 } 710 711 static void 712 hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, 713 bool before) 714 { 715 const char *op = read ? "reading" : "writing to"; 716 const char *when = before ? "before" : "after"; 717 718 if (!i915.mmio_debug) 719 return; 720 721 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 722 WARN(1, "Unclaimed register detected %s %s register 0x%x\n", 723 when, op, reg); 724 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 725 } 726 } 727 728 static void 729 hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) 730 { 731 if (i915.mmio_debug) 732 return; 733 734 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 735 DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem."); 736 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 737 } 738 } 739 740 #define REG_READ_HEADER(x) \ 741 unsigned long irqflags; \ 742 u##x val = 0; \ 743 assert_device_not_suspended(dev_priv); \ 744 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) 745 746 #define REG_READ_FOOTER \ 747 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 748 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ 749 return val 750 751 #define __gen4_read(x) \ 752 static u##x \ 753 gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 754 REG_READ_HEADER(x); \ 755 val = __raw_i915_read##x(dev_priv, reg); \ 756 REG_READ_FOOTER; \ 757 } 758 759 #define __gen5_read(x) \ 760 static u##x \ 761 gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 762 REG_READ_HEADER(x); \ 763 ilk_dummy_write(dev_priv); \ 764 val = __raw_i915_read##x(dev_priv, reg); \ 765 REG_READ_FOOTER; \ 766 } 767 768 #define __gen6_read(x) \ 769 static u##x \ 770 gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 771 REG_READ_HEADER(x); \ 772 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ 773 if (dev_priv->uncore.forcewake_count == 0 && \ 774 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 775 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 776 FORCEWAKE_ALL); \ 777 val = __raw_i915_read##x(dev_priv, reg); \ 778 dev_priv->uncore.funcs.force_wake_put(dev_priv, \ 779 FORCEWAKE_ALL); \ 780 } else { \ 781 val = __raw_i915_read##x(dev_priv, reg); \ 782 } \ 783 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ 784 REG_READ_FOOTER; \ 785 } 786 787 #define __vlv_read(x) \ 788 static u##x \ 789 vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 790 unsigned fwengine = 0; \ 791 REG_READ_HEADER(x); \ 792 if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \ 793 if (dev_priv->uncore.fw_rendercount == 0) \ 794 fwengine = FORCEWAKE_RENDER; \ 795 } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \ 796 if (dev_priv->uncore.fw_mediacount == 0) \ 797 fwengine = FORCEWAKE_MEDIA; \ 798 } \ 799 if (fwengine) \ 800 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ 801 val = __raw_i915_read##x(dev_priv, reg); \ 802 if (fwengine) \ 803 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ 804 REG_READ_FOOTER; \ 805 } 806 807 #define __chv_read(x) \ 808 static u##x \ 809 chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 810 unsigned fwengine = 0; \ 811 REG_READ_HEADER(x); \ 812 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ 813 if (dev_priv->uncore.fw_rendercount == 0) \ 814 fwengine = FORCEWAKE_RENDER; \ 815 } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ 816 if (dev_priv->uncore.fw_mediacount == 0) \ 817 fwengine = FORCEWAKE_MEDIA; \ 818 } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ 819 if (dev_priv->uncore.fw_rendercount == 0) \ 820 fwengine |= FORCEWAKE_RENDER; \ 821 if (dev_priv->uncore.fw_mediacount == 0) \ 822 fwengine |= FORCEWAKE_MEDIA; \ 823 } \ 824 if (fwengine) \ 825 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ 826 val = __raw_i915_read##x(dev_priv, reg); \ 827 if (fwengine) \ 828 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ 829 REG_READ_FOOTER; \ 830 } 831 832 #define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \ 833 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) 834 835 #define __gen9_read(x) \ 836 static u##x \ 837 gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 838 REG_READ_HEADER(x); \ 839 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 840 val = __raw_i915_read##x(dev_priv, reg); \ 841 } else { \ 842 unsigned fwengine = 0; \ 843 if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \ 844 if (dev_priv->uncore.fw_rendercount == 0) \ 845 fwengine = FORCEWAKE_RENDER; \ 846 } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \ 847 if (dev_priv->uncore.fw_mediacount == 0) \ 848 fwengine = FORCEWAKE_MEDIA; \ 849 } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \ 850 if (dev_priv->uncore.fw_rendercount == 0) \ 851 fwengine |= FORCEWAKE_RENDER; \ 852 if (dev_priv->uncore.fw_mediacount == 0) \ 853 fwengine |= FORCEWAKE_MEDIA; \ 854 } else { \ 855 if (dev_priv->uncore.fw_blittercount == 0) \ 856 fwengine = FORCEWAKE_BLITTER; \ 857 } \ 858 if (fwengine) \ 859 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ 860 val = __raw_i915_read##x(dev_priv, reg); \ 861 if (fwengine) \ 862 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ 863 } \ 864 REG_READ_FOOTER; \ 865 } 866 867 __gen9_read(8) 868 __gen9_read(16) 869 __gen9_read(32) 870 __gen9_read(64) 871 __chv_read(8) 872 __chv_read(16) 873 __chv_read(32) 874 __chv_read(64) 875 __vlv_read(8) 876 __vlv_read(16) 877 __vlv_read(32) 878 __vlv_read(64) 879 __gen6_read(8) 880 __gen6_read(16) 881 __gen6_read(32) 882 __gen6_read(64) 883 __gen5_read(8) 884 __gen5_read(16) 885 __gen5_read(32) 886 __gen5_read(64) 887 __gen4_read(8) 888 __gen4_read(16) 889 __gen4_read(32) 890 __gen4_read(64) 891 892 #undef __gen9_read 893 #undef __chv_read 894 #undef __vlv_read 895 #undef __gen6_read 896 #undef __gen5_read 897 #undef __gen4_read 898 #undef REG_READ_FOOTER 899 #undef REG_READ_HEADER 900 901 #define REG_WRITE_HEADER \ 902 unsigned long irqflags; \ 903 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 904 assert_device_not_suspended(dev_priv); \ 905 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) 906 907 #define REG_WRITE_FOOTER \ 908 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) 909 910 #define __gen4_write(x) \ 911 static void \ 912 gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 913 REG_WRITE_HEADER; \ 914 __raw_i915_write##x(dev_priv, reg, val); \ 915 REG_WRITE_FOOTER; \ 916 } 917 918 #define __gen5_write(x) \ 919 static void \ 920 gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 921 REG_WRITE_HEADER; \ 922 ilk_dummy_write(dev_priv); \ 923 __raw_i915_write##x(dev_priv, reg, val); \ 924 REG_WRITE_FOOTER; \ 925 } 926 927 #define __gen6_write(x) \ 928 static void \ 929 gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 930 u32 __fifo_ret = 0; \ 931 REG_WRITE_HEADER; \ 932 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 933 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 934 } \ 935 __raw_i915_write##x(dev_priv, reg, val); \ 936 if (unlikely(__fifo_ret)) { \ 937 gen6_gt_check_fifodbg(dev_priv); \ 938 } \ 939 REG_WRITE_FOOTER; \ 940 } 941 942 #define __hsw_write(x) \ 943 static void \ 944 hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 945 u32 __fifo_ret = 0; \ 946 REG_WRITE_HEADER; \ 947 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 948 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 949 } \ 950 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ 951 __raw_i915_write##x(dev_priv, reg, val); \ 952 if (unlikely(__fifo_ret)) { \ 953 gen6_gt_check_fifodbg(dev_priv); \ 954 } \ 955 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ 956 hsw_unclaimed_reg_detect(dev_priv); \ 957 REG_WRITE_FOOTER; \ 958 } 959 960 static const u32 gen8_shadowed_regs[] = { 961 FORCEWAKE_MT, 962 GEN6_RPNSWREQ, 963 GEN6_RC_VIDEO_FREQ, 964 RING_TAIL(RENDER_RING_BASE), 965 RING_TAIL(GEN6_BSD_RING_BASE), 966 RING_TAIL(VEBOX_RING_BASE), 967 RING_TAIL(BLT_RING_BASE), 968 /* TODO: Other registers are not yet used */ 969 }; 970 971 static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) 972 { 973 int i; 974 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) 975 if (reg == gen8_shadowed_regs[i]) 976 return true; 977 978 return false; 979 } 980 981 #define __gen8_write(x) \ 982 static void \ 983 gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 984 REG_WRITE_HEADER; \ 985 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ 986 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \ 987 if (dev_priv->uncore.forcewake_count == 0) \ 988 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 989 FORCEWAKE_ALL); \ 990 __raw_i915_write##x(dev_priv, reg, val); \ 991 if (dev_priv->uncore.forcewake_count == 0) \ 992 dev_priv->uncore.funcs.force_wake_put(dev_priv, \ 993 FORCEWAKE_ALL); \ 994 } else { \ 995 __raw_i915_write##x(dev_priv, reg, val); \ 996 } \ 997 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ 998 hsw_unclaimed_reg_detect(dev_priv); \ 999 REG_WRITE_FOOTER; \ 1000 } 1001 1002 #define __chv_write(x) \ 1003 static void \ 1004 chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 1005 unsigned fwengine = 0; \ 1006 bool shadowed = is_gen8_shadowed(dev_priv, reg); \ 1007 REG_WRITE_HEADER; \ 1008 if (!shadowed) { \ 1009 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ 1010 if (dev_priv->uncore.fw_rendercount == 0) \ 1011 fwengine = FORCEWAKE_RENDER; \ 1012 } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ 1013 if (dev_priv->uncore.fw_mediacount == 0) \ 1014 fwengine = FORCEWAKE_MEDIA; \ 1015 } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ 1016 if (dev_priv->uncore.fw_rendercount == 0) \ 1017 fwengine |= FORCEWAKE_RENDER; \ 1018 if (dev_priv->uncore.fw_mediacount == 0) \ 1019 fwengine |= FORCEWAKE_MEDIA; \ 1020 } \ 1021 } \ 1022 if (fwengine) \ 1023 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ 1024 __raw_i915_write##x(dev_priv, reg, val); \ 1025 if (fwengine) \ 1026 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ 1027 REG_WRITE_FOOTER; \ 1028 } 1029 1030 static const u32 gen9_shadowed_regs[] = { 1031 RING_TAIL(RENDER_RING_BASE), 1032 RING_TAIL(GEN6_BSD_RING_BASE), 1033 RING_TAIL(VEBOX_RING_BASE), 1034 RING_TAIL(BLT_RING_BASE), 1035 FORCEWAKE_BLITTER_GEN9, 1036 FORCEWAKE_RENDER_GEN9, 1037 FORCEWAKE_MEDIA_GEN9, 1038 GEN6_RPNSWREQ, 1039 GEN6_RC_VIDEO_FREQ, 1040 /* TODO: Other registers are not yet used */ 1041 }; 1042 1043 static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg) 1044 { 1045 int i; 1046 for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) 1047 if (reg == gen9_shadowed_regs[i]) 1048 return true; 1049 1050 return false; 1051 } 1052 1053 #define __gen9_write(x) \ 1054 static void \ 1055 gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ 1056 bool trace) { \ 1057 REG_WRITE_HEADER; \ 1058 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \ 1059 is_gen9_shadowed(dev_priv, reg)) { \ 1060 __raw_i915_write##x(dev_priv, reg, val); \ 1061 } else { \ 1062 unsigned fwengine = 0; \ 1063 if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \ 1064 if (dev_priv->uncore.fw_rendercount == 0) \ 1065 fwengine = FORCEWAKE_RENDER; \ 1066 } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \ 1067 if (dev_priv->uncore.fw_mediacount == 0) \ 1068 fwengine = FORCEWAKE_MEDIA; \ 1069 } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \ 1070 if (dev_priv->uncore.fw_rendercount == 0) \ 1071 fwengine |= FORCEWAKE_RENDER; \ 1072 if (dev_priv->uncore.fw_mediacount == 0) \ 1073 fwengine |= FORCEWAKE_MEDIA; \ 1074 } else { \ 1075 if (dev_priv->uncore.fw_blittercount == 0) \ 1076 fwengine = FORCEWAKE_BLITTER; \ 1077 } \ 1078 if (fwengine) \ 1079 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 1080 fwengine); \ 1081 __raw_i915_write##x(dev_priv, reg, val); \ 1082 if (fwengine) \ 1083 dev_priv->uncore.funcs.force_wake_put(dev_priv, \ 1084 fwengine); \ 1085 } \ 1086 REG_WRITE_FOOTER; \ 1087 } 1088 1089 __gen9_write(8) 1090 __gen9_write(16) 1091 __gen9_write(32) 1092 __gen9_write(64) 1093 __chv_write(8) 1094 __chv_write(16) 1095 __chv_write(32) 1096 __chv_write(64) 1097 __gen8_write(8) 1098 __gen8_write(16) 1099 __gen8_write(32) 1100 __gen8_write(64) 1101 __hsw_write(8) 1102 __hsw_write(16) 1103 __hsw_write(32) 1104 __hsw_write(64) 1105 __gen6_write(8) 1106 __gen6_write(16) 1107 __gen6_write(32) 1108 __gen6_write(64) 1109 __gen5_write(8) 1110 __gen5_write(16) 1111 __gen5_write(32) 1112 __gen5_write(64) 1113 __gen4_write(8) 1114 __gen4_write(16) 1115 __gen4_write(32) 1116 __gen4_write(64) 1117 1118 #undef __gen9_write 1119 #undef __chv_write 1120 #undef __gen8_write 1121 #undef __hsw_write 1122 #undef __gen6_write 1123 #undef __gen5_write 1124 #undef __gen4_write 1125 #undef REG_WRITE_FOOTER 1126 #undef REG_WRITE_HEADER 1127 1128 #define ASSIGN_WRITE_MMIO_VFUNCS(x) \ 1129 do { \ 1130 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \ 1131 dev_priv->uncore.funcs.mmio_writew = x##_write16; \ 1132 dev_priv->uncore.funcs.mmio_writel = x##_write32; \ 1133 dev_priv->uncore.funcs.mmio_writeq = x##_write64; \ 1134 } while (0) 1135 1136 #define ASSIGN_READ_MMIO_VFUNCS(x) \ 1137 do { \ 1138 dev_priv->uncore.funcs.mmio_readb = x##_read8; \ 1139 dev_priv->uncore.funcs.mmio_readw = x##_read16; \ 1140 dev_priv->uncore.funcs.mmio_readl = x##_read32; \ 1141 dev_priv->uncore.funcs.mmio_readq = x##_read64; \ 1142 } while (0) 1143 1144 void intel_uncore_init(struct drm_device *dev) 1145 { 1146 struct drm_i915_private *dev_priv = dev->dev_private; 1147 1148 setup_timer(&dev_priv->uncore.force_wake_timer, 1149 gen6_force_wake_timer, (unsigned long)dev_priv); 1150 1151 __intel_uncore_early_sanitize(dev, false); 1152 1153 if (IS_GEN9(dev)) { 1154 dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get; 1155 dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put; 1156 } else if (IS_VALLEYVIEW(dev)) { 1157 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; 1158 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; 1159 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1160 dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; 1161 dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; 1162 } else if (IS_IVYBRIDGE(dev)) { 1163 u32 ecobus; 1164 1165 /* IVB configs may use multi-threaded forcewake */ 1166 1167 /* A small trick here - if the bios hasn't configured 1168 * MT forcewake, and if the device is in RC6, then 1169 * force_wake_mt_get will not wake the device and the 1170 * ECOBUS read will return zero. Which will be 1171 * (correctly) interpreted by the test below as MT 1172 * forcewake being disabled. 1173 */ 1174 mutex_lock(&dev->struct_mutex); 1175 __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL); 1176 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1177 __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL); 1178 mutex_unlock(&dev->struct_mutex); 1179 1180 if (ecobus & FORCEWAKE_MT_ENABLE) { 1181 dev_priv->uncore.funcs.force_wake_get = 1182 __gen7_gt_force_wake_mt_get; 1183 dev_priv->uncore.funcs.force_wake_put = 1184 __gen7_gt_force_wake_mt_put; 1185 } else { 1186 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1187 DRM_INFO("when using vblank-synced partial screen updates.\n"); 1188 dev_priv->uncore.funcs.force_wake_get = 1189 __gen6_gt_force_wake_get; 1190 dev_priv->uncore.funcs.force_wake_put = 1191 __gen6_gt_force_wake_put; 1192 } 1193 } else if (IS_GEN6(dev)) { 1194 dev_priv->uncore.funcs.force_wake_get = 1195 __gen6_gt_force_wake_get; 1196 dev_priv->uncore.funcs.force_wake_put = 1197 __gen6_gt_force_wake_put; 1198 } 1199 1200 switch (INTEL_INFO(dev)->gen) { 1201 default: 1202 MISSING_CASE(INTEL_INFO(dev)->gen); 1203 return; 1204 case 9: 1205 ASSIGN_WRITE_MMIO_VFUNCS(gen9); 1206 ASSIGN_READ_MMIO_VFUNCS(gen9); 1207 break; 1208 case 8: 1209 if (IS_CHERRYVIEW(dev)) { 1210 ASSIGN_WRITE_MMIO_VFUNCS(chv); 1211 ASSIGN_READ_MMIO_VFUNCS(chv); 1212 1213 } else { 1214 ASSIGN_WRITE_MMIO_VFUNCS(gen8); 1215 ASSIGN_READ_MMIO_VFUNCS(gen6); 1216 } 1217 break; 1218 case 7: 1219 case 6: 1220 if (IS_HASWELL(dev)) { 1221 ASSIGN_WRITE_MMIO_VFUNCS(hsw); 1222 } else { 1223 ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1224 } 1225 1226 if (IS_VALLEYVIEW(dev)) { 1227 ASSIGN_READ_MMIO_VFUNCS(vlv); 1228 } else { 1229 ASSIGN_READ_MMIO_VFUNCS(gen6); 1230 } 1231 break; 1232 case 5: 1233 ASSIGN_WRITE_MMIO_VFUNCS(gen5); 1234 ASSIGN_READ_MMIO_VFUNCS(gen5); 1235 break; 1236 case 4: 1237 case 3: 1238 case 2: 1239 ASSIGN_WRITE_MMIO_VFUNCS(gen4); 1240 ASSIGN_READ_MMIO_VFUNCS(gen4); 1241 break; 1242 } 1243 1244 i915_check_and_clear_faults(dev); 1245 } 1246 #undef ASSIGN_WRITE_MMIO_VFUNCS 1247 #undef ASSIGN_READ_MMIO_VFUNCS 1248 1249 void intel_uncore_fini(struct drm_device *dev) 1250 { 1251 /* Paranoia: make sure we have disabled everything before we exit. */ 1252 intel_uncore_sanitize(dev); 1253 intel_uncore_forcewake_reset(dev, false); 1254 } 1255 1256 #define GEN_RANGE(l, h) GENMASK(h, l) 1257 1258 static const struct register_whitelist { 1259 uint64_t offset; 1260 uint32_t size; 1261 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 1262 uint32_t gen_bitmask; 1263 } whitelist[] = { 1264 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) }, 1265 }; 1266 1267 int i915_reg_read_ioctl(struct drm_device *dev, 1268 void *data, struct drm_file *file) 1269 { 1270 struct drm_i915_private *dev_priv = dev->dev_private; 1271 struct drm_i915_reg_read *reg = data; 1272 struct register_whitelist const *entry = whitelist; 1273 int i, ret = 0; 1274 1275 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1276 if (entry->offset == reg->offset && 1277 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1278 break; 1279 } 1280 1281 if (i == ARRAY_SIZE(whitelist)) 1282 return -EINVAL; 1283 1284 intel_runtime_pm_get(dev_priv); 1285 1286 switch (entry->size) { 1287 case 8: 1288 reg->val = I915_READ64(reg->offset); 1289 break; 1290 case 4: 1291 reg->val = I915_READ(reg->offset); 1292 break; 1293 case 2: 1294 reg->val = I915_READ16(reg->offset); 1295 break; 1296 case 1: 1297 reg->val = I915_READ8(reg->offset); 1298 break; 1299 default: 1300 MISSING_CASE(entry->size); 1301 ret = -EINVAL; 1302 goto out; 1303 } 1304 1305 out: 1306 intel_runtime_pm_put(dev_priv); 1307 return ret; 1308 } 1309 1310 int i915_get_reset_stats_ioctl(struct drm_device *dev, 1311 void *data, struct drm_file *file) 1312 { 1313 struct drm_i915_private *dev_priv = dev->dev_private; 1314 struct drm_i915_reset_stats *args = data; 1315 struct i915_ctx_hang_stats *hs; 1316 struct intel_context *ctx; 1317 int ret; 1318 1319 if (args->flags || args->pad) 1320 return -EINVAL; 1321 1322 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) 1323 return -EPERM; 1324 1325 ret = mutex_lock_interruptible(&dev->struct_mutex); 1326 if (ret) 1327 return ret; 1328 1329 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); 1330 if (IS_ERR(ctx)) { 1331 mutex_unlock(&dev->struct_mutex); 1332 return PTR_ERR(ctx); 1333 } 1334 hs = &ctx->hang_stats; 1335 1336 if (capable(CAP_SYS_ADMIN)) 1337 args->reset_count = i915_reset_count(&dev_priv->gpu_error); 1338 else 1339 args->reset_count = 0; 1340 1341 args->batch_active = hs->batch_active; 1342 args->batch_pending = hs->batch_pending; 1343 1344 mutex_unlock(&dev->struct_mutex); 1345 1346 return 0; 1347 } 1348 1349 static int i915_reset_complete(struct drm_device *dev) 1350 { 1351 u8 gdrst; 1352 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1353 return (gdrst & GRDOM_RESET_STATUS) == 0; 1354 } 1355 1356 static int i915_do_reset(struct drm_device *dev) 1357 { 1358 /* assert reset for at least 20 usec */ 1359 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1360 udelay(20); 1361 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1362 1363 return wait_for(i915_reset_complete(dev), 500); 1364 } 1365 1366 static int g4x_reset_complete(struct drm_device *dev) 1367 { 1368 u8 gdrst; 1369 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1370 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1371 } 1372 1373 static int g33_do_reset(struct drm_device *dev) 1374 { 1375 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1376 return wait_for(g4x_reset_complete(dev), 500); 1377 } 1378 1379 static int g4x_do_reset(struct drm_device *dev) 1380 { 1381 struct drm_i915_private *dev_priv = dev->dev_private; 1382 int ret; 1383 1384 pci_write_config_byte(dev->pdev, I915_GDRST, 1385 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1386 ret = wait_for(g4x_reset_complete(dev), 500); 1387 if (ret) 1388 return ret; 1389 1390 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1391 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1392 POSTING_READ(VDECCLK_GATE_D); 1393 1394 pci_write_config_byte(dev->pdev, I915_GDRST, 1395 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1396 ret = wait_for(g4x_reset_complete(dev), 500); 1397 if (ret) 1398 return ret; 1399 1400 /* WaVcpClkGateDisableForMediaReset:ctg,elk */ 1401 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1402 POSTING_READ(VDECCLK_GATE_D); 1403 1404 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1405 1406 return 0; 1407 } 1408 1409 static int ironlake_do_reset(struct drm_device *dev) 1410 { 1411 struct drm_i915_private *dev_priv = dev->dev_private; 1412 int ret; 1413 1414 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1415 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1416 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1417 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1418 if (ret) 1419 return ret; 1420 1421 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1422 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1423 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1424 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1425 if (ret) 1426 return ret; 1427 1428 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0); 1429 1430 return 0; 1431 } 1432 1433 static int gen6_do_reset(struct drm_device *dev) 1434 { 1435 struct drm_i915_private *dev_priv = dev->dev_private; 1436 int ret; 1437 1438 /* Reset the chip */ 1439 1440 /* GEN6_GDRST is not in the gt power well, no need to check 1441 * for fifo space for the write or forcewake the chip for 1442 * the read 1443 */ 1444 __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL); 1445 1446 /* Spin waiting for the device to ack the reset request */ 1447 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 1448 1449 intel_uncore_forcewake_reset(dev, true); 1450 1451 return ret; 1452 } 1453 1454 int intel_gpu_reset(struct drm_device *dev) 1455 { 1456 if (INTEL_INFO(dev)->gen >= 6) 1457 return gen6_do_reset(dev); 1458 else if (IS_GEN5(dev)) 1459 return ironlake_do_reset(dev); 1460 else if (IS_G4X(dev)) 1461 return g4x_do_reset(dev); 1462 else if (IS_G33(dev)) 1463 return g33_do_reset(dev); 1464 else if (INTEL_INFO(dev)->gen >= 3) 1465 return i915_do_reset(dev); 1466 else 1467 return -ENODEV; 1468 } 1469 1470 void intel_uncore_check_errors(struct drm_device *dev) 1471 { 1472 struct drm_i915_private *dev_priv = dev->dev_private; 1473 1474 if (HAS_FPGA_DBG_UNCLAIMED(dev) && 1475 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1476 DRM_ERROR("Unclaimed register before interrupt\n"); 1477 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1478 } 1479 } 1480