/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
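/*
 * Illustrative note (not part of the original file): each table above maps
 * the abstract HPD pins (HPD_CRT, HPD_PORT_B, ...) to the platform-specific
 * hotplug trigger or enable bits, and most are used as the 'hpd' argument
 * of intel_get_hpd_pins() further down to translate a hardware trigger
 * mask into a pin mask.
 */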
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
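/*
 * Illustrative example (not part of the original file) of the
 * interrupt_mask/enabled_irq_mask convention shared by ilk_update_gt_irq()
 * above and the other *_update_*_irq() helpers below:
 *
 *	ilk_update_gt_irq(dev_priv, A | B, A);
 *
 * touches only bits A and B in GTIMR: A is unmasked (IMR bit cleared, so
 * the interrupt is delivered) and B is masked (IMR bit set). Bits outside
 * interrupt_mask keep their previous state, and enabled_irq_mask must be a
 * subset of interrupt_mask (hence the WARN_ON).
 */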
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB are known to hard hang, and VLV and CHV may hard
	 * hang, on a looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is
	 * masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
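/*
 * Illustrative note (not part of the original file): in the PIPESTAT
 * registers the enable bits live in the high 16 bits and the matching
 * status bits in the low 16 bits, which is why the callers below can
 * usually derive enable_mask as status_mask << 16. E.g.
 *
 *	i915_enable_pipestat(dev_priv, PIPE_A,
 *			     PIPE_START_VBLANK_INTERRUPT_STATUS);
 *
 * sets the corresponding enable bit and writes the status bit back to
 * clear any event that was already pending.
 */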
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/ .         \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}
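/*
 * Worked example for the vbl_start conversion in i915_get_vblank_counter()
 * above (illustrative numbers from standard 1080p CEA timings, not part of
 * the original file): with htotal = 2200, hsync_start = 2008 and
 * crtc_vblank_start = 1080,
 *
 *	vbl_start = 1080 * 2200 - (2200 - 2008) = 2375808 pixels,
 *
 * i.e. the start of vblank is deemed to occur 192 pixels before the first
 * pixel of line 1080 (0-based), at the start of hsync on line 1079.
 */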
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
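/*
 * Illustrative numbers for the position convention in
 * i915_get_crtc_scanoutpos() above (not part of the original file): with
 * vbl_start = 1080 and vbl_end = vtotal = 1125, a raw scanline of 1100 is
 * inside vblank and is reported as 1100 - 1125 = -25, counting up towards
 * 0 at vbl_end, while a raw scanline of 100 is reported as
 * 100 + (1125 - 1125) = 100 lines into the active area.
 */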
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}
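/*
 * Illustrative note on gen6_pm_rps_work() below (not part of the original
 * file): rps.last_adj makes the frequency ramp exponential. Consecutive
 * up-threshold interrupts request steps of +1, +2, +4, ... (+2, +4, +8 on
 * CHV, which needs even encodings), down-threshold interrupts mirror this
 * with negative steps, and any other event resets the step to 0.
 */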
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}
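/*
 * Illustrative summary of the GEN8 GT interrupt layout handled above (not
 * part of the original file): the GT interrupts are banked into four IIR
 * registers, GEN8_GT_IIR(0) for RCS/BCS, GEN8_GT_IIR(1) for VCS1/VCS2,
 * GEN8_GT_IIR(2) for the PM/RPS events and GEN8_GT_IIR(3) for VECS, which
 * is also why gen6_pm_iir() above returns GEN8_GT_IIR(2) on gen8+.
 */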
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/* Get a bit mask of pins that have triggered, and which ones may be long. */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	*pin_mask = 0;
	*long_mask = 0;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask, long_mask;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_g4x,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		/* Match the status word selected above: use the i915 status
		 * table, not the g4x one, to translate triggers into pins. */
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_i915,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask, long_mask;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_ibx,
				   pch_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask, long_mask;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_cpt,
				   pch_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
1891 */
1892 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1893 {
1894 struct drm_device *dev = arg;
1895 struct drm_i915_private *dev_priv = dev->dev_private;
1896 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1897 irqreturn_t ret = IRQ_NONE;
1898
1899 if (!intel_irqs_enabled(dev_priv))
1900 return IRQ_NONE;
1901
1902 /* We get interrupts on unclaimed registers, so check for this before we
1903 * do any I915_{READ,WRITE}. */
1904 intel_uncore_check_errors(dev);
1905
1906 /* disable master interrupt before clearing iir */
1907 de_ier = I915_READ(DEIER);
1908 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1909 POSTING_READ(DEIER);
1910
1911 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1912 * interrupts will be stored on its back queue, and then we'll be
1913 * able to process them after we restore SDEIER (as soon as we restore
1914 * it, we'll get an interrupt if SDEIIR still has something to process
1915 * due to its back queue). */
1916 if (!HAS_PCH_NOP(dev)) {
1917 sde_ier = I915_READ(SDEIER);
1918 I915_WRITE(SDEIER, 0);
1919 POSTING_READ(SDEIER);
1920 }
1921
1922 /* Find, clear, then process each source of interrupt */
1923
1924 gt_iir = I915_READ(GTIIR);
1925 if (gt_iir) {
1926 I915_WRITE(GTIIR, gt_iir);
1927 ret = IRQ_HANDLED;
1928 if (INTEL_INFO(dev)->gen >= 6)
1929 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1930 else
1931 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1932 }
1933
1934 de_iir = I915_READ(DEIIR);
1935 if (de_iir) {
1936 I915_WRITE(DEIIR, de_iir);
1937 ret = IRQ_HANDLED;
1938 if (INTEL_INFO(dev)->gen >= 7)
1939 ivb_display_irq_handler(dev, de_iir);
1940 else
1941 ilk_display_irq_handler(dev, de_iir);
1942 }
1943
1944 if (INTEL_INFO(dev)->gen >= 6) {
1945 u32 pm_iir = I915_READ(GEN6_PMIIR);
1946 if (pm_iir) {
1947 I915_WRITE(GEN6_PMIIR, pm_iir);
1948 ret = IRQ_HANDLED;
1949 gen6_rps_irq_handler(dev_priv, pm_iir);
1950 }
1951 }
1952
1953 I915_WRITE(DEIER, de_ier);
1954 POSTING_READ(DEIER);
1955 if (!HAS_PCH_NOP(dev)) {
1956 I915_WRITE(SDEIER, sde_ier);
1957 POSTING_READ(SDEIER);
1958 }
1959
1960 return ret;
1961 }
1962
1963 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
1964 {
1965 struct drm_i915_private *dev_priv = dev->dev_private;
1966 u32 hp_control, hp_trigger;
1967 u32 pin_mask, long_mask;
1968
1969 /* Get the status */
1970 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
1971 hp_control = I915_READ(BXT_HOTPLUG_CTL);
1972
1973 /* Hotplug not enabled?
*/
1974 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
1975 DRM_ERROR("Interrupt when HPD disabled\n");
1976 return;
1977 }
1978
1979 /* Clear sticky bits in hpd status */
1980 I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
1981
1982 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
1983 hpd_bxt, bxt_port_hotplug_long_detect);
1984 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1985 }
1986
1987 static irqreturn_t gen8_irq_handler(int irq, void *arg)
1988 {
1989 struct drm_device *dev = arg;
1990 struct drm_i915_private *dev_priv = dev->dev_private;
1991 u32 master_ctl;
1992 irqreturn_t ret = IRQ_NONE;
1993 uint32_t tmp = 0;
1994 enum pipe pipe;
1995 u32 aux_mask = GEN8_AUX_CHANNEL_A;
1996
1997 if (!intel_irqs_enabled(dev_priv))
1998 return IRQ_NONE;
1999
2000 if (IS_GEN9(dev))
2001 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2002 GEN9_AUX_CHANNEL_D;
2003
2004 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2005 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2006 if (!master_ctl)
2007 return IRQ_NONE;
2008
2009 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2010
2011 /* Find, clear, then process each source of interrupt */
2012
2013 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2014
2015 if (master_ctl & GEN8_DE_MISC_IRQ) {
2016 tmp = I915_READ(GEN8_DE_MISC_IIR);
2017 if (tmp) {
2018 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2019 ret = IRQ_HANDLED;
2020 if (tmp & GEN8_DE_MISC_GSE)
2021 intel_opregion_asle_intr(dev);
2022 else
2023 DRM_ERROR("Unexpected DE Misc interrupt\n");
2024 }
2025 else
2026 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2027 }
2028
2029 if (master_ctl & GEN8_DE_PORT_IRQ) {
2030 tmp = I915_READ(GEN8_DE_PORT_IIR);
2031 if (tmp) {
2032 bool found = false;
2033
2034 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2035 ret = IRQ_HANDLED;
2036
2037 if (tmp & aux_mask) {
2038 dp_aux_irq_handler(dev);
2039 found = true;
2040 }
2041
2042 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_HOTPLUG_MASK)) {
2043 bxt_hpd_handler(dev, tmp);
2044 found = true;
2045 }
2046
2047 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2048 gmbus_irq_handler(dev);
2049 found = true;
2050 }
2051
2052 if (!found)
2053 DRM_ERROR("Unexpected DE Port interrupt\n");
2054 }
2055 else
2056 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2057 }
2058
2059 for_each_pipe(dev_priv, pipe) {
2060 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2061
2062 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2063 continue;
2064
2065 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2066 if (pipe_iir) {
2067 ret = IRQ_HANDLED;
2068 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2069
2070 if (pipe_iir & GEN8_PIPE_VBLANK &&
2071 intel_pipe_handle_vblank(dev, pipe))
2072 intel_check_page_flip(dev, pipe);
2073
2074 if (IS_GEN9(dev))
2075 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2076 else
2077 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2078
2079 if (flip_done) {
2080 intel_prepare_page_flip(dev, pipe);
2081 intel_finish_page_flip_plane(dev, pipe);
2082 }
2083
2084 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2085 hsw_pipe_crc_irq_handler(dev, pipe);
2086
2087 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2088 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2089 pipe);
2090
2091
2092 if (IS_GEN9(dev))
2093 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2094 else
2095 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2096
2097 if (fault_errors)
2098 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2099 pipe_name(pipe),
2100 fault_errors);
2101 } else
2102 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2103 }
2104
2105 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2106 master_ctl & GEN8_DE_PCH_IRQ) {
2107 /*
2108 * FIXME(BDW): Assume for now that the new interrupt handling
2109 * scheme also closed the SDE interrupt handling race we've seen
2110 * on older pch-split platforms. But this needs testing.
2111 */
2112 u32 pch_iir = I915_READ(SDEIIR);
2113 if (pch_iir) {
2114 I915_WRITE(SDEIIR, pch_iir);
2115 ret = IRQ_HANDLED;
2116 cpt_irq_handler(dev, pch_iir);
2117 } else
2118 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2119
2120 }
2121
2122 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2123 POSTING_READ_FW(GEN8_MASTER_IRQ);
2124
2125 return ret;
2126 }
2127
2128 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2129 bool reset_completed)
2130 {
2131 struct intel_engine_cs *ring;
2132 int i;
2133
2134 /*
2135 * Notify all waiters for GPU completion events that reset state has
2136 * been changed, and that they need to restart their wait after
2137 * checking for potential errors (and bail out to drop locks if there is
2138 * a gpu reset pending so that i915_error_work_func can acquire them).
2139 */
2140
2141 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2142 for_each_ring(ring, dev_priv, i)
2143 wake_up_all(&ring->irq_queue);
2144
2145 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2146 wake_up_all(&dev_priv->pending_flip_queue);
2147
2148 /*
2149 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2150 * reset state is cleared.
2151 */
2152 if (reset_completed)
2153 wake_up_all(&dev_priv->gpu_error.reset_queue);
2154 }
2155
2156 /**
2157 * i915_reset_and_wakeup - do process context error handling work
2158 *
2159 * Fire an error uevent so userspace can see that a hang or error
2160 * was detected.
2161 */
2162 static void i915_reset_and_wakeup(struct drm_device *dev)
2163 {
2164 struct drm_i915_private *dev_priv = to_i915(dev);
2165 struct i915_gpu_error *error = &dev_priv->gpu_error;
2166 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2167 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2168 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2169 int ret;
2170
2171 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2172
2173 /*
2174 * Note that there's only one work item which does gpu resets, so we
2175 * need not worry about concurrent gpu resets potentially incrementing
2176 * error->reset_counter twice. We only need to take care of another
2177 * racing irq/hangcheck declaring the gpu dead for a second time. A
2178 * quick check for that is good enough: schedule_work ensures the
2179 * correct ordering between hang detection and this work item, and since
2180 * the reset in-progress bit is only ever set by code outside of this
2181 * work we don't need to worry about any other races.
2182 */
2183 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2184 DRM_DEBUG_DRIVER("resetting chip\n");
2185 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2186 reset_event);
2187
2188 /*
2189 * In most cases it's guaranteed that we get here with an RPM
2190 * reference held, for example because there is a pending GPU
2191 * request that won't finish until the reset is done. This
2192 * isn't the case at least when we get here by doing a
2193 * simulated reset via debugfs, so get an RPM reference.
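 *
 * (Illustration, assuming the usual debugfs layout: something like
 *	echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 * reaches this function through i915_handle_error() with no GPU request
 * pinning runtime PM for us, hence the explicit get/put pair below.)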
2194 */
2195 intel_runtime_pm_get(dev_priv);
2196
2197 intel_prepare_reset(dev);
2198
2199 /*
2200 * All state reset _must_ be completed before we update the
2201 * reset counter, for otherwise waiters might miss the reset
2202 * pending state and not properly drop locks, resulting in
2203 * deadlocks with the reset work.
2204 */
2205 ret = i915_reset(dev);
2206
2207 intel_finish_reset(dev);
2208
2209 intel_runtime_pm_put(dev_priv);
2210
2211 if (ret == 0) {
2212 /*
2213 * After all the gem state is reset, increment the reset
2214 * counter and wake up everyone waiting for the reset to
2215 * complete.
2216 *
2217 * Since unlock operations are a one-sided barrier only,
2218 * we need to insert a barrier here to order any seqno
2219 * updates before
2220 * the counter increment.
2221 */
2222 smp_mb__before_atomic();
2223 atomic_inc(&dev_priv->gpu_error.reset_counter);
2224
2225 kobject_uevent_env(&dev->primary->kdev->kobj,
2226 KOBJ_CHANGE, reset_done_event);
2227 } else {
2228 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2229 }
2230
2231 /*
2232 * Note: The wake_up also serves as a memory barrier so that
2233 * waiters see the updated value of the reset counter atomic_t.
2234 */
2235 i915_error_wake_up(dev_priv, true);
2236 }
2237 }
2238
2239 static void i915_report_and_clear_eir(struct drm_device *dev)
2240 {
2241 struct drm_i915_private *dev_priv = dev->dev_private;
2242 uint32_t instdone[I915_NUM_INSTDONE_REG];
2243 u32 eir = I915_READ(EIR);
2244 int pipe, i;
2245
2246 if (!eir)
2247 return;
2248
2249 pr_err("render error detected, EIR: 0x%08x\n", eir);
2250
2251 i915_get_extra_instdone(dev, instdone);
2252
2253 if (IS_G4X(dev)) {
2254 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2255 u32 ipeir = I915_READ(IPEIR_I965);
2256
2257 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2258 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2259 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2260 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2261 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2262 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2263 I915_WRITE(IPEIR_I965, ipeir);
2264 POSTING_READ(IPEIR_I965);
2265 }
2266 if (eir & GM45_ERROR_PAGE_TABLE) {
2267 u32 pgtbl_err = I915_READ(PGTBL_ER);
2268 pr_err("page table error\n");
2269 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2270 I915_WRITE(PGTBL_ER, pgtbl_err);
2271 POSTING_READ(PGTBL_ER);
2272 }
2273 }
2274
2275 if (!IS_GEN2(dev)) {
2276 if (eir & I915_ERROR_PAGE_TABLE) {
2277 u32 pgtbl_err = I915_READ(PGTBL_ER);
2278 pr_err("page table error\n");
2279 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2280 I915_WRITE(PGTBL_ER, pgtbl_err);
2281 POSTING_READ(PGTBL_ER);
2282 }
2283 }
2284
2285 if (eir & I915_ERROR_MEMORY_REFRESH) {
2286 pr_err("memory refresh error:\n");
2287 for_each_pipe(dev_priv, pipe)
2288 pr_err("pipe %c stat: 0x%08x\n",
2289 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2290 /* pipestat has already been acked */
2291 }
2292 if (eir & I915_ERROR_INSTRUCTION) {
2293 pr_err("instruction error\n");
2294 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2295 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2296 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2297 if (INTEL_INFO(dev)->gen < 4) {
2298 u32 ipeir = I915_READ(IPEIR);
2299
2300 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2301 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2302 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2303 I915_WRITE(IPEIR, ipeir);
2304 POSTING_READ(IPEIR);
2305 } else {
2306 u32 ipeir = I915_READ(IPEIR_I965);
2307
2308 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2309 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2310 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2311 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2312 I915_WRITE(IPEIR_I965, ipeir);
2313 POSTING_READ(IPEIR_I965);
2314 }
2315 }
2316
2317 I915_WRITE(EIR, eir);
2318 POSTING_READ(EIR);
2319 eir = I915_READ(EIR);
2320 if (eir) {
2321 /*
2322 * some errors might have become stuck,
2323 * mask them.
2324 */
2325 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2326 I915_WRITE(EMR, I915_READ(EMR) | eir);
2327 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2328 }
2329 }
2330
2331 /**
2332 * i915_handle_error - handle a gpu error
2333 * @dev: drm device
2334 *
2335 * Do some basic checking of register state at error time and
2336 * dump it to the syslog. Also call i915_capture_error_state() to make
2337 * sure we get a record and make it available in debugfs. Fire a uevent
2338 * so userspace knows something bad happened (should trigger collection
2339 * of a ring dump etc.).
2340 */
2341 void i915_handle_error(struct drm_device *dev, bool wedged,
2342 const char *fmt, ...)
2343 {
2344 struct drm_i915_private *dev_priv = dev->dev_private;
2345 va_list args;
2346 char error_msg[80];
2347
2348 va_start(args, fmt);
2349 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2350 va_end(args);
2351
2352 i915_capture_error_state(dev, wedged, error_msg);
2353 i915_report_and_clear_eir(dev);
2354
2355 if (wedged) {
2356 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2357 &dev_priv->gpu_error.reset_counter);
2358
2359 /*
2360 * Wake up waiting processes so that the reset function
2361 * i915_reset_and_wakeup doesn't deadlock trying to grab
2362 * various locks. By bumping the reset counter first, the woken
2363 * processes will see a reset in progress and back off,
2364 * releasing their locks and then wait for the reset completion.
2365 * We must do this for _all_ gpu waiters that might hold locks
2366 * that the reset work needs to acquire.
2367 *
2368 * Note: The wake_up serves as the required memory barrier to
2369 * ensure that the waiters see the updated value of the reset
2370 * counter atomic_t.
2371 */
2372 i915_error_wake_up(dev_priv, false);
2373 }
2374
2375 i915_reset_and_wakeup(dev);
2376 }
2377
2378 /* Called from drm generic code, passed 'crtc' which
2379 * we use as a pipe index
2380 */
2381 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2382 {
2383 struct drm_i915_private *dev_priv = dev->dev_private;
2384 unsigned long irqflags;
2385
2386 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2387 if (INTEL_INFO(dev)->gen >= 4)
2388 i915_enable_pipestat(dev_priv, pipe,
2389 PIPE_START_VBLANK_INTERRUPT_STATUS);
2390 else
2391 i915_enable_pipestat(dev_priv, pipe,
2392 PIPE_VBLANK_INTERRUPT_STATUS);
2393 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2394
2395 return 0;
2396 }
2397
2398 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2399 {
2400 struct drm_i915_private *dev_priv = dev->dev_private;
2401 unsigned long irqflags;
2402 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ?
DE_PIPE_VBLANK_IVB(pipe) : 2403 DE_PIPE_VBLANK(pipe); 2404 2405 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2406 ironlake_enable_display_irq(dev_priv, bit); 2407 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2408 2409 return 0; 2410 } 2411 2412 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2413 { 2414 struct drm_i915_private *dev_priv = dev->dev_private; 2415 unsigned long irqflags; 2416 2417 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2418 i915_enable_pipestat(dev_priv, pipe, 2419 PIPE_START_VBLANK_INTERRUPT_STATUS); 2420 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2421 2422 return 0; 2423 } 2424 2425 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2426 { 2427 struct drm_i915_private *dev_priv = dev->dev_private; 2428 unsigned long irqflags; 2429 2430 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2431 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2432 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2433 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2434 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2435 return 0; 2436 } 2437 2438 /* Called from drm generic code, passed 'crtc' which 2439 * we use as a pipe index 2440 */ 2441 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2442 { 2443 struct drm_i915_private *dev_priv = dev->dev_private; 2444 unsigned long irqflags; 2445 2446 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2447 i915_disable_pipestat(dev_priv, pipe, 2448 PIPE_VBLANK_INTERRUPT_STATUS | 2449 PIPE_START_VBLANK_INTERRUPT_STATUS); 2450 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2451 } 2452 2453 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2454 { 2455 struct drm_i915_private *dev_priv = dev->dev_private; 2456 unsigned long irqflags; 2457 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) :
2458 DE_PIPE_VBLANK(pipe);
2459
2460 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2461 ironlake_disable_display_irq(dev_priv, bit);
2462 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2463 }
2464
2465 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2466 {
2467 struct drm_i915_private *dev_priv = dev->dev_private;
2468 unsigned long irqflags;
2469
2470 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2471 i915_disable_pipestat(dev_priv, pipe,
2472 PIPE_START_VBLANK_INTERRUPT_STATUS);
2473 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2474 }
2475
2476 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2477 {
2478 struct drm_i915_private *dev_priv = dev->dev_private;
2479 unsigned long irqflags;
2480
2481 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2482 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2483 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2484 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2485 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2486 }
2487
2488 static bool
2489 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2490 {
2491 return (list_empty(&ring->request_list) ||
2492 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2493 }
2494
2495 static bool
2496 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2497 {
2498 if (INTEL_INFO(dev)->gen >= 8) {
2499 return (ipehr >> 23) == 0x1c;
2500 } else {
2501 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2502 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2503 MI_SEMAPHORE_REGISTER);
2504 }
2505 }
2506
2507 static struct intel_engine_cs *
2508 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2509 {
2510 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2511 struct intel_engine_cs *signaller;
2512 int i;
2513
2514 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2515 for_each_ring(signaller, dev_priv, i) {
2516 if (ring == signaller)
2517 continue;
2518
2519 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2520 return signaller;
2521 }
2522 } else {
2523 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2524
2525 for_each_ring(signaller, dev_priv, i) {
2526 if (ring == signaller)
2527 continue;
2528
2529 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2530 return signaller;
2531 }
2532 }
2533
2534 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2535 ring->id, ipehr, offset);
2536
2537 return NULL;
2538 }
2539
2540 static struct intel_engine_cs *
2541 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2542 {
2543 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2544 u32 cmd, ipehr, head;
2545 u64 offset = 0;
2546 int i, backwards;
2547
2548 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2549 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2550 return NULL;
2551
2552 /*
2553 * HEAD is likely pointing to the dword after the actual command,
2554 * so scan backwards until we find the MBOX. But limit it to just 3
2555 * or 4 dwords depending on the semaphore wait command size.
2556 * Note that we don't care about ACTHD here since that might
2557 * point at a batch, and semaphores are always emitted into the
2558 * ringbuffer itself.
2559 */
2560 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2561 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ?
5 : 4;
2562
2563 for (i = backwards; i; --i) {
2564 /*
2565 * Be paranoid and presume the hw has gone off into the wild -
2566 * our ring is smaller than what the hardware (and hence
2567 * HEAD_ADDR) allows. Also handles wrap-around.
2568 */
2569 head &= ring->buffer->size - 1;
2570
2571 /* This here seems to blow up */
2572 cmd = ioread32(ring->buffer->virtual_start + head);
2573 if (cmd == ipehr)
2574 break;
2575
2576 head -= 4;
2577 }
2578
2579 if (!i)
2580 return NULL;
2581
2582 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2583 if (INTEL_INFO(ring->dev)->gen >= 8) {
2584 offset = ioread32(ring->buffer->virtual_start + head + 12);
2585 offset <<= 32;
2586 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2587 }
2588 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2589 }
2590
2591 static int semaphore_passed(struct intel_engine_cs *ring)
2592 {
2593 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2594 struct intel_engine_cs *signaller;
2595 u32 seqno;
2596
2597 ring->hangcheck.deadlock++;
2598
2599 signaller = semaphore_waits_for(ring, &seqno);
2600 if (signaller == NULL)
2601 return -1;
2602
2603 /* Prevent pathological recursion due to driver bugs */
2604 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2605 return -1;
2606
2607 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2608 return 1;
2609
2610 /* cursory check for an unkickable deadlock */
2611 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2612 semaphore_passed(signaller) < 0)
2613 return -1;
2614
2615 return 0;
2616 }
2617
2618 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2619 {
2620 struct intel_engine_cs *ring;
2621 int i;
2622
2623 for_each_ring(ring, dev_priv, i)
2624 ring->hangcheck.deadlock = 0;
2625 }
2626
2627 static enum intel_ring_hangcheck_action
2628 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2629 {
2630 struct drm_device *dev = ring->dev;
2631 struct drm_i915_private *dev_priv = dev->dev_private;
2632 u32 tmp;
2633
2634 if (acthd != ring->hangcheck.acthd) {
2635 if (acthd > ring->hangcheck.max_acthd) {
2636 ring->hangcheck.max_acthd = acthd;
2637 return HANGCHECK_ACTIVE;
2638 }
2639
2640 return HANGCHECK_ACTIVE_LOOP;
2641 }
2642
2643 if (IS_GEN2(dev))
2644 return HANGCHECK_HUNG;
2645
2646 /* Is the chip hanging on a WAIT_FOR_EVENT?
2647 * If so we can simply poke the RB_WAIT bit
2648 * and break the hang. This should work on
2649 * all but the second generation chipsets.
2650 */
2651 tmp = I915_READ_CTL(ring);
2652 if (tmp & RING_WAIT) {
2653 i915_handle_error(dev, false,
2654 "Kicking stuck wait on %s",
2655 ring->name);
2656 I915_WRITE_CTL(ring, tmp);
2657 return HANGCHECK_KICK;
2658 }
2659
2660 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2661 switch (semaphore_passed(ring)) {
2662 default:
2663 return HANGCHECK_HUNG;
2664 case 1:
2665 i915_handle_error(dev, false,
2666 "Kicking stuck semaphore on %s",
2667 ring->name);
2668 I915_WRITE_CTL(ring, tmp);
2669 return HANGCHECK_KICK;
2670 case 0:
2671 return HANGCHECK_WAIT;
2672 }
2673 }
2674
2675 return HANGCHECK_HUNG;
2676 }
2677
2678 /*
2679 * This is called when the chip hasn't reported back with completed
2680 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2681 * if there is no progress, the hangcheck score for that ring is increased.
2682 * Further, acthd is inspected to see if the ring is stuck. In the stuck
2683 * case we kick the ring; a worked example of the scoring follows.
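 *
 * (Sketch, using the BUSY/KICK/HUNG weights #defined just below in
 * i915_hangcheck_elapsed(); the hung threshold is HANGCHECK_SCORE_RING_HUNG,
 * declared elsewhere in the driver:
 *
 *	score = 0;
 *	score += BUSY;	(still processing the same request)
 *	score += KICK;	(we had to kick a stuck wait)
 *	score += HUNG;	(no progress and not kickable)
 *
 * so repeated HUNG checks cross the threshold quickly, repeated kicks more
 * slowly, and a ring that resumes making seqno progress has its score
 * decremented again, letting short stalls decay back to zero.)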
 * If we see no progress on three subsequent calls
2684 * we assume the chip is wedged and try to fix it by resetting the chip.
2685 */
2686 static void i915_hangcheck_elapsed(struct work_struct *work)
2687 {
2688 struct drm_i915_private *dev_priv =
2689 container_of(work, typeof(*dev_priv),
2690 gpu_error.hangcheck_work.work);
2691 struct drm_device *dev = dev_priv->dev;
2692 struct intel_engine_cs *ring;
2693 int i;
2694 int busy_count = 0, rings_hung = 0;
2695 bool stuck[I915_NUM_RINGS] = { 0 };
2696 #define BUSY 1
2697 #define KICK 5
2698 #define HUNG 20
2699
2700 if (!i915.enable_hangcheck)
2701 return;
2702
2703 for_each_ring(ring, dev_priv, i) {
2704 u64 acthd;
2705 u32 seqno;
2706 bool busy = true;
2707
2708 semaphore_clear_deadlocks(dev_priv);
2709
2710 seqno = ring->get_seqno(ring, false);
2711 acthd = intel_ring_get_active_head(ring);
2712
2713 if (ring->hangcheck.seqno == seqno) {
2714 if (ring_idle(ring, seqno)) {
2715 ring->hangcheck.action = HANGCHECK_IDLE;
2716
2717 if (waitqueue_active(&ring->irq_queue)) {
2718 /* Issue a wake-up to catch stuck h/w. */
2719 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2720 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2721 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2722 ring->name);
2723 else
2724 DRM_INFO("Fake missed irq on %s\n",
2725 ring->name);
2726 wake_up_all(&ring->irq_queue);
2727 }
2728 /* Safeguard against driver failure */
2729 ring->hangcheck.score += BUSY;
2730 } else
2731 busy = false;
2732 } else {
2733 /* We always increment the hangcheck score
2734 * if the ring is busy and still processing
2735 * the same request, so that no single request
2736 * can run indefinitely (such as a chain of
2737 * batches). The only time we do not increment
2738 * the hangcheck score on this ring is if this
2739 * ring is in a legitimate wait for another
2740 * ring. In that case the waiting ring is a
2741 * victim and we want to be sure we catch the
2742 * right culprit. Then every time we do kick
2743 * the ring, add a small increment to the
2744 * score so that we can catch a batch that is
2745 * being repeatedly kicked and so responsible
2746 * for stalling the machine.
2747 */
2748 ring->hangcheck.action = ring_stuck(ring,
2749 acthd);
2750
2751 switch (ring->hangcheck.action) {
2752 case HANGCHECK_IDLE:
2753 case HANGCHECK_WAIT:
2754 case HANGCHECK_ACTIVE:
2755 break;
2756 case HANGCHECK_ACTIVE_LOOP:
2757 ring->hangcheck.score += BUSY;
2758 break;
2759 case HANGCHECK_KICK:
2760 ring->hangcheck.score += KICK;
2761 break;
2762 case HANGCHECK_HUNG:
2763 ring->hangcheck.score += HUNG;
2764 stuck[i] = true;
2765 break;
2766 }
2767 }
2768 } else {
2769 ring->hangcheck.action = HANGCHECK_ACTIVE;
2770
2771 /* Gradually reduce the count so that we catch DoS
2772 * attempts across multiple batches.
2773 */
2774 if (ring->hangcheck.score > 0)
2775 ring->hangcheck.score--;
2776
2777 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2778 }
2779
2780 ring->hangcheck.seqno = seqno;
2781 ring->hangcheck.acthd = acthd;
2782 busy_count += busy;
2783 }
2784
2785 for_each_ring(ring, dev_priv, i) {
2786 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2787 DRM_INFO("%s on %s\n",
2788 stuck[i] ?
"stuck" : "no progress", 2789 ring->name); 2790 rings_hung++; 2791 } 2792 } 2793 2794 if (rings_hung) 2795 return i915_handle_error(dev, true, "Ring hung"); 2796 2797 if (busy_count) 2798 /* Reset timer case chip hangs without another request 2799 * being added */ 2800 i915_queue_hangcheck(dev); 2801 } 2802 2803 void i915_queue_hangcheck(struct drm_device *dev) 2804 { 2805 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 2806 2807 if (!i915.enable_hangcheck) 2808 return; 2809 2810 /* Don't continually defer the hangcheck so that it is always run at 2811 * least once after work has been scheduled on any ring. Otherwise, 2812 * we will ignore a hung ring if a second ring is kept busy. 2813 */ 2814 2815 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 2816 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 2817 } 2818 2819 static void ibx_irq_reset(struct drm_device *dev) 2820 { 2821 struct drm_i915_private *dev_priv = dev->dev_private; 2822 2823 if (HAS_PCH_NOP(dev)) 2824 return; 2825 2826 GEN5_IRQ_RESET(SDE); 2827 2828 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 2829 I915_WRITE(SERR_INT, 0xffffffff); 2830 } 2831 2832 /* 2833 * SDEIER is also touched by the interrupt handler to work around missed PCH 2834 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2835 * instead we unconditionally enable all PCH interrupt sources here, but then 2836 * only unmask them as needed with SDEIMR. 2837 * 2838 * This function needs to be called before interrupts are enabled. 2839 */ 2840 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2841 { 2842 struct drm_i915_private *dev_priv = dev->dev_private; 2843 2844 if (HAS_PCH_NOP(dev)) 2845 return; 2846 2847 WARN_ON(I915_READ(SDEIER) != 0); 2848 I915_WRITE(SDEIER, 0xffffffff); 2849 POSTING_READ(SDEIER); 2850 } 2851 2852 static void gen5_gt_irq_reset(struct drm_device *dev) 2853 { 2854 struct drm_i915_private *dev_priv = dev->dev_private; 2855 2856 GEN5_IRQ_RESET(GT); 2857 if (INTEL_INFO(dev)->gen >= 6) 2858 GEN5_IRQ_RESET(GEN6_PM); 2859 } 2860 2861 /* drm_dma.h hooks 2862 */ 2863 static void ironlake_irq_reset(struct drm_device *dev) 2864 { 2865 struct drm_i915_private *dev_priv = dev->dev_private; 2866 2867 I915_WRITE(HWSTAM, 0xffffffff); 2868 2869 GEN5_IRQ_RESET(DE); 2870 if (IS_GEN7(dev)) 2871 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2872 2873 gen5_gt_irq_reset(dev); 2874 2875 ibx_irq_reset(dev); 2876 } 2877 2878 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2879 { 2880 enum pipe pipe; 2881 2882 I915_WRITE(PORT_HOTPLUG_EN, 0); 2883 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2884 2885 for_each_pipe(dev_priv, pipe) 2886 I915_WRITE(PIPESTAT(pipe), 0xffff); 2887 2888 GEN5_IRQ_RESET(VLV_); 2889 } 2890 2891 static void valleyview_irq_preinstall(struct drm_device *dev) 2892 { 2893 struct drm_i915_private *dev_priv = dev->dev_private; 2894 2895 /* VLV magic */ 2896 I915_WRITE(VLV_IMR, 0); 2897 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2898 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2899 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2900 2901 gen5_gt_irq_reset(dev); 2902 2903 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2904 2905 vlv_display_irq_reset(dev_priv); 2906 } 2907 2908 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 2909 { 2910 GEN8_IRQ_RESET_NDX(GT, 0); 2911 GEN8_IRQ_RESET_NDX(GT, 1); 2912 GEN8_IRQ_RESET_NDX(GT, 2); 2913 GEN8_IRQ_RESET_NDX(GT, 3); 2914 } 2915 2916 static void gen8_irq_reset(struct drm_device *dev) 2917 { 2918 struct drm_i915_private 
*dev_priv = dev->dev_private; 2919 int pipe; 2920 2921 I915_WRITE(GEN8_MASTER_IRQ, 0); 2922 POSTING_READ(GEN8_MASTER_IRQ); 2923 2924 gen8_gt_irq_reset(dev_priv); 2925 2926 for_each_pipe(dev_priv, pipe) 2927 if (intel_display_power_is_enabled(dev_priv, 2928 POWER_DOMAIN_PIPE(pipe))) 2929 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 2930 2931 GEN5_IRQ_RESET(GEN8_DE_PORT_); 2932 GEN5_IRQ_RESET(GEN8_DE_MISC_); 2933 GEN5_IRQ_RESET(GEN8_PCU_); 2934 2935 if (HAS_PCH_SPLIT(dev)) 2936 ibx_irq_reset(dev); 2937 } 2938 2939 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 2940 unsigned int pipe_mask) 2941 { 2942 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 2943 2944 spin_lock_irq(&dev_priv->irq_lock); 2945 if (pipe_mask & 1 << PIPE_A) 2946 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 2947 dev_priv->de_irq_mask[PIPE_A], 2948 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); 2949 if (pipe_mask & 1 << PIPE_B) 2950 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, 2951 dev_priv->de_irq_mask[PIPE_B], 2952 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 2953 if (pipe_mask & 1 << PIPE_C) 2954 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, 2955 dev_priv->de_irq_mask[PIPE_C], 2956 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 2957 spin_unlock_irq(&dev_priv->irq_lock); 2958 } 2959 2960 static void cherryview_irq_preinstall(struct drm_device *dev) 2961 { 2962 struct drm_i915_private *dev_priv = dev->dev_private; 2963 2964 I915_WRITE(GEN8_MASTER_IRQ, 0); 2965 POSTING_READ(GEN8_MASTER_IRQ); 2966 2967 gen8_gt_irq_reset(dev_priv); 2968 2969 GEN5_IRQ_RESET(GEN8_PCU_); 2970 2971 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2972 2973 vlv_display_irq_reset(dev_priv); 2974 } 2975 2976 static void ibx_hpd_irq_setup(struct drm_device *dev) 2977 { 2978 struct drm_i915_private *dev_priv = dev->dev_private; 2979 struct intel_encoder *intel_encoder; 2980 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2981 2982 if (HAS_PCH_IBX(dev)) { 2983 hotplug_irqs = SDE_HOTPLUG_MASK; 2984 for_each_intel_encoder(dev, intel_encoder) 2985 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) 2986 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2987 } else { 2988 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2989 for_each_intel_encoder(dev, intel_encoder) 2990 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) 2991 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2992 } 2993 2994 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2995 2996 /* 2997 * Enable digital hotplug on the PCH, and configure the DP short pulse 2998 * duration to 2ms (which is the minimum in the Display Port spec) 2999 * 3000 * This register is the same on all known PCH chips. 
3001 */ 3002 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3003 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3004 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3005 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3006 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3007 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3008 } 3009 3010 static void bxt_hpd_irq_setup(struct drm_device *dev) 3011 { 3012 struct drm_i915_private *dev_priv = dev->dev_private; 3013 struct intel_encoder *intel_encoder; 3014 u32 hotplug_port = 0; 3015 u32 hotplug_ctrl; 3016 3017 /* Now, enable HPD */ 3018 for_each_intel_encoder(dev, intel_encoder) { 3019 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state 3020 == HPD_ENABLED) 3021 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; 3022 } 3023 3024 /* Mask all HPD control bits */ 3025 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK; 3026 3027 /* Enable requested port in hotplug control */ 3028 /* TODO: implement (short) HPD support on port A */ 3029 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA); 3030 if (hotplug_port & BXT_DE_PORT_HP_DDIB) 3031 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE; 3032 if (hotplug_port & BXT_DE_PORT_HP_DDIC) 3033 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE; 3034 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl); 3035 3036 /* Unmask DDI hotplug in IMR */ 3037 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port; 3038 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl); 3039 3040 /* Enable DDI hotplug in IER */ 3041 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port; 3042 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl); 3043 POSTING_READ(GEN8_DE_PORT_IER); 3044 } 3045 3046 static void ibx_irq_postinstall(struct drm_device *dev) 3047 { 3048 struct drm_i915_private *dev_priv = dev->dev_private; 3049 u32 mask; 3050 3051 if (HAS_PCH_NOP(dev)) 3052 return; 3053 3054 if (HAS_PCH_IBX(dev)) 3055 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3056 else 3057 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3058 3059 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3060 I915_WRITE(SDEIMR, ~mask); 3061 } 3062 3063 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3064 { 3065 struct drm_i915_private *dev_priv = dev->dev_private; 3066 u32 pm_irqs, gt_irqs; 3067 3068 pm_irqs = gt_irqs = 0; 3069 3070 dev_priv->gt_irq_mask = ~0; 3071 if (HAS_L3_DPF(dev)) { 3072 /* L3 parity interrupt is always unmasked. */ 3073 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3074 gt_irqs |= GT_PARITY_ERROR(dev); 3075 } 3076 3077 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3078 if (IS_GEN5(dev)) { 3079 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3080 ILK_BSD_USER_INTERRUPT; 3081 } else { 3082 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3083 } 3084 3085 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3086 3087 if (INTEL_INFO(dev)->gen >= 6) { 3088 /* 3089 * RPS interrupts will get enabled/disabled on demand when RPS 3090 * itself is enabled/disabled. 
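 *
 * A hedged sketch of what "on demand" means here (helper names as used
 * elsewhere in this driver; the call chain is illustrative, not
 * exhaustive):
 *
 *	intel_enable_gt_powersave(dev);
 *		-> gen6_enable_rps_interrupts(dev);	(unmask PM irqs)
 *	...
 *	intel_disable_gt_powersave(dev);
 *		-> gen6_disable_rps_interrupts(dev);	(mask them again)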
3091 */ 3092 if (HAS_VEBOX(dev)) 3093 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3094 3095 dev_priv->pm_irq_mask = 0xffffffff; 3096 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3097 } 3098 } 3099 3100 static int ironlake_irq_postinstall(struct drm_device *dev) 3101 { 3102 struct drm_i915_private *dev_priv = dev->dev_private; 3103 u32 display_mask, extra_mask; 3104 3105 if (INTEL_INFO(dev)->gen >= 7) { 3106 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3107 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3108 DE_PLANEB_FLIP_DONE_IVB | 3109 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3110 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3111 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3112 } else { 3113 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3114 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3115 DE_AUX_CHANNEL_A | 3116 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3117 DE_POISON); 3118 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3119 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3120 } 3121 3122 dev_priv->irq_mask = ~display_mask; 3123 3124 I915_WRITE(HWSTAM, 0xeffe); 3125 3126 ibx_irq_pre_postinstall(dev); 3127 3128 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3129 3130 gen5_gt_irq_postinstall(dev); 3131 3132 ibx_irq_postinstall(dev); 3133 3134 if (IS_IRONLAKE_M(dev)) { 3135 /* Enable PCU event interrupts 3136 * 3137 * spinlocking not required here for correctness since interrupt 3138 * setup is guaranteed to run in single-threaded context. But we 3139 * need it to make the assert_spin_locked happy. */ 3140 spin_lock_irq(&dev_priv->irq_lock); 3141 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3142 spin_unlock_irq(&dev_priv->irq_lock); 3143 } 3144 3145 return 0; 3146 } 3147 3148 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3149 { 3150 u32 pipestat_mask; 3151 u32 iir_mask; 3152 enum pipe pipe; 3153 3154 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3155 PIPE_FIFO_UNDERRUN_STATUS; 3156 3157 for_each_pipe(dev_priv, pipe) 3158 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3159 POSTING_READ(PIPESTAT(PIPE_A)); 3160 3161 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3162 PIPE_CRC_DONE_INTERRUPT_STATUS; 3163 3164 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3165 for_each_pipe(dev_priv, pipe) 3166 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3167 3168 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3169 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3170 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3171 if (IS_CHERRYVIEW(dev_priv)) 3172 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3173 dev_priv->irq_mask &= ~iir_mask; 3174 3175 I915_WRITE(VLV_IIR, iir_mask); 3176 I915_WRITE(VLV_IIR, iir_mask); 3177 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3178 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3179 POSTING_READ(VLV_IMR); 3180 } 3181 3182 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3183 { 3184 u32 pipestat_mask; 3185 u32 iir_mask; 3186 enum pipe pipe; 3187 3188 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3189 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3190 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3191 if (IS_CHERRYVIEW(dev_priv)) 3192 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3193 3194 dev_priv->irq_mask |= iir_mask; 3195 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3196 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3197 I915_WRITE(VLV_IIR, iir_mask); 3198 I915_WRITE(VLV_IIR, iir_mask); 3199 POSTING_READ(VLV_IIR); 3200 3201 pipestat_mask = 
PLANE_FLIP_DONE_INT_STATUS_VLV | 3202 PIPE_CRC_DONE_INTERRUPT_STATUS; 3203 3204 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3205 for_each_pipe(dev_priv, pipe) 3206 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3207 3208 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3209 PIPE_FIFO_UNDERRUN_STATUS; 3210 3211 for_each_pipe(dev_priv, pipe) 3212 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3213 POSTING_READ(PIPESTAT(PIPE_A)); 3214 } 3215 3216 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3217 { 3218 assert_spin_locked(&dev_priv->irq_lock); 3219 3220 if (dev_priv->display_irqs_enabled) 3221 return; 3222 3223 dev_priv->display_irqs_enabled = true; 3224 3225 if (intel_irqs_enabled(dev_priv)) 3226 valleyview_display_irqs_install(dev_priv); 3227 } 3228 3229 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3230 { 3231 assert_spin_locked(&dev_priv->irq_lock); 3232 3233 if (!dev_priv->display_irqs_enabled) 3234 return; 3235 3236 dev_priv->display_irqs_enabled = false; 3237 3238 if (intel_irqs_enabled(dev_priv)) 3239 valleyview_display_irqs_uninstall(dev_priv); 3240 } 3241 3242 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3243 { 3244 dev_priv->irq_mask = ~0; 3245 3246 I915_WRITE(PORT_HOTPLUG_EN, 0); 3247 POSTING_READ(PORT_HOTPLUG_EN); 3248 3249 I915_WRITE(VLV_IIR, 0xffffffff); 3250 I915_WRITE(VLV_IIR, 0xffffffff); 3251 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3252 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3253 POSTING_READ(VLV_IMR); 3254 3255 /* Interrupt setup is already guaranteed to be single-threaded, this is 3256 * just to make the assert_spin_locked check happy. */ 3257 spin_lock_irq(&dev_priv->irq_lock); 3258 if (dev_priv->display_irqs_enabled) 3259 valleyview_display_irqs_install(dev_priv); 3260 spin_unlock_irq(&dev_priv->irq_lock); 3261 } 3262 3263 static int valleyview_irq_postinstall(struct drm_device *dev) 3264 { 3265 struct drm_i915_private *dev_priv = dev->dev_private; 3266 3267 vlv_display_irq_postinstall(dev_priv); 3268 3269 gen5_gt_irq_postinstall(dev); 3270 3271 /* ack & enable invalid PTE error interrupts */ 3272 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3273 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3274 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3275 #endif 3276 3277 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3278 3279 return 0; 3280 } 3281 3282 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3283 { 3284 /* These are interrupts we'll toggle with the ring mask register */ 3285 uint32_t gt_interrupts[] = { 3286 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3287 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3288 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3289 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3290 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3291 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3292 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3293 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3294 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3295 0, 3296 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3297 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3298 }; 3299 3300 dev_priv->pm_irq_mask = 0xffffffff; 3301 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3302 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3303 /* 3304 * RPS interrupts will get enabled/disabled on demand when RPS itself 3305 * is enabled/disabled. 
3306 */ 3307 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3308 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3309 } 3310 3311 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3312 { 3313 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3314 uint32_t de_pipe_enables; 3315 int pipe; 3316 u32 de_port_en = GEN8_AUX_CHANNEL_A; 3317 3318 if (IS_GEN9(dev_priv)) { 3319 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3320 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3321 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3322 GEN9_AUX_CHANNEL_D; 3323 3324 if (IS_BROXTON(dev_priv)) 3325 de_port_en |= BXT_DE_PORT_GMBUS; 3326 } else 3327 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3328 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3329 3330 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3331 GEN8_PIPE_FIFO_UNDERRUN; 3332 3333 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3334 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3335 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3336 3337 for_each_pipe(dev_priv, pipe) 3338 if (intel_display_power_is_enabled(dev_priv, 3339 POWER_DOMAIN_PIPE(pipe))) 3340 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3341 dev_priv->de_irq_mask[pipe], 3342 de_pipe_enables); 3343 3344 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en); 3345 } 3346 3347 static int gen8_irq_postinstall(struct drm_device *dev) 3348 { 3349 struct drm_i915_private *dev_priv = dev->dev_private; 3350 3351 if (HAS_PCH_SPLIT(dev)) 3352 ibx_irq_pre_postinstall(dev); 3353 3354 gen8_gt_irq_postinstall(dev_priv); 3355 gen8_de_irq_postinstall(dev_priv); 3356 3357 if (HAS_PCH_SPLIT(dev)) 3358 ibx_irq_postinstall(dev); 3359 3360 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3361 POSTING_READ(GEN8_MASTER_IRQ); 3362 3363 return 0; 3364 } 3365 3366 static int cherryview_irq_postinstall(struct drm_device *dev) 3367 { 3368 struct drm_i915_private *dev_priv = dev->dev_private; 3369 3370 vlv_display_irq_postinstall(dev_priv); 3371 3372 gen8_gt_irq_postinstall(dev_priv); 3373 3374 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3375 POSTING_READ(GEN8_MASTER_IRQ); 3376 3377 return 0; 3378 } 3379 3380 static void gen8_irq_uninstall(struct drm_device *dev) 3381 { 3382 struct drm_i915_private *dev_priv = dev->dev_private; 3383 3384 if (!dev_priv) 3385 return; 3386 3387 gen8_irq_reset(dev); 3388 } 3389 3390 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3391 { 3392 /* Interrupt setup is already guaranteed to be single-threaded, this is 3393 * just to make the assert_spin_locked check happy. 
*/ 3394 spin_lock_irq(&dev_priv->irq_lock); 3395 if (dev_priv->display_irqs_enabled) 3396 valleyview_display_irqs_uninstall(dev_priv); 3397 spin_unlock_irq(&dev_priv->irq_lock); 3398 3399 vlv_display_irq_reset(dev_priv); 3400 3401 dev_priv->irq_mask = ~0; 3402 } 3403 3404 static void valleyview_irq_uninstall(struct drm_device *dev) 3405 { 3406 struct drm_i915_private *dev_priv = dev->dev_private; 3407 3408 if (!dev_priv) 3409 return; 3410 3411 I915_WRITE(VLV_MASTER_IER, 0); 3412 3413 gen5_gt_irq_reset(dev); 3414 3415 I915_WRITE(HWSTAM, 0xffffffff); 3416 3417 vlv_display_irq_uninstall(dev_priv); 3418 } 3419 3420 static void cherryview_irq_uninstall(struct drm_device *dev) 3421 { 3422 struct drm_i915_private *dev_priv = dev->dev_private; 3423 3424 if (!dev_priv) 3425 return; 3426 3427 I915_WRITE(GEN8_MASTER_IRQ, 0); 3428 POSTING_READ(GEN8_MASTER_IRQ); 3429 3430 gen8_gt_irq_reset(dev_priv); 3431 3432 GEN5_IRQ_RESET(GEN8_PCU_); 3433 3434 vlv_display_irq_uninstall(dev_priv); 3435 } 3436 3437 static void ironlake_irq_uninstall(struct drm_device *dev) 3438 { 3439 struct drm_i915_private *dev_priv = dev->dev_private; 3440 3441 if (!dev_priv) 3442 return; 3443 3444 ironlake_irq_reset(dev); 3445 } 3446 3447 static void i8xx_irq_preinstall(struct drm_device * dev) 3448 { 3449 struct drm_i915_private *dev_priv = dev->dev_private; 3450 int pipe; 3451 3452 for_each_pipe(dev_priv, pipe) 3453 I915_WRITE(PIPESTAT(pipe), 0); 3454 I915_WRITE16(IMR, 0xffff); 3455 I915_WRITE16(IER, 0x0); 3456 POSTING_READ16(IER); 3457 } 3458 3459 static int i8xx_irq_postinstall(struct drm_device *dev) 3460 { 3461 struct drm_i915_private *dev_priv = dev->dev_private; 3462 3463 I915_WRITE16(EMR, 3464 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3465 3466 /* Unmask the interrupts that we always want on. */ 3467 dev_priv->irq_mask = 3468 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3469 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3470 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3471 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3472 I915_WRITE16(IMR, dev_priv->irq_mask); 3473 3474 I915_WRITE16(IER, 3475 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3476 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3477 I915_USER_INTERRUPT); 3478 POSTING_READ16(IER); 3479 3480 /* Interrupt setup is already guaranteed to be single-threaded, this is 3481 * just to make the assert_spin_locked check happy. */ 3482 spin_lock_irq(&dev_priv->irq_lock); 3483 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3484 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3485 spin_unlock_irq(&dev_priv->irq_lock); 3486 3487 return 0; 3488 } 3489 3490 /* 3491 * Returns true when a page flip has completed. 3492 */ 3493 static bool i8xx_handle_vblank(struct drm_device *dev, 3494 int plane, int pipe, u32 iir) 3495 { 3496 struct drm_i915_private *dev_priv = dev->dev_private; 3497 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3498 3499 if (!intel_pipe_handle_vblank(dev, pipe)) 3500 return false; 3501 3502 if ((iir & flip_pending) == 0) 3503 goto check_page_flip; 3504 3505 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3506 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3507 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3508 * the flip is completed (no longer pending). Since this doesn't raise 3509 * an interrupt per se, we watch for the change at vblank. 
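 *
 * Timeline sketch (illustrative only):
 *
 *	MI_DISPLAY_FLIP issued:	  ISR.PendingFlip = 1, latched into IIR
 *	flip completes:		  ISR.PendingFlip = 0, IIR bit stays latched
 *	next vblank interrupt:	  IIR set but ISR clear  ->  FlipDone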
3510 */ 3511 if (I915_READ16(ISR) & flip_pending) 3512 goto check_page_flip; 3513 3514 intel_prepare_page_flip(dev, plane); 3515 intel_finish_page_flip(dev, pipe); 3516 return true; 3517 3518 check_page_flip: 3519 intel_check_page_flip(dev, pipe); 3520 return false; 3521 } 3522 3523 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3524 { 3525 struct drm_device *dev = arg; 3526 struct drm_i915_private *dev_priv = dev->dev_private; 3527 u16 iir, new_iir; 3528 u32 pipe_stats[2]; 3529 int pipe; 3530 u16 flip_mask = 3531 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3532 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3533 3534 if (!intel_irqs_enabled(dev_priv)) 3535 return IRQ_NONE; 3536 3537 iir = I915_READ16(IIR); 3538 if (iir == 0) 3539 return IRQ_NONE; 3540 3541 while (iir & ~flip_mask) { 3542 /* Can't rely on pipestat interrupt bit in iir as it might 3543 * have been cleared after the pipestat interrupt was received. 3544 * It doesn't set the bit in iir again, but it still produces 3545 * interrupts (for non-MSI). 3546 */ 3547 spin_lock(&dev_priv->irq_lock); 3548 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3549 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3550 3551 for_each_pipe(dev_priv, pipe) { 3552 int reg = PIPESTAT(pipe); 3553 pipe_stats[pipe] = I915_READ(reg); 3554 3555 /* 3556 * Clear the PIPE*STAT regs before the IIR 3557 */ 3558 if (pipe_stats[pipe] & 0x8000ffff) 3559 I915_WRITE(reg, pipe_stats[pipe]); 3560 } 3561 spin_unlock(&dev_priv->irq_lock); 3562 3563 I915_WRITE16(IIR, iir & ~flip_mask); 3564 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3565 3566 if (iir & I915_USER_INTERRUPT) 3567 notify_ring(&dev_priv->ring[RCS]); 3568 3569 for_each_pipe(dev_priv, pipe) { 3570 int plane = pipe; 3571 if (HAS_FBC(dev)) 3572 plane = !plane; 3573 3574 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3575 i8xx_handle_vblank(dev, plane, pipe, iir)) 3576 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3577 3578 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3579 i9xx_pipe_crc_irq_handler(dev, pipe); 3580 3581 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3582 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3583 pipe); 3584 } 3585 3586 iir = new_iir; 3587 } 3588 3589 return IRQ_HANDLED; 3590 } 3591 3592 static void i8xx_irq_uninstall(struct drm_device * dev) 3593 { 3594 struct drm_i915_private *dev_priv = dev->dev_private; 3595 int pipe; 3596 3597 for_each_pipe(dev_priv, pipe) { 3598 /* Clear enable bits; then clear status bits */ 3599 I915_WRITE(PIPESTAT(pipe), 0); 3600 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3601 } 3602 I915_WRITE16(IMR, 0xffff); 3603 I915_WRITE16(IER, 0x0); 3604 I915_WRITE16(IIR, I915_READ16(IIR)); 3605 } 3606 3607 static void i915_irq_preinstall(struct drm_device * dev) 3608 { 3609 struct drm_i915_private *dev_priv = dev->dev_private; 3610 int pipe; 3611 3612 if (I915_HAS_HOTPLUG(dev)) { 3613 I915_WRITE(PORT_HOTPLUG_EN, 0); 3614 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3615 } 3616 3617 I915_WRITE16(HWSTAM, 0xeffe); 3618 for_each_pipe(dev_priv, pipe) 3619 I915_WRITE(PIPESTAT(pipe), 0); 3620 I915_WRITE(IMR, 0xffffffff); 3621 I915_WRITE(IER, 0x0); 3622 POSTING_READ(IER); 3623 } 3624 3625 static int i915_irq_postinstall(struct drm_device *dev) 3626 { 3627 struct drm_i915_private *dev_priv = dev->dev_private; 3628 u32 enable_mask; 3629 3630 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3631 3632 /* Unmask the interrupts that we always want on. 
*/ 3633 dev_priv->irq_mask = 3634 ~(I915_ASLE_INTERRUPT | 3635 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3636 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3637 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3638 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3639 3640 enable_mask = 3641 I915_ASLE_INTERRUPT | 3642 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3643 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3644 I915_USER_INTERRUPT; 3645 3646 if (I915_HAS_HOTPLUG(dev)) { 3647 I915_WRITE(PORT_HOTPLUG_EN, 0); 3648 POSTING_READ(PORT_HOTPLUG_EN); 3649 3650 /* Enable in IER... */ 3651 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3652 /* and unmask in IMR */ 3653 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3654 } 3655 3656 I915_WRITE(IMR, dev_priv->irq_mask); 3657 I915_WRITE(IER, enable_mask); 3658 POSTING_READ(IER); 3659 3660 i915_enable_asle_pipestat(dev); 3661 3662 /* Interrupt setup is already guaranteed to be single-threaded, this is 3663 * just to make the assert_spin_locked check happy. */ 3664 spin_lock_irq(&dev_priv->irq_lock); 3665 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3666 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3667 spin_unlock_irq(&dev_priv->irq_lock); 3668 3669 return 0; 3670 } 3671 3672 /* 3673 * Returns true when a page flip has completed. 3674 */ 3675 static bool i915_handle_vblank(struct drm_device *dev, 3676 int plane, int pipe, u32 iir) 3677 { 3678 struct drm_i915_private *dev_priv = dev->dev_private; 3679 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3680 3681 if (!intel_pipe_handle_vblank(dev, pipe)) 3682 return false; 3683 3684 if ((iir & flip_pending) == 0) 3685 goto check_page_flip; 3686 3687 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3688 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3689 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3690 * the flip is completed (no longer pending). Since this doesn't raise 3691 * an interrupt per se, we watch for the change at vblank. 3692 */ 3693 if (I915_READ(ISR) & flip_pending) 3694 goto check_page_flip; 3695 3696 intel_prepare_page_flip(dev, plane); 3697 intel_finish_page_flip(dev, pipe); 3698 return true; 3699 3700 check_page_flip: 3701 intel_check_page_flip(dev, pipe); 3702 return false; 3703 } 3704 3705 static irqreturn_t i915_irq_handler(int irq, void *arg) 3706 { 3707 struct drm_device *dev = arg; 3708 struct drm_i915_private *dev_priv = dev->dev_private; 3709 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3710 u32 flip_mask = 3711 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3712 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3713 int pipe, ret = IRQ_NONE; 3714 3715 if (!intel_irqs_enabled(dev_priv)) 3716 return IRQ_NONE; 3717 3718 iir = I915_READ(IIR); 3719 do { 3720 bool irq_received = (iir & ~flip_mask) != 0; 3721 bool blc_event = false; 3722 3723 /* Can't rely on pipestat interrupt bit in iir as it might 3724 * have been cleared after the pipestat interrupt was received. 3725 * It doesn't set the bit in iir again, but it still produces 3726 * interrupts (for non-MSI). 
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events. */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}
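
/*
 * A condensed sketch of the ack-and-re-read loop used by the handler above
 * (names are illustrative only). With MSI, a new message is sent only when
 * IIR transitions from zero to nonzero, so the handler loops on the value
 * re-read after the ack instead of returning and hoping for another edge:
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);      (ack the bits about to be handled)
 *		new_iir = I915_READ(IIR);  (posting read; sees any new bits)
 *		handle_events(iir);
 *		iir = new_iir;
 *	} while (iir);
 */
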
static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
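
/*
 * Illustrative note on the mask arithmetic in the postinstall below (not
 * driver code): IER selects which events raise interrupts, while IMR selects
 * which events are visible in IIR at all. The flip-pending bits are left
 * unmasked in IMR so the vblank code can inspect them in IIR/ISR, but they
 * are removed from IER so they never generate an interrupt by themselves:
 *
 *	irq_mask    = ~(FLIP_PENDING | other_bits);
 *	enable_mask = (~irq_mask & ~FLIP_PENDING) | I915_USER_INTERRUPT;
 *
 * so FLIP_PENDING is readable but silent, and only the remaining unmasked
 * bits plus the user interrupt actually fire.
 */
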
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection; note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note that HDMI and DP share hotplug bits, and that the enable bits
	 * are the same for all generations.
	 */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	/* Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later, so just do it
	 * once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
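
/*
 * Illustrative sketch (not driver code) of the rebuild-from-state pattern in
 * i915_hpd_irq_setup() above: rather than toggling individual bits as pins
 * change, the whole enable mask is cleared and recomputed from the current
 * software state each time, so the register stays consistent even after a
 * hotplug storm has disabled some pins. The core of it, with a hypothetical
 * pin_enabled() predicate standing in for the hotplug state lookup:
 *
 *	hotplug_en = I915_READ(PORT_HOTPLUG_EN) & ~HOTPLUG_INT_EN_MASK;
 *	for_each_intel_encoder(dev, encoder)
 *		if (pin_enabled(encoder->hpd_pin))
 *			hotplug_en |= hpd_mask_i915[encoder->hpd_pin];
 *	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 */
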
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events. */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
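
/*
 * Illustrative note (not driver code): the 0x8000ffff constant used when
 * acking PIPESTAT here and in the handlers above covers every status bit of
 * the register. Bits 15:0 are the regular status half (the top half holds
 * the corresponding enables), and bit 31 is the one exception where a status
 * bit, PIPE_FIFO_UNDERRUN_STATUS, lives in the high half:
 *
 *	0x8000ffff == PIPE_FIFO_UNDERRUN_STATUS | 0x0000ffff
 */
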
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;
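
	/*
	 * Worked example (illustrative): with max_vblank_count == 0xffffff,
	 * the drm core computes missed vblanks as
	 *
	 *	diff = (cur - last) & dev->max_vblank_count;
	 *
	 * so a raw counter that wraps from 0xfffffe to 0x000001 still yields
	 * the expected diff of 3. Gen2 reports max_vblank_count == 0 because
	 * it has no hardware counter to wrap at all.
	 */
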
	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (HAS_PCH_SPLIT(dev))
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
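
/*
 * Usage sketch (illustrative, with hypothetical callers): the helpers above
 * are meant to pair up. At load time intel_irq_init() is followed by
 * intel_irq_install(); at runtime the disable/enable pair brackets a
 * low-power period:
 *
 *	static int example_runtime_suspend(struct drm_i915_private *dev_priv)
 *	{
 *		intel_runtime_pm_disable_interrupts(dev_priv);
 *		(... power the hardware down ...)
 *		return 0;
 *	}
 *
 *	static int example_runtime_resume(struct drm_i915_private *dev_priv)
 *	{
 *		(... power the hardware back up ...)
 *		intel_runtime_pm_enable_interrupts(dev_priv);
 *		return 0;
 *	}
 *
 * Note that the disable side calls synchronize_irq() after clearing
 * irqs_enabled, so a handler still running on another CPU has finished
 * before the hardware is powered down.
 */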