/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid.
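 * Hence the IRQ reset macros below clear IIR twice, with a posting read after
 * each write, so that a queued-up second event gets flushed as well.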
*/ 98 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 99 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 100 POSTING_READ(GEN8_##type##_IMR(which)); \ 101 I915_WRITE(GEN8_##type##_IER(which), 0); \ 102 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 103 POSTING_READ(GEN8_##type##_IIR(which)); \ 104 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 105 POSTING_READ(GEN8_##type##_IIR(which)); \ 106 } while (0) 107 108 #define GEN5_IRQ_RESET(type) do { \ 109 I915_WRITE(type##IMR, 0xffffffff); \ 110 POSTING_READ(type##IMR); \ 111 I915_WRITE(type##IER, 0); \ 112 I915_WRITE(type##IIR, 0xffffffff); \ 113 POSTING_READ(type##IIR); \ 114 I915_WRITE(type##IIR, 0xffffffff); \ 115 POSTING_READ(type##IIR); \ 116 } while (0) 117 118 /* 119 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 120 */ 121 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \ 122 u32 val = I915_READ(reg); \ 123 if (val) { \ 124 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \ 125 (reg), val); \ 126 I915_WRITE((reg), 0xffffffff); \ 127 POSTING_READ(reg); \ 128 I915_WRITE((reg), 0xffffffff); \ 129 POSTING_READ(reg); \ 130 } \ 131 } while (0) 132 133 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 134 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ 135 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 136 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 137 POSTING_READ(GEN8_##type##_IMR(which)); \ 138 } while (0) 139 140 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 141 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ 142 I915_WRITE(type##IER, (ier_val)); \ 143 I915_WRITE(type##IMR, (imr_val)); \ 144 POSTING_READ(type##IMR); \ 145 } while (0) 146 147 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 148 149 /* For display hotplug interrupt */ 150 void 151 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 152 { 153 assert_spin_locked(&dev_priv->irq_lock); 154 155 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 156 return; 157 158 if ((dev_priv->irq_mask & mask) != 0) { 159 dev_priv->irq_mask &= ~mask; 160 I915_WRITE(DEIMR, dev_priv->irq_mask); 161 POSTING_READ(DEIMR); 162 } 163 } 164 165 void 166 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 167 { 168 assert_spin_locked(&dev_priv->irq_lock); 169 170 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 171 return; 172 173 if ((dev_priv->irq_mask & mask) != mask) { 174 dev_priv->irq_mask |= mask; 175 I915_WRITE(DEIMR, dev_priv->irq_mask); 176 POSTING_READ(DEIMR); 177 } 178 } 179 180 /** 181 * ilk_update_gt_irq - update GTIMR 182 * @dev_priv: driver private 183 * @interrupt_mask: mask of interrupt bits to update 184 * @enabled_irq_mask: mask of interrupt bits to enable 185 */ 186 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 187 uint32_t interrupt_mask, 188 uint32_t enabled_irq_mask) 189 { 190 assert_spin_locked(&dev_priv->irq_lock); 191 192 WARN_ON(enabled_irq_mask & ~interrupt_mask); 193 194 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 195 return; 196 197 dev_priv->gt_irq_mask &= ~interrupt_mask; 198 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 199 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 200 POSTING_READ(GTIMR); 201 } 202 203 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 204 { 205 ilk_update_gt_irq(dev_priv, mask, mask); 206 } 207 208 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 209 { 210 ilk_update_gt_irq(dev_priv, mask, 0); 211 } 212 213 static u32 
gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang, and VLV/CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
313 */ 314 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) 315 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; 316 317 if (INTEL_INFO(dev_priv)->gen >= 8) 318 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; 319 320 return mask; 321 } 322 323 void gen6_disable_rps_interrupts(struct drm_device *dev) 324 { 325 struct drm_i915_private *dev_priv = dev->dev_private; 326 327 spin_lock_irq(&dev_priv->irq_lock); 328 dev_priv->rps.interrupts_enabled = false; 329 spin_unlock_irq(&dev_priv->irq_lock); 330 331 cancel_work_sync(&dev_priv->rps.work); 332 333 spin_lock_irq(&dev_priv->irq_lock); 334 335 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 336 337 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 338 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 339 ~dev_priv->pm_rps_events); 340 341 spin_unlock_irq(&dev_priv->irq_lock); 342 343 synchronize_irq(dev->irq); 344 } 345 346 /** 347 * ibx_display_interrupt_update - update SDEIMR 348 * @dev_priv: driver private 349 * @interrupt_mask: mask of interrupt bits to update 350 * @enabled_irq_mask: mask of interrupt bits to enable 351 */ 352 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 353 uint32_t interrupt_mask, 354 uint32_t enabled_irq_mask) 355 { 356 uint32_t sdeimr = I915_READ(SDEIMR); 357 sdeimr &= ~interrupt_mask; 358 sdeimr |= (~enabled_irq_mask & interrupt_mask); 359 360 WARN_ON(enabled_irq_mask & ~interrupt_mask); 361 362 assert_spin_locked(&dev_priv->irq_lock); 363 364 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 365 return; 366 367 I915_WRITE(SDEIMR, sdeimr); 368 POSTING_READ(SDEIMR); 369 } 370 371 static void 372 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 373 u32 enable_mask, u32 status_mask) 374 { 375 u32 reg = PIPESTAT(pipe); 376 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 377 378 assert_spin_locked(&dev_priv->irq_lock); 379 WARN_ON(!intel_irqs_enabled(dev_priv)); 380 381 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 382 status_mask & ~PIPESTAT_INT_STATUS_MASK, 383 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 384 pipe_name(pipe), enable_mask, status_mask)) 385 return; 386 387 if ((pipestat & enable_mask) == enable_mask) 388 return; 389 390 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 391 392 /* Enable the interrupt, clear any pending status */ 393 pipestat |= enable_mask | status_mask; 394 I915_WRITE(reg, pipestat); 395 POSTING_READ(reg); 396 } 397 398 static void 399 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 400 u32 enable_mask, u32 status_mask) 401 { 402 u32 reg = PIPESTAT(pipe); 403 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 404 405 assert_spin_locked(&dev_priv->irq_lock); 406 WARN_ON(!intel_irqs_enabled(dev_priv)); 407 408 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 409 status_mask & ~PIPESTAT_INT_STATUS_MASK, 410 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 411 pipe_name(pipe), enable_mask, status_mask)) 412 return; 413 414 if ((pipestat & enable_mask) == 0) 415 return; 416 417 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 418 419 pipestat &= ~enable_mask; 420 I915_WRITE(reg, pipestat); 421 POSTING_READ(reg); 422 } 423 424 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 425 { 426 u32 enable_mask = status_mask << 16; 427 428 /* 429 * On pipe A we don't support the PSR interrupt yet, 430 * on pipe B and C the same bit MBZ. 
431 */ 432 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 433 return 0; 434 /* 435 * On pipe B and C we don't support the PSR interrupt yet, on pipe 436 * A the same bit is for perf counters which we don't use either. 437 */ 438 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 439 return 0; 440 441 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 442 SPRITE0_FLIP_DONE_INT_EN_VLV | 443 SPRITE1_FLIP_DONE_INT_EN_VLV); 444 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 445 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 446 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 447 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 448 449 return enable_mask; 450 } 451 452 void 453 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 454 u32 status_mask) 455 { 456 u32 enable_mask; 457 458 if (IS_VALLEYVIEW(dev_priv->dev)) 459 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 460 status_mask); 461 else 462 enable_mask = status_mask << 16; 463 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 464 } 465 466 void 467 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 468 u32 status_mask) 469 { 470 u32 enable_mask; 471 472 if (IS_VALLEYVIEW(dev_priv->dev)) 473 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 474 status_mask); 475 else 476 enable_mask = status_mask << 16; 477 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 478 } 479 480 /** 481 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 482 */ 483 static void i915_enable_asle_pipestat(struct drm_device *dev) 484 { 485 struct drm_i915_private *dev_priv = dev->dev_private; 486 487 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 488 return; 489 490 spin_lock_irq(&dev_priv->irq_lock); 491 492 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 493 if (INTEL_INFO(dev)->gen >= 4) 494 i915_enable_pipestat(dev_priv, PIPE_A, 495 PIPE_LEGACY_BLC_EVENT_STATUS); 496 497 spin_unlock_irq(&dev_priv->irq_lock); 498 } 499 500 /* 501 * This timing diagram depicts the video signal in and 502 * around the vertical blanking period. 503 * 504 * Assumptions about the fictitious mode used in this example: 505 * vblank_start >= 3 506 * vsync_start = vblank_start + 1 507 * vsync_end = vblank_start + 2 508 * vtotal = vblank_start + 3 509 * 510 * start of vblank: 511 * latch double buffered registers 512 * increment frame counter (ctg+) 513 * generate start of vblank interrupt (gen4+) 514 * | 515 * | frame start: 516 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 517 * | may be shifted forward 1-3 extra lines via PIPECONF 518 * | | 519 * | | start of vsync: 520 * | | generate vsync interrupt 521 * | | | 522 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 523 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x = horizontal active
 * _ = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode =
		&intel_crtc->config->base.adjusted_mode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc.
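 * They boil down to a plain readl() and skip the uncore lock/forcewake
 * bookkeeping on purpose: the scanout position code below does these reads in
 * a timing-critical section that already holds uncore.lock.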
*/ 617 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) 618 619 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 620 { 621 struct drm_device *dev = crtc->base.dev; 622 struct drm_i915_private *dev_priv = dev->dev_private; 623 const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 624 enum pipe pipe = crtc->pipe; 625 int position, vtotal; 626 627 vtotal = mode->crtc_vtotal; 628 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 629 vtotal /= 2; 630 631 if (IS_GEN2(dev)) 632 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 633 else 634 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 635 636 /* 637 * See update_scanline_offset() for the details on the 638 * scanline_offset adjustment. 639 */ 640 return (position + crtc->scanline_offset) % vtotal; 641 } 642 643 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 644 unsigned int flags, int *vpos, int *hpos, 645 ktime_t *stime, ktime_t *etime) 646 { 647 struct drm_i915_private *dev_priv = dev->dev_private; 648 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 649 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 650 const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; 651 int position; 652 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 653 bool in_vbl = true; 654 int ret = 0; 655 unsigned long irqflags; 656 657 if (!intel_crtc->active) { 658 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 659 "pipe %c\n", pipe_name(pipe)); 660 return 0; 661 } 662 663 htotal = mode->crtc_htotal; 664 hsync_start = mode->crtc_hsync_start; 665 vtotal = mode->crtc_vtotal; 666 vbl_start = mode->crtc_vblank_start; 667 vbl_end = mode->crtc_vblank_end; 668 669 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 670 vbl_start = DIV_ROUND_UP(vbl_start, 2); 671 vbl_end /= 2; 672 vtotal /= 2; 673 } 674 675 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 676 677 /* 678 * Lock uncore.lock, as we will do multiple timing critical raw 679 * register reads, potentially with preemption disabled, so the 680 * following code must not block on uncore.lock. 681 */ 682 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 683 684 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 685 686 /* Get optional system timestamp before query. */ 687 if (stime) 688 *stime = ktime_get(); 689 690 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 691 /* No obvious pixelcount register. Only query vertical 692 * scanout position from Display scan line register. 693 */ 694 position = __intel_get_crtc_scanline(intel_crtc); 695 } else { 696 /* Have access to pixelcount since start of frame. 697 * We can split this into vertical and horizontal 698 * scanout position. 699 */ 700 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 701 702 /* convert to pixel counts */ 703 vbl_start *= htotal; 704 vbl_end *= htotal; 705 vtotal *= htotal; 706 707 /* 708 * In interlaced modes, the pixel counter counts all pixels, 709 * so one field will have htotal more pixels. In order to avoid 710 * the reported position from jumping backwards when the pixel 711 * counter is beyond the length of the shorter field, just 712 * clamp the position the length of the shorter field. This 713 * matches how the scanline counter based position works since 714 * the scanline counter doesn't count the two half lines. 
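		 * (position is in pixel units at this point, so the clamp below
		 * caps it at vtotal - 1, i.e. the last pixel of the shorter
		 * field.)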
715 */ 716 if (position >= vtotal) 717 position = vtotal - 1; 718 719 /* 720 * Start of vblank interrupt is triggered at start of hsync, 721 * just prior to the first active line of vblank. However we 722 * consider lines to start at the leading edge of horizontal 723 * active. So, should we get here before we've crossed into 724 * the horizontal active of the first line in vblank, we would 725 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 726 * always add htotal-hsync_start to the current pixel position. 727 */ 728 position = (position + htotal - hsync_start) % vtotal; 729 } 730 731 /* Get optional system timestamp after query. */ 732 if (etime) 733 *etime = ktime_get(); 734 735 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 736 737 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 738 739 in_vbl = position >= vbl_start && position < vbl_end; 740 741 /* 742 * While in vblank, position will be negative 743 * counting up towards 0 at vbl_end. And outside 744 * vblank, position will be positive counting 745 * up since vbl_end. 746 */ 747 if (position >= vbl_start) 748 position -= vbl_end; 749 else 750 position += vtotal - vbl_end; 751 752 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 753 *vpos = position; 754 *hpos = 0; 755 } else { 756 *vpos = position / htotal; 757 *hpos = position - (*vpos * htotal); 758 } 759 760 /* In vblank? */ 761 if (in_vbl) 762 ret |= DRM_SCANOUTPOS_IN_VBLANK; 763 764 return ret; 765 } 766 767 int intel_get_crtc_scanline(struct intel_crtc *crtc) 768 { 769 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 770 unsigned long irqflags; 771 int position; 772 773 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 774 position = __intel_get_crtc_scanline(crtc); 775 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 776 777 return position; 778 } 779 780 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 781 int *max_error, 782 struct timeval *vblank_time, 783 unsigned flags) 784 { 785 struct drm_crtc *crtc; 786 787 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 788 DRM_ERROR("Invalid crtc %d\n", pipe); 789 return -EINVAL; 790 } 791 792 /* Get drm_crtc to timestamp: */ 793 crtc = intel_get_crtc_for_pipe(dev, pipe); 794 if (crtc == NULL) { 795 DRM_ERROR("Invalid crtc %d\n", pipe); 796 return -EINVAL; 797 } 798 799 if (!crtc->state->enable) { 800 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 801 return -EBUSY; 802 } 803 804 /* Helper routine in DRM core does all the work: */ 805 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 806 vblank_time, flags, 807 crtc, 808 &to_intel_crtc(crtc)->config->base.adjusted_mode); 809 } 810 811 static bool intel_hpd_irq_event(struct drm_device *dev, 812 struct drm_connector *connector) 813 { 814 enum drm_connector_status old_status; 815 816 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 817 old_status = connector->status; 818 819 connector->status = connector->funcs->detect(connector, false); 820 if (old_status == connector->status) 821 return false; 822 823 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 824 connector->base.id, 825 connector->name, 826 drm_get_connector_status_name(old_status), 827 drm_get_connector_status_name(connector->status)); 828 829 return true; 830 } 831 832 static void i915_digport_work_func(struct work_struct *work) 833 { 834 struct drm_i915_private *dev_priv = 835 container_of(work, struct drm_i915_private, dig_port_work); 836 u32 long_port_mask, 
short_port_mask; 837 struct intel_digital_port *intel_dig_port; 838 int i; 839 u32 old_bits = 0; 840 841 spin_lock_irq(&dev_priv->irq_lock); 842 long_port_mask = dev_priv->long_hpd_port_mask; 843 dev_priv->long_hpd_port_mask = 0; 844 short_port_mask = dev_priv->short_hpd_port_mask; 845 dev_priv->short_hpd_port_mask = 0; 846 spin_unlock_irq(&dev_priv->irq_lock); 847 848 for (i = 0; i < I915_MAX_PORTS; i++) { 849 bool valid = false; 850 bool long_hpd = false; 851 intel_dig_port = dev_priv->hpd_irq_port[i]; 852 if (!intel_dig_port || !intel_dig_port->hpd_pulse) 853 continue; 854 855 if (long_port_mask & (1 << i)) { 856 valid = true; 857 long_hpd = true; 858 } else if (short_port_mask & (1 << i)) 859 valid = true; 860 861 if (valid) { 862 enum irqreturn ret; 863 864 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); 865 if (ret == IRQ_NONE) { 866 /* fall back to old school hpd */ 867 old_bits |= (1 << intel_dig_port->base.hpd_pin); 868 } 869 } 870 } 871 872 if (old_bits) { 873 spin_lock_irq(&dev_priv->irq_lock); 874 dev_priv->hpd_event_bits |= old_bits; 875 spin_unlock_irq(&dev_priv->irq_lock); 876 schedule_work(&dev_priv->hotplug_work); 877 } 878 } 879 880 /* 881 * Handle hotplug events outside the interrupt handler proper. 882 */ 883 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 884 885 static void i915_hotplug_work_func(struct work_struct *work) 886 { 887 struct drm_i915_private *dev_priv = 888 container_of(work, struct drm_i915_private, hotplug_work); 889 struct drm_device *dev = dev_priv->dev; 890 struct drm_mode_config *mode_config = &dev->mode_config; 891 struct intel_connector *intel_connector; 892 struct intel_encoder *intel_encoder; 893 struct drm_connector *connector; 894 bool hpd_disabled = false; 895 bool changed = false; 896 u32 hpd_event_bits; 897 898 mutex_lock(&mode_config->mutex); 899 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 900 901 spin_lock_irq(&dev_priv->irq_lock); 902 903 hpd_event_bits = dev_priv->hpd_event_bits; 904 dev_priv->hpd_event_bits = 0; 905 list_for_each_entry(connector, &mode_config->connector_list, head) { 906 intel_connector = to_intel_connector(connector); 907 if (!intel_connector->encoder) 908 continue; 909 intel_encoder = intel_connector->encoder; 910 if (intel_encoder->hpd_pin > HPD_NONE && 911 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 912 connector->polled == DRM_CONNECTOR_POLL_HPD) { 913 DRM_INFO("HPD interrupt storm detected on connector %s: " 914 "switching from hotplug detection to polling\n", 915 connector->name); 916 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 917 connector->polled = DRM_CONNECTOR_POLL_CONNECT 918 | DRM_CONNECTOR_POLL_DISCONNECT; 919 hpd_disabled = true; 920 } 921 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 922 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 923 connector->name, intel_encoder->hpd_pin); 924 } 925 } 926 /* if there were no outputs to poll, poll was disabled, 927 * therefore make sure it's enabled when disabling HPD on 928 * some connectors */ 929 if (hpd_disabled) { 930 drm_kms_helper_poll_enable(dev); 931 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, 932 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 933 } 934 935 spin_unlock_irq(&dev_priv->irq_lock); 936 937 list_for_each_entry(connector, &mode_config->connector_list, head) { 938 intel_connector = to_intel_connector(connector); 939 if (!intel_connector->encoder) 940 continue; 941 intel_encoder = intel_connector->encoder; 942 if 
(hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 943 if (intel_encoder->hot_plug) 944 intel_encoder->hot_plug(intel_encoder); 945 if (intel_hpd_irq_event(dev, connector)) 946 changed = true; 947 } 948 } 949 mutex_unlock(&mode_config->mutex); 950 951 if (changed) 952 drm_kms_helper_hotplug_event(dev); 953 } 954 955 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 956 { 957 struct drm_i915_private *dev_priv = dev->dev_private; 958 u32 busy_up, busy_down, max_avg, min_avg; 959 u8 new_delay; 960 961 spin_lock(&mchdev_lock); 962 963 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 964 965 new_delay = dev_priv->ips.cur_delay; 966 967 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 968 busy_up = I915_READ(RCPREVBSYTUPAVG); 969 busy_down = I915_READ(RCPREVBSYTDNAVG); 970 max_avg = I915_READ(RCBMAXAVG); 971 min_avg = I915_READ(RCBMINAVG); 972 973 /* Handle RCS change request from hw */ 974 if (busy_up > max_avg) { 975 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 976 new_delay = dev_priv->ips.cur_delay - 1; 977 if (new_delay < dev_priv->ips.max_delay) 978 new_delay = dev_priv->ips.max_delay; 979 } else if (busy_down < min_avg) { 980 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 981 new_delay = dev_priv->ips.cur_delay + 1; 982 if (new_delay > dev_priv->ips.min_delay) 983 new_delay = dev_priv->ips.min_delay; 984 } 985 986 if (ironlake_set_drps(dev, new_delay)) 987 dev_priv->ips.cur_delay = new_delay; 988 989 spin_unlock(&mchdev_lock); 990 991 return; 992 } 993 994 static void notify_ring(struct intel_engine_cs *ring) 995 { 996 if (!intel_ring_initialized(ring)) 997 return; 998 999 trace_i915_gem_request_notify(ring); 1000 1001 wake_up_all(&ring->irq_queue); 1002 } 1003 1004 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1005 struct intel_rps_ei *ei) 1006 { 1007 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 1008 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1009 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1010 } 1011 1012 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 1013 const struct intel_rps_ei *old, 1014 const struct intel_rps_ei *now, 1015 int threshold) 1016 { 1017 u64 time, c0; 1018 1019 if (old->cz_clock == 0) 1020 return false; 1021 1022 time = now->cz_clock - old->cz_clock; 1023 time *= threshold * dev_priv->mem_freq; 1024 1025 /* Workload can be split between render + media, e.g. SwapBuffers 1026 * being blitted in X after being rendered in mesa. To account for 1027 * this we need to combine both engines into our activity counter. 
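	 *
	 * In effect the check below asks "was the combined render+media C0
	 * residency over this evaluation interval at least <threshold>
	 * percent", with both sides pre-scaled into the same units so that no
	 * division is needed.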
1028 */ 1029 c0 = now->render_c0 - old->render_c0; 1030 c0 += now->media_c0 - old->media_c0; 1031 c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000; 1032 1033 return c0 >= time; 1034 } 1035 1036 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1037 { 1038 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1039 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1040 } 1041 1042 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1043 { 1044 struct intel_rps_ei now; 1045 u32 events = 0; 1046 1047 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1048 return 0; 1049 1050 vlv_c0_read(dev_priv, &now); 1051 if (now.cz_clock == 0) 1052 return 0; 1053 1054 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1055 if (!vlv_c0_above(dev_priv, 1056 &dev_priv->rps.down_ei, &now, 1057 dev_priv->rps.down_threshold)) 1058 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1059 dev_priv->rps.down_ei = now; 1060 } 1061 1062 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1063 if (vlv_c0_above(dev_priv, 1064 &dev_priv->rps.up_ei, &now, 1065 dev_priv->rps.up_threshold)) 1066 events |= GEN6_PM_RP_UP_THRESHOLD; 1067 dev_priv->rps.up_ei = now; 1068 } 1069 1070 return events; 1071 } 1072 1073 static bool any_waiters(struct drm_i915_private *dev_priv) 1074 { 1075 struct intel_engine_cs *ring; 1076 int i; 1077 1078 for_each_ring(ring, dev_priv, i) 1079 if (ring->irq_refcount) 1080 return true; 1081 1082 return false; 1083 } 1084 1085 static void gen6_pm_rps_work(struct work_struct *work) 1086 { 1087 struct drm_i915_private *dev_priv = 1088 container_of(work, struct drm_i915_private, rps.work); 1089 bool client_boost; 1090 int new_delay, adj, min, max; 1091 u32 pm_iir; 1092 1093 spin_lock_irq(&dev_priv->irq_lock); 1094 /* Speed up work cancelation during disabling rps interrupts. */ 1095 if (!dev_priv->rps.interrupts_enabled) { 1096 spin_unlock_irq(&dev_priv->irq_lock); 1097 return; 1098 } 1099 pm_iir = dev_priv->rps.pm_iir; 1100 dev_priv->rps.pm_iir = 0; 1101 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1102 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1103 client_boost = dev_priv->rps.client_boost; 1104 dev_priv->rps.client_boost = false; 1105 spin_unlock_irq(&dev_priv->irq_lock); 1106 1107 /* Make sure we didn't queue anything we're not going to process. */ 1108 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1109 1110 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1111 return; 1112 1113 mutex_lock(&dev_priv->rps.hw_lock); 1114 1115 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1116 1117 adj = dev_priv->rps.last_adj; 1118 new_delay = dev_priv->rps.cur_freq; 1119 min = dev_priv->rps.min_freq_softlimit; 1120 max = dev_priv->rps.max_freq_softlimit; 1121 1122 if (client_boost) { 1123 new_delay = dev_priv->rps.max_freq_softlimit; 1124 adj = 0; 1125 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1126 if (adj > 0) 1127 adj *= 2; 1128 else /* CHV needs even encode values */ 1129 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1130 /* 1131 * For better performance, jump directly 1132 * to RPe if we're below it. 
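		 * (RPe is the "efficient" frequency, tracked in
		 * dev_priv->rps.efficient_freq.)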
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
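	 *
	 * (DOP clock gating is disabled via GEN7_MISCCPCTL below and the saved
	 * value is restored once all flagged slices have been processed.)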
1190 */ 1191 mutex_lock(&dev_priv->dev->struct_mutex); 1192 1193 /* If we've screwed up tracking, just let the interrupt fire again */ 1194 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1195 goto out; 1196 1197 misccpctl = I915_READ(GEN7_MISCCPCTL); 1198 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1199 POSTING_READ(GEN7_MISCCPCTL); 1200 1201 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1202 u32 reg; 1203 1204 slice--; 1205 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1206 break; 1207 1208 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1209 1210 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1211 1212 error_status = I915_READ(reg); 1213 row = GEN7_PARITY_ERROR_ROW(error_status); 1214 bank = GEN7_PARITY_ERROR_BANK(error_status); 1215 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1216 1217 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1218 POSTING_READ(reg); 1219 1220 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1221 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1222 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1223 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1224 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1225 parity_event[5] = NULL; 1226 1227 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1228 KOBJ_CHANGE, parity_event); 1229 1230 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1231 slice, row, bank, subbank); 1232 1233 kfree(parity_event[4]); 1234 kfree(parity_event[3]); 1235 kfree(parity_event[2]); 1236 kfree(parity_event[1]); 1237 } 1238 1239 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1240 1241 out: 1242 WARN_ON(dev_priv->l3_parity.which_slice); 1243 spin_lock_irq(&dev_priv->irq_lock); 1244 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1245 spin_unlock_irq(&dev_priv->irq_lock); 1246 1247 mutex_unlock(&dev_priv->dev->struct_mutex); 1248 } 1249 1250 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1251 { 1252 struct drm_i915_private *dev_priv = dev->dev_private; 1253 1254 if (!HAS_L3_DPF(dev)) 1255 return; 1256 1257 spin_lock(&dev_priv->irq_lock); 1258 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1259 spin_unlock(&dev_priv->irq_lock); 1260 1261 iir &= GT_PARITY_ERROR(dev); 1262 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1263 dev_priv->l3_parity.which_slice |= 1 << 1; 1264 1265 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1266 dev_priv->l3_parity.which_slice |= 1 << 0; 1267 1268 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1269 } 1270 1271 static void ilk_gt_irq_handler(struct drm_device *dev, 1272 struct drm_i915_private *dev_priv, 1273 u32 gt_iir) 1274 { 1275 if (gt_iir & 1276 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1277 notify_ring(&dev_priv->ring[RCS]); 1278 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1279 notify_ring(&dev_priv->ring[VCS]); 1280 } 1281 1282 static void snb_gt_irq_handler(struct drm_device *dev, 1283 struct drm_i915_private *dev_priv, 1284 u32 gt_iir) 1285 { 1286 1287 if (gt_iir & 1288 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1289 notify_ring(&dev_priv->ring[RCS]); 1290 if (gt_iir & GT_BSD_USER_INTERRUPT) 1291 notify_ring(&dev_priv->ring[VCS]); 1292 if (gt_iir & GT_BLT_USER_INTERRUPT) 1293 notify_ring(&dev_priv->ring[BCS]); 1294 1295 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1296 GT_BSD_CS_ERROR_INTERRUPT | 1297 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1298 DRM_DEBUG("Command parser 
error, gt_iir 0x%08x\n", gt_iir); 1299 1300 if (gt_iir & GT_PARITY_ERROR(dev)) 1301 ivybridge_parity_error_irq_handler(dev, gt_iir); 1302 } 1303 1304 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1305 u32 master_ctl) 1306 { 1307 irqreturn_t ret = IRQ_NONE; 1308 1309 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1310 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); 1311 if (tmp) { 1312 I915_WRITE_FW(GEN8_GT_IIR(0), tmp); 1313 ret = IRQ_HANDLED; 1314 1315 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) 1316 intel_lrc_irq_handler(&dev_priv->ring[RCS]); 1317 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) 1318 notify_ring(&dev_priv->ring[RCS]); 1319 1320 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) 1321 intel_lrc_irq_handler(&dev_priv->ring[BCS]); 1322 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) 1323 notify_ring(&dev_priv->ring[BCS]); 1324 } else 1325 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1326 } 1327 1328 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1329 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); 1330 if (tmp) { 1331 I915_WRITE_FW(GEN8_GT_IIR(1), tmp); 1332 ret = IRQ_HANDLED; 1333 1334 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) 1335 intel_lrc_irq_handler(&dev_priv->ring[VCS]); 1336 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) 1337 notify_ring(&dev_priv->ring[VCS]); 1338 1339 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) 1340 intel_lrc_irq_handler(&dev_priv->ring[VCS2]); 1341 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) 1342 notify_ring(&dev_priv->ring[VCS2]); 1343 } else 1344 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1345 } 1346 1347 if (master_ctl & GEN8_GT_VECS_IRQ) { 1348 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); 1349 if (tmp) { 1350 I915_WRITE_FW(GEN8_GT_IIR(3), tmp); 1351 ret = IRQ_HANDLED; 1352 1353 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) 1354 intel_lrc_irq_handler(&dev_priv->ring[VECS]); 1355 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) 1356 notify_ring(&dev_priv->ring[VECS]); 1357 } else 1358 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1359 } 1360 1361 if (master_ctl & GEN8_GT_PM_IRQ) { 1362 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); 1363 if (tmp & dev_priv->pm_rps_events) { 1364 I915_WRITE_FW(GEN8_GT_IIR(2), 1365 tmp & dev_priv->pm_rps_events); 1366 ret = IRQ_HANDLED; 1367 gen6_rps_irq_handler(dev_priv, tmp); 1368 } else 1369 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1370 } 1371 1372 return ret; 1373 } 1374 1375 #define HPD_STORM_DETECT_PERIOD 1000 1376 #define HPD_STORM_THRESHOLD 5 1377 1378 static int pch_port_to_hotplug_shift(enum port port) 1379 { 1380 switch (port) { 1381 case PORT_A: 1382 case PORT_E: 1383 default: 1384 return -1; 1385 case PORT_B: 1386 return 0; 1387 case PORT_C: 1388 return 8; 1389 case PORT_D: 1390 return 16; 1391 } 1392 } 1393 1394 static int i915_port_to_hotplug_shift(enum port port) 1395 { 1396 switch (port) { 1397 case PORT_A: 1398 case PORT_E: 1399 default: 1400 return -1; 1401 case PORT_B: 1402 return 17; 1403 case PORT_C: 1404 return 19; 1405 case PORT_D: 1406 return 21; 1407 } 1408 } 1409 1410 static enum port get_port_from_pin(enum hpd_pin pin) 1411 { 1412 switch (pin) { 1413 case HPD_PORT_B: 1414 return PORT_B; 1415 case HPD_PORT_C: 1416 return PORT_C; 1417 case HPD_PORT_D: 1418 return PORT_D; 1419 default: 1420 return PORT_A; /* no hpd */ 1421 } 1422 } 1423 1424 static void 
intel_hpd_irq_handler(struct drm_device *dev, 1425 u32 hotplug_trigger, 1426 u32 dig_hotplug_reg, 1427 const u32 hpd[HPD_NUM_PINS]) 1428 { 1429 struct drm_i915_private *dev_priv = dev->dev_private; 1430 int i; 1431 enum port port; 1432 bool storm_detected = false; 1433 bool queue_dig = false, queue_hp = false; 1434 u32 dig_shift; 1435 u32 dig_port_mask = 0; 1436 1437 if (!hotplug_trigger) 1438 return; 1439 1440 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", 1441 hotplug_trigger, dig_hotplug_reg); 1442 1443 spin_lock(&dev_priv->irq_lock); 1444 for (i = 1; i < HPD_NUM_PINS; i++) { 1445 if (!(hpd[i] & hotplug_trigger)) 1446 continue; 1447 1448 port = get_port_from_pin(i); 1449 if (port && dev_priv->hpd_irq_port[port]) { 1450 bool long_hpd; 1451 1452 if (!HAS_GMCH_DISPLAY(dev_priv)) { 1453 dig_shift = pch_port_to_hotplug_shift(port); 1454 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1455 } else { 1456 dig_shift = i915_port_to_hotplug_shift(port); 1457 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1458 } 1459 1460 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", 1461 port_name(port), 1462 long_hpd ? "long" : "short"); 1463 /* for long HPD pulses we want to have the digital queue happen, 1464 but we still want HPD storm detection to function. */ 1465 if (long_hpd) { 1466 dev_priv->long_hpd_port_mask |= (1 << port); 1467 dig_port_mask |= hpd[i]; 1468 } else { 1469 /* for short HPD just trigger the digital queue */ 1470 dev_priv->short_hpd_port_mask |= (1 << port); 1471 hotplug_trigger &= ~hpd[i]; 1472 } 1473 queue_dig = true; 1474 } 1475 } 1476 1477 for (i = 1; i < HPD_NUM_PINS; i++) { 1478 if (hpd[i] & hotplug_trigger && 1479 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1480 /* 1481 * On GMCH platforms the interrupt mask bits only 1482 * prevent irq generation, not the setting of the 1483 * hotplug bits itself. So only WARN about unexpected 1484 * interrupts on saner platforms. 1485 */ 1486 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), 1487 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1488 hotplug_trigger, i, hpd[i]); 1489 1490 continue; 1491 } 1492 1493 if (!(hpd[i] & hotplug_trigger) || 1494 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1495 continue; 1496 1497 if (!(dig_port_mask & hpd[i])) { 1498 dev_priv->hpd_event_bits |= (1 << i); 1499 queue_hp = true; 1500 } 1501 1502 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1503 dev_priv->hpd_stats[i].hpd_last_jiffies 1504 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1505 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1506 dev_priv->hpd_stats[i].hpd_cnt = 0; 1507 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1508 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1509 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1510 dev_priv->hpd_event_bits &= ~(1 << i); 1511 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1512 storm_detected = true; 1513 } else { 1514 dev_priv->hpd_stats[i].hpd_cnt++; 1515 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1516 dev_priv->hpd_stats[i].hpd_cnt); 1517 } 1518 } 1519 1520 if (storm_detected) 1521 dev_priv->display.hpd_irq_setup(dev); 1522 spin_unlock(&dev_priv->irq_lock); 1523 1524 /* 1525 * Our hotplug handler can grab modeset locks (by calling down into the 1526 * fb helpers). 
Hence it must not be run on our own dev-priv->wq work 1527 * queue for otherwise the flush_work in the pageflip code will 1528 * deadlock. 1529 */ 1530 if (queue_dig) 1531 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); 1532 if (queue_hp) 1533 schedule_work(&dev_priv->hotplug_work); 1534 } 1535 1536 static void gmbus_irq_handler(struct drm_device *dev) 1537 { 1538 struct drm_i915_private *dev_priv = dev->dev_private; 1539 1540 wake_up_all(&dev_priv->gmbus_wait_queue); 1541 } 1542 1543 static void dp_aux_irq_handler(struct drm_device *dev) 1544 { 1545 struct drm_i915_private *dev_priv = dev->dev_private; 1546 1547 wake_up_all(&dev_priv->gmbus_wait_queue); 1548 } 1549 1550 #if defined(CONFIG_DEBUG_FS) 1551 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1552 uint32_t crc0, uint32_t crc1, 1553 uint32_t crc2, uint32_t crc3, 1554 uint32_t crc4) 1555 { 1556 struct drm_i915_private *dev_priv = dev->dev_private; 1557 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1558 struct intel_pipe_crc_entry *entry; 1559 int head, tail; 1560 1561 spin_lock(&pipe_crc->lock); 1562 1563 if (!pipe_crc->entries) { 1564 spin_unlock(&pipe_crc->lock); 1565 DRM_DEBUG_KMS("spurious interrupt\n"); 1566 return; 1567 } 1568 1569 head = pipe_crc->head; 1570 tail = pipe_crc->tail; 1571 1572 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1573 spin_unlock(&pipe_crc->lock); 1574 DRM_ERROR("CRC buffer overflowing\n"); 1575 return; 1576 } 1577 1578 entry = &pipe_crc->entries[head]; 1579 1580 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1581 entry->crc[0] = crc0; 1582 entry->crc[1] = crc1; 1583 entry->crc[2] = crc2; 1584 entry->crc[3] = crc3; 1585 entry->crc[4] = crc4; 1586 1587 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1588 pipe_crc->head = head; 1589 1590 spin_unlock(&pipe_crc->lock); 1591 1592 wake_up_interruptible(&pipe_crc->wq); 1593 } 1594 #else 1595 static inline void 1596 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1597 uint32_t crc0, uint32_t crc1, 1598 uint32_t crc2, uint32_t crc3, 1599 uint32_t crc4) {} 1600 #endif 1601 1602 1603 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1604 { 1605 struct drm_i915_private *dev_priv = dev->dev_private; 1606 1607 display_pipe_crc_irq_handler(dev, pipe, 1608 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1609 0, 0, 0, 0); 1610 } 1611 1612 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1613 { 1614 struct drm_i915_private *dev_priv = dev->dev_private; 1615 1616 display_pipe_crc_irq_handler(dev, pipe, 1617 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1618 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1619 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1620 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1621 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1622 } 1623 1624 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1625 { 1626 struct drm_i915_private *dev_priv = dev->dev_private; 1627 uint32_t res1, res2; 1628 1629 if (INTEL_INFO(dev)->gen >= 3) 1630 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1631 else 1632 res1 = 0; 1633 1634 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1635 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1636 else 1637 res2 = 0; 1638 1639 display_pipe_crc_irq_handler(dev, pipe, 1640 I915_READ(PIPE_CRC_RES_RED(pipe)), 1641 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1642 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1643 res1, res2); 1644 } 1645 1646 /* The RPS events need forcewake, so we add them to a work queue and mask their 
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared
before we clear IIR, or else we 1763 * may miss hotplug events. 1764 */ 1765 POSTING_READ(PORT_HOTPLUG_STAT); 1766 1767 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 1768 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1769 1770 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 1771 } else { 1772 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1773 1774 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 1775 } 1776 1777 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 1778 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1779 dp_aux_irq_handler(dev); 1780 } 1781 } 1782 1783 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1784 { 1785 struct drm_device *dev = arg; 1786 struct drm_i915_private *dev_priv = dev->dev_private; 1787 u32 iir, gt_iir, pm_iir; 1788 irqreturn_t ret = IRQ_NONE; 1789 1790 if (!intel_irqs_enabled(dev_priv)) 1791 return IRQ_NONE; 1792 1793 while (true) { 1794 /* Find, clear, then process each source of interrupt */ 1795 1796 gt_iir = I915_READ(GTIIR); 1797 if (gt_iir) 1798 I915_WRITE(GTIIR, gt_iir); 1799 1800 pm_iir = I915_READ(GEN6_PMIIR); 1801 if (pm_iir) 1802 I915_WRITE(GEN6_PMIIR, pm_iir); 1803 1804 iir = I915_READ(VLV_IIR); 1805 if (iir) { 1806 /* Consume port before clearing IIR or we'll miss events */ 1807 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1808 i9xx_hpd_irq_handler(dev); 1809 I915_WRITE(VLV_IIR, iir); 1810 } 1811 1812 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1813 goto out; 1814 1815 ret = IRQ_HANDLED; 1816 1817 if (gt_iir) 1818 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1819 if (pm_iir) 1820 gen6_rps_irq_handler(dev_priv, pm_iir); 1821 /* Call regardless, as some status bits might not be 1822 * signalled in iir */ 1823 valleyview_pipestat_irq_handler(dev, iir); 1824 } 1825 1826 out: 1827 return ret; 1828 } 1829 1830 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1831 { 1832 struct drm_device *dev = arg; 1833 struct drm_i915_private *dev_priv = dev->dev_private; 1834 u32 master_ctl, iir; 1835 irqreturn_t ret = IRQ_NONE; 1836 1837 if (!intel_irqs_enabled(dev_priv)) 1838 return IRQ_NONE; 1839 1840 for (;;) { 1841 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1842 iir = I915_READ(VLV_IIR); 1843 1844 if (master_ctl == 0 && iir == 0) 1845 break; 1846 1847 ret = IRQ_HANDLED; 1848 1849 I915_WRITE(GEN8_MASTER_IRQ, 0); 1850 1851 /* Find, clear, then process each source of interrupt */ 1852 1853 if (iir) { 1854 /* Consume port before clearing IIR or we'll miss events */ 1855 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1856 i9xx_hpd_irq_handler(dev); 1857 I915_WRITE(VLV_IIR, iir); 1858 } 1859 1860 gen8_gt_irq_handler(dev_priv, master_ctl); 1861 1862 /* Call regardless, as some status bits might not be 1863 * signalled in iir */ 1864 valleyview_pipestat_irq_handler(dev, iir); 1865 1866 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1867 POSTING_READ(GEN8_MASTER_IRQ); 1868 } 1869 1870 return ret; 1871 } 1872 1873 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1874 { 1875 struct drm_i915_private *dev_priv = dev->dev_private; 1876 int pipe; 1877 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1878 u32 dig_hotplug_reg; 1879 1880 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1881 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1882 1883 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 1884 1885 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1886 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1887 SDE_AUDIO_POWER_SHIFT); 1888 
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1889 port_name(port)); 1890 } 1891 1892 if (pch_iir & SDE_AUX_MASK) 1893 dp_aux_irq_handler(dev); 1894 1895 if (pch_iir & SDE_GMBUS) 1896 gmbus_irq_handler(dev); 1897 1898 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1899 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1900 1901 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1902 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1903 1904 if (pch_iir & SDE_POISON) 1905 DRM_ERROR("PCH poison interrupt\n"); 1906 1907 if (pch_iir & SDE_FDI_MASK) 1908 for_each_pipe(dev_priv, pipe) 1909 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1910 pipe_name(pipe), 1911 I915_READ(FDI_RX_IIR(pipe))); 1912 1913 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1914 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1915 1916 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1917 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1918 1919 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1920 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1921 1922 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1923 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1924 } 1925 1926 static void ivb_err_int_handler(struct drm_device *dev) 1927 { 1928 struct drm_i915_private *dev_priv = dev->dev_private; 1929 u32 err_int = I915_READ(GEN7_ERR_INT); 1930 enum pipe pipe; 1931 1932 if (err_int & ERR_INT_POISON) 1933 DRM_ERROR("Poison interrupt\n"); 1934 1935 for_each_pipe(dev_priv, pipe) { 1936 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1937 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1938 1939 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1940 if (IS_IVYBRIDGE(dev)) 1941 ivb_pipe_crc_irq_handler(dev, pipe); 1942 else 1943 hsw_pipe_crc_irq_handler(dev, pipe); 1944 } 1945 } 1946 1947 I915_WRITE(GEN7_ERR_INT, err_int); 1948 } 1949 1950 static void cpt_serr_int_handler(struct drm_device *dev) 1951 { 1952 struct drm_i915_private *dev_priv = dev->dev_private; 1953 u32 serr_int = I915_READ(SERR_INT); 1954 1955 if (serr_int & SERR_INT_POISON) 1956 DRM_ERROR("PCH poison interrupt\n"); 1957 1958 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1959 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1960 1961 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1962 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1963 1964 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1965 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 1966 1967 I915_WRITE(SERR_INT, serr_int); 1968 } 1969 1970 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1971 { 1972 struct drm_i915_private *dev_priv = dev->dev_private; 1973 int pipe; 1974 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1975 u32 dig_hotplug_reg; 1976 1977 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1978 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1979 1980 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 1981 1982 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1983 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1984 SDE_AUDIO_POWER_SHIFT_CPT); 1985 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1986 port_name(port)); 1987 } 1988 1989 if (pch_iir & SDE_AUX_MASK_CPT) 1990 dp_aux_irq_handler(dev); 1991 1992 if (pch_iir & SDE_GMBUS_CPT) 1993 gmbus_irq_handler(dev); 1994 1995 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1996 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1997 1998 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1999 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2000 2001 if (pch_iir & 
SDE_FDI_MASK_CPT) 2002 for_each_pipe(dev_priv, pipe) 2003 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2004 pipe_name(pipe), 2005 I915_READ(FDI_RX_IIR(pipe))); 2006 2007 if (pch_iir & SDE_ERROR_CPT) 2008 cpt_serr_int_handler(dev); 2009 } 2010 2011 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2012 { 2013 struct drm_i915_private *dev_priv = dev->dev_private; 2014 enum pipe pipe; 2015 2016 if (de_iir & DE_AUX_CHANNEL_A) 2017 dp_aux_irq_handler(dev); 2018 2019 if (de_iir & DE_GSE) 2020 intel_opregion_asle_intr(dev); 2021 2022 if (de_iir & DE_POISON) 2023 DRM_ERROR("Poison interrupt\n"); 2024 2025 for_each_pipe(dev_priv, pipe) { 2026 if (de_iir & DE_PIPE_VBLANK(pipe) && 2027 intel_pipe_handle_vblank(dev, pipe)) 2028 intel_check_page_flip(dev, pipe); 2029 2030 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2031 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2032 2033 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2034 i9xx_pipe_crc_irq_handler(dev, pipe); 2035 2036 /* plane/pipes map 1:1 on ilk+ */ 2037 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2038 intel_prepare_page_flip(dev, pipe); 2039 intel_finish_page_flip_plane(dev, pipe); 2040 } 2041 } 2042 2043 /* check event from PCH */ 2044 if (de_iir & DE_PCH_EVENT) { 2045 u32 pch_iir = I915_READ(SDEIIR); 2046 2047 if (HAS_PCH_CPT(dev)) 2048 cpt_irq_handler(dev, pch_iir); 2049 else 2050 ibx_irq_handler(dev, pch_iir); 2051 2052 /* should clear PCH hotplug event before clear CPU irq */ 2053 I915_WRITE(SDEIIR, pch_iir); 2054 } 2055 2056 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2057 ironlake_rps_change_irq_handler(dev); 2058 } 2059 2060 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2061 { 2062 struct drm_i915_private *dev_priv = dev->dev_private; 2063 enum pipe pipe; 2064 2065 if (de_iir & DE_ERR_INT_IVB) 2066 ivb_err_int_handler(dev); 2067 2068 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2069 dp_aux_irq_handler(dev); 2070 2071 if (de_iir & DE_GSE_IVB) 2072 intel_opregion_asle_intr(dev); 2073 2074 for_each_pipe(dev_priv, pipe) { 2075 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2076 intel_pipe_handle_vblank(dev, pipe)) 2077 intel_check_page_flip(dev, pipe); 2078 2079 /* plane/pipes map 1:1 on ilk+ */ 2080 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2081 intel_prepare_page_flip(dev, pipe); 2082 intel_finish_page_flip_plane(dev, pipe); 2083 } 2084 } 2085 2086 /* check event from PCH */ 2087 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2088 u32 pch_iir = I915_READ(SDEIIR); 2089 2090 cpt_irq_handler(dev, pch_iir); 2091 2092 /* clear PCH hotplug event before clear CPU irq */ 2093 I915_WRITE(SDEIIR, pch_iir); 2094 } 2095 } 2096 2097 /* 2098 * To handle irqs with the minimum potential races with fresh interrupts, we: 2099 * 1 - Disable Master Interrupt Control. 2100 * 2 - Find the source(s) of the interrupt. 2101 * 3 - Clear the Interrupt Identity bits (IIR). 2102 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2103 * 5 - Re-enable Master Interrupt Control. 2104 */ 2105 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2106 { 2107 struct drm_device *dev = arg; 2108 struct drm_i915_private *dev_priv = dev->dev_private; 2109 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2110 irqreturn_t ret = IRQ_NONE; 2111 2112 if (!intel_irqs_enabled(dev_priv)) 2113 return IRQ_NONE; 2114 2115 /* We get interrupts on unclaimed registers, so check for this before we 2116 * do any I915_{READ,WRITE}. 
*/ 2117 intel_uncore_check_errors(dev); 2118 2119 /* disable master interrupt before clearing iir */ 2120 de_ier = I915_READ(DEIER); 2121 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2122 POSTING_READ(DEIER); 2123 2124 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2125 * interrupts will will be stored on its back queue, and then we'll be 2126 * able to process them after we restore SDEIER (as soon as we restore 2127 * it, we'll get an interrupt if SDEIIR still has something to process 2128 * due to its back queue). */ 2129 if (!HAS_PCH_NOP(dev)) { 2130 sde_ier = I915_READ(SDEIER); 2131 I915_WRITE(SDEIER, 0); 2132 POSTING_READ(SDEIER); 2133 } 2134 2135 /* Find, clear, then process each source of interrupt */ 2136 2137 gt_iir = I915_READ(GTIIR); 2138 if (gt_iir) { 2139 I915_WRITE(GTIIR, gt_iir); 2140 ret = IRQ_HANDLED; 2141 if (INTEL_INFO(dev)->gen >= 6) 2142 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2143 else 2144 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2145 } 2146 2147 de_iir = I915_READ(DEIIR); 2148 if (de_iir) { 2149 I915_WRITE(DEIIR, de_iir); 2150 ret = IRQ_HANDLED; 2151 if (INTEL_INFO(dev)->gen >= 7) 2152 ivb_display_irq_handler(dev, de_iir); 2153 else 2154 ilk_display_irq_handler(dev, de_iir); 2155 } 2156 2157 if (INTEL_INFO(dev)->gen >= 6) { 2158 u32 pm_iir = I915_READ(GEN6_PMIIR); 2159 if (pm_iir) { 2160 I915_WRITE(GEN6_PMIIR, pm_iir); 2161 ret = IRQ_HANDLED; 2162 gen6_rps_irq_handler(dev_priv, pm_iir); 2163 } 2164 } 2165 2166 I915_WRITE(DEIER, de_ier); 2167 POSTING_READ(DEIER); 2168 if (!HAS_PCH_NOP(dev)) { 2169 I915_WRITE(SDEIER, sde_ier); 2170 POSTING_READ(SDEIER); 2171 } 2172 2173 return ret; 2174 } 2175 2176 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status) 2177 { 2178 struct drm_i915_private *dev_priv = dev->dev_private; 2179 uint32_t hp_control; 2180 uint32_t hp_trigger; 2181 2182 /* Get the status */ 2183 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK; 2184 hp_control = I915_READ(BXT_HOTPLUG_CTL); 2185 2186 /* Hotplug not enabled ? */ 2187 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) { 2188 DRM_ERROR("Interrupt when HPD disabled\n"); 2189 return; 2190 } 2191 2192 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2193 hp_control & BXT_HOTPLUG_CTL_MASK); 2194 2195 /* Check for HPD storm and schedule bottom half */ 2196 intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt); 2197 2198 /* 2199 * FIXME: Save the hot plug status for bottom half before 2200 * clearing the sticky status bits, else the status will be 2201 * lost. 
2202 */ 2203 2204 /* Clear sticky bits in hpd status */ 2205 I915_WRITE(BXT_HOTPLUG_CTL, hp_control); 2206 } 2207 2208 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2209 { 2210 struct drm_device *dev = arg; 2211 struct drm_i915_private *dev_priv = dev->dev_private; 2212 u32 master_ctl; 2213 irqreturn_t ret = IRQ_NONE; 2214 uint32_t tmp = 0; 2215 enum pipe pipe; 2216 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2217 2218 if (!intel_irqs_enabled(dev_priv)) 2219 return IRQ_NONE; 2220 2221 if (IS_GEN9(dev)) 2222 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2223 GEN9_AUX_CHANNEL_D; 2224 2225 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2226 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2227 if (!master_ctl) 2228 return IRQ_NONE; 2229 2230 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2231 2232 /* Find, clear, then process each source of interrupt */ 2233 2234 ret = gen8_gt_irq_handler(dev_priv, master_ctl); 2235 2236 if (master_ctl & GEN8_DE_MISC_IRQ) { 2237 tmp = I915_READ(GEN8_DE_MISC_IIR); 2238 if (tmp) { 2239 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2240 ret = IRQ_HANDLED; 2241 if (tmp & GEN8_DE_MISC_GSE) 2242 intel_opregion_asle_intr(dev); 2243 else 2244 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2245 } 2246 else 2247 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2248 } 2249 2250 if (master_ctl & GEN8_DE_PORT_IRQ) { 2251 tmp = I915_READ(GEN8_DE_PORT_IIR); 2252 if (tmp) { 2253 bool found = false; 2254 2255 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2256 ret = IRQ_HANDLED; 2257 2258 if (tmp & aux_mask) { 2259 dp_aux_irq_handler(dev); 2260 found = true; 2261 } 2262 2263 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) { 2264 bxt_hpd_handler(dev, tmp); 2265 found = true; 2266 } 2267 2268 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) { 2269 gmbus_irq_handler(dev); 2270 found = true; 2271 } 2272 2273 if (!found) 2274 DRM_ERROR("Unexpected DE Port interrupt\n"); 2275 } 2276 else 2277 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2278 } 2279 2280 for_each_pipe(dev_priv, pipe) { 2281 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2282 2283 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2284 continue; 2285 2286 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2287 if (pipe_iir) { 2288 ret = IRQ_HANDLED; 2289 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2290 2291 if (pipe_iir & GEN8_PIPE_VBLANK && 2292 intel_pipe_handle_vblank(dev, pipe)) 2293 intel_check_page_flip(dev, pipe); 2294 2295 if (IS_GEN9(dev)) 2296 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2297 else 2298 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2299 2300 if (flip_done) { 2301 intel_prepare_page_flip(dev, pipe); 2302 intel_finish_page_flip_plane(dev, pipe); 2303 } 2304 2305 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2306 hsw_pipe_crc_irq_handler(dev, pipe); 2307 2308 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2309 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2310 pipe); 2311 2312 2313 if (IS_GEN9(dev)) 2314 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2315 else 2316 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2317 2318 if (fault_errors) 2319 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2320 pipe_name(pipe), 2321 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2322 } else 2323 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2324 } 2325 2326 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2327 master_ctl & GEN8_DE_PCH_IRQ) { 2328 /* 2329 * FIXME(BDW): Assume for now that the new interrupt handling 2330 * scheme also closed the SDE interrupt handling race we've seen 2331 * on 
older pch-split platforms. But this needs testing. 2332 */ 2333 u32 pch_iir = I915_READ(SDEIIR); 2334 if (pch_iir) { 2335 I915_WRITE(SDEIIR, pch_iir); 2336 ret = IRQ_HANDLED; 2337 cpt_irq_handler(dev, pch_iir); 2338 } else 2339 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2340 2341 } 2342 2343 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2344 POSTING_READ_FW(GEN8_MASTER_IRQ); 2345 2346 return ret; 2347 } 2348 2349 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2350 bool reset_completed) 2351 { 2352 struct intel_engine_cs *ring; 2353 int i; 2354 2355 /* 2356 * Notify all waiters for GPU completion events that reset state has 2357 * been changed, and that they need to restart their wait after 2358 * checking for potential errors (and bail out to drop locks if there is 2359 * a gpu reset pending so that i915_error_work_func can acquire them). 2360 */ 2361 2362 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2363 for_each_ring(ring, dev_priv, i) 2364 wake_up_all(&ring->irq_queue); 2365 2366 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2367 wake_up_all(&dev_priv->pending_flip_queue); 2368 2369 /* 2370 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2371 * reset state is cleared. 2372 */ 2373 if (reset_completed) 2374 wake_up_all(&dev_priv->gpu_error.reset_queue); 2375 } 2376 2377 /** 2378 * i915_reset_and_wakeup - do process context error handling work 2379 * 2380 * Fire an error uevent so userspace can see that a hang or error 2381 * was detected. 2382 */ 2383 static void i915_reset_and_wakeup(struct drm_device *dev) 2384 { 2385 struct drm_i915_private *dev_priv = to_i915(dev); 2386 struct i915_gpu_error *error = &dev_priv->gpu_error; 2387 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2388 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2389 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2390 int ret; 2391 2392 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2393 2394 /* 2395 * Note that there's only one work item which does gpu resets, so we 2396 * need not worry about concurrent gpu resets potentially incrementing 2397 * error->reset_counter twice. We only need to take care of another 2398 * racing irq/hangcheck declaring the gpu dead for a second time. A 2399 * quick check for that is good enough: schedule_work ensures the 2400 * correct ordering between hang detection and this work item, and since 2401 * the reset in-progress bit is only ever set by code outside of this 2402 * work we don't need to worry about any other races. 2403 */ 2404 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2405 DRM_DEBUG_DRIVER("resetting chip\n"); 2406 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2407 reset_event); 2408 2409 /* 2410 * In most cases it's guaranteed that we get here with an RPM 2411 * reference held, for example because there is a pending GPU 2412 * request that won't finish until the reset is done. This 2413 * isn't the case at least when we get here by doing a 2414 * simulated reset via debugs, so get an RPM reference. 2415 */ 2416 intel_runtime_pm_get(dev_priv); 2417 2418 intel_prepare_reset(dev); 2419 2420 /* 2421 * All state reset _must_ be completed before we update the 2422 * reset counter, for otherwise waiters might miss the reset 2423 * pending state and not properly drop locks, resulting in 2424 * deadlocks with the reset work. 
2425 */ 2426 ret = i915_reset(dev); 2427 2428 intel_finish_reset(dev); 2429 2430 intel_runtime_pm_put(dev_priv); 2431 2432 if (ret == 0) { 2433 /* 2434 * After all the gem state is reset, increment the reset 2435 * counter and wake up everyone waiting for the reset to 2436 * complete. 2437 * 2438 * Since unlock operations are a one-sided barrier only, 2439 * we need to insert a barrier here to order any seqno 2440 * updates before 2441 * the counter increment. 2442 */ 2443 smp_mb__before_atomic(); 2444 atomic_inc(&dev_priv->gpu_error.reset_counter); 2445 2446 kobject_uevent_env(&dev->primary->kdev->kobj, 2447 KOBJ_CHANGE, reset_done_event); 2448 } else { 2449 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2450 } 2451 2452 /* 2453 * Note: The wake_up also serves as a memory barrier so that 2454 * waiters see the update value of the reset counter atomic_t. 2455 */ 2456 i915_error_wake_up(dev_priv, true); 2457 } 2458 } 2459 2460 static void i915_report_and_clear_eir(struct drm_device *dev) 2461 { 2462 struct drm_i915_private *dev_priv = dev->dev_private; 2463 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2464 u32 eir = I915_READ(EIR); 2465 int pipe, i; 2466 2467 if (!eir) 2468 return; 2469 2470 pr_err("render error detected, EIR: 0x%08x\n", eir); 2471 2472 i915_get_extra_instdone(dev, instdone); 2473 2474 if (IS_G4X(dev)) { 2475 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2476 u32 ipeir = I915_READ(IPEIR_I965); 2477 2478 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2479 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2480 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2481 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2482 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2483 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2484 I915_WRITE(IPEIR_I965, ipeir); 2485 POSTING_READ(IPEIR_I965); 2486 } 2487 if (eir & GM45_ERROR_PAGE_TABLE) { 2488 u32 pgtbl_err = I915_READ(PGTBL_ER); 2489 pr_err("page table error\n"); 2490 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2491 I915_WRITE(PGTBL_ER, pgtbl_err); 2492 POSTING_READ(PGTBL_ER); 2493 } 2494 } 2495 2496 if (!IS_GEN2(dev)) { 2497 if (eir & I915_ERROR_PAGE_TABLE) { 2498 u32 pgtbl_err = I915_READ(PGTBL_ER); 2499 pr_err("page table error\n"); 2500 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2501 I915_WRITE(PGTBL_ER, pgtbl_err); 2502 POSTING_READ(PGTBL_ER); 2503 } 2504 } 2505 2506 if (eir & I915_ERROR_MEMORY_REFRESH) { 2507 pr_err("memory refresh error:\n"); 2508 for_each_pipe(dev_priv, pipe) 2509 pr_err("pipe %c stat: 0x%08x\n", 2510 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2511 /* pipestat has already been acked */ 2512 } 2513 if (eir & I915_ERROR_INSTRUCTION) { 2514 pr_err("instruction error\n"); 2515 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2516 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2517 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2518 if (INTEL_INFO(dev)->gen < 4) { 2519 u32 ipeir = I915_READ(IPEIR); 2520 2521 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2522 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2523 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2524 I915_WRITE(IPEIR, ipeir); 2525 POSTING_READ(IPEIR); 2526 } else { 2527 u32 ipeir = I915_READ(IPEIR_I965); 2528 2529 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2530 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2531 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2532 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2533 I915_WRITE(IPEIR_I965, ipeir); 2534 POSTING_READ(IPEIR_I965); 2535 } 2536 } 2537 2538 I915_WRITE(EIR, eir); 2539 
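/* Flush the clearing write, then re-read EIR to catch any error bits that remain stuck */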
POSTING_READ(EIR);
2540 	eir = I915_READ(EIR);
2541 	if (eir) {
2542 		/*
2543 		 * some errors might have become stuck,
2544 		 * mask them.
2545 		 */
2546 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2547 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2548 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2549 	}
2550 }
2551 
2552 /**
2553  * i915_handle_error - handle a gpu error
2554  * @dev: drm device
2555  *
2556  * Do some basic checking of register state at error time and
2557  * dump it to the syslog. Also call i915_capture_error_state() to make
2558  * sure we get a record and make it available in debugfs. Fire a uevent
2559  * so userspace knows something bad happened (should trigger collection
2560  * of a ring dump etc.).
2561  */
2562 void i915_handle_error(struct drm_device *dev, bool wedged,
2563 		       const char *fmt, ...)
2564 {
2565 	struct drm_i915_private *dev_priv = dev->dev_private;
2566 	va_list args;
2567 	char error_msg[80];
2568 
2569 	va_start(args, fmt);
2570 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2571 	va_end(args);
2572 
2573 	i915_capture_error_state(dev, wedged, error_msg);
2574 	i915_report_and_clear_eir(dev);
2575 
2576 	if (wedged) {
2577 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2578 				&dev_priv->gpu_error.reset_counter);
2579 
2580 		/*
2581 		 * Wakeup waiting processes so that the reset function
2582 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2583 		 * various locks. By bumping the reset counter first, the woken
2584 		 * processes will see a reset in progress and back off,
2585 		 * releasing their locks and then wait for the reset completion.
2586 		 * We must do this for _all_ gpu waiters that might hold locks
2587 		 * that the reset work needs to acquire.
2588 		 *
2589 		 * Note: The wake_up serves as the required memory barrier to
2590 		 * ensure that the waiters see the updated value of the reset
2591 		 * counter atomic_t.
2592 		 */
2593 		i915_error_wake_up(dev_priv, false);
2594 	}
2595 
2596 	i915_reset_and_wakeup(dev);
2597 }
2598 
2599 /* Called from drm generic code, passed 'crtc' which
2600  * we use as a pipe index
2601  */
2602 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2603 {
2604 	struct drm_i915_private *dev_priv = dev->dev_private;
2605 	unsigned long irqflags;
2606 
2607 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2608 	if (INTEL_INFO(dev)->gen >= 4)
2609 		i915_enable_pipestat(dev_priv, pipe,
2610 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2611 	else
2612 		i915_enable_pipestat(dev_priv, pipe,
2613 				     PIPE_VBLANK_INTERRUPT_STATUS);
2614 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2615 
2616 	return 0;
2617 }
2618 
2619 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2620 {
2621 	struct drm_i915_private *dev_priv = dev->dev_private;
2622 	unsigned long irqflags;
2623 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ?
DE_PIPE_VBLANK_IVB(pipe) : 2624 DE_PIPE_VBLANK(pipe); 2625 2626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2627 ironlake_enable_display_irq(dev_priv, bit); 2628 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2629 2630 return 0; 2631 } 2632 2633 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2634 { 2635 struct drm_i915_private *dev_priv = dev->dev_private; 2636 unsigned long irqflags; 2637 2638 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2639 i915_enable_pipestat(dev_priv, pipe, 2640 PIPE_START_VBLANK_INTERRUPT_STATUS); 2641 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2642 2643 return 0; 2644 } 2645 2646 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2647 { 2648 struct drm_i915_private *dev_priv = dev->dev_private; 2649 unsigned long irqflags; 2650 2651 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2652 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2653 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2654 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2655 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2656 return 0; 2657 } 2658 2659 /* Called from drm generic code, passed 'crtc' which 2660 * we use as a pipe index 2661 */ 2662 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2663 { 2664 struct drm_i915_private *dev_priv = dev->dev_private; 2665 unsigned long irqflags; 2666 2667 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2668 i915_disable_pipestat(dev_priv, pipe, 2669 PIPE_VBLANK_INTERRUPT_STATUS | 2670 PIPE_START_VBLANK_INTERRUPT_STATUS); 2671 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2672 } 2673 2674 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2675 { 2676 struct drm_i915_private *dev_priv = dev->dev_private; 2677 unsigned long irqflags; 2678 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2679 DE_PIPE_VBLANK(pipe); 2680 2681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2682 ironlake_disable_display_irq(dev_priv, bit); 2683 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2684 } 2685 2686 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2687 { 2688 struct drm_i915_private *dev_priv = dev->dev_private; 2689 unsigned long irqflags; 2690 2691 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2692 i915_disable_pipestat(dev_priv, pipe, 2693 PIPE_START_VBLANK_INTERRUPT_STATUS); 2694 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2695 } 2696 2697 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2698 { 2699 struct drm_i915_private *dev_priv = dev->dev_private; 2700 unsigned long irqflags; 2701 2702 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2703 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2704 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2705 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2706 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2707 } 2708 2709 static struct drm_i915_gem_request * 2710 ring_last_request(struct intel_engine_cs *ring) 2711 { 2712 return list_entry(ring->request_list.prev, 2713 struct drm_i915_gem_request, list); 2714 } 2715 2716 static bool 2717 ring_idle(struct intel_engine_cs *ring) 2718 { 2719 return (list_empty(&ring->request_list) || 2720 i915_gem_request_completed(ring_last_request(ring), false)); 2721 } 2722 2723 static bool 2724 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2725 { 2726 if (INTEL_INFO(dev)->gen >= 8) { 2727 return (ipehr >> 23) == 0x1c; 2728 } else { 2729 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2730 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2731 MI_SEMAPHORE_REGISTER); 2732 } 2733 } 2734 2735 static struct intel_engine_cs * 2736 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2737 { 2738 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2739 struct intel_engine_cs *signaller; 2740 int i; 2741 2742 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2743 for_each_ring(signaller, dev_priv, i) { 2744 if (ring == signaller) 2745 continue; 2746 2747 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2748 return signaller; 2749 } 2750 } else { 2751 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2752 2753 for_each_ring(signaller, dev_priv, i) { 2754 if(ring == signaller) 2755 continue; 2756 2757 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2758 return signaller; 2759 } 2760 } 2761 2762 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2763 ring->id, ipehr, offset); 2764 2765 return NULL; 2766 } 2767 2768 static struct intel_engine_cs * 2769 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2770 { 2771 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2772 u32 cmd, ipehr, head; 2773 u64 offset = 0; 2774 int i, backwards; 2775 2776 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2777 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2778 return NULL; 2779 2780 /* 2781 * HEAD is likely pointing to the dword after the actual command, 2782 * so scan backwards until we find the MBOX. But limit it to just 3 2783 * or 4 dwords depending on the semaphore wait command size. 2784 * Note that we don't care about ACTHD here since that might 2785 * point at at batch, and semaphores are always emitted into the 2786 * ringbuffer itself. 
2787 */ 2788 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2789 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2790 2791 for (i = backwards; i; --i) { 2792 /* 2793 * Be paranoid and presume the hw has gone off into the wild - 2794 * our ring is smaller than what the hardware (and hence 2795 * HEAD_ADDR) allows. Also handles wrap-around. 2796 */ 2797 head &= ring->buffer->size - 1; 2798 2799 /* This here seems to blow up */ 2800 cmd = ioread32(ring->buffer->virtual_start + head); 2801 if (cmd == ipehr) 2802 break; 2803 2804 head -= 4; 2805 } 2806 2807 if (!i) 2808 return NULL; 2809 2810 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2811 if (INTEL_INFO(ring->dev)->gen >= 8) { 2812 offset = ioread32(ring->buffer->virtual_start + head + 12); 2813 offset <<= 32; 2814 offset = ioread32(ring->buffer->virtual_start + head + 8); 2815 } 2816 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2817 } 2818 2819 static int semaphore_passed(struct intel_engine_cs *ring) 2820 { 2821 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2822 struct intel_engine_cs *signaller; 2823 u32 seqno; 2824 2825 ring->hangcheck.deadlock++; 2826 2827 signaller = semaphore_waits_for(ring, &seqno); 2828 if (signaller == NULL) 2829 return -1; 2830 2831 /* Prevent pathological recursion due to driver bugs */ 2832 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2833 return -1; 2834 2835 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2836 return 1; 2837 2838 /* cursory check for an unkickable deadlock */ 2839 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2840 semaphore_passed(signaller) < 0) 2841 return -1; 2842 2843 return 0; 2844 } 2845 2846 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2847 { 2848 struct intel_engine_cs *ring; 2849 int i; 2850 2851 for_each_ring(ring, dev_priv, i) 2852 ring->hangcheck.deadlock = 0; 2853 } 2854 2855 static enum intel_ring_hangcheck_action 2856 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2857 { 2858 struct drm_device *dev = ring->dev; 2859 struct drm_i915_private *dev_priv = dev->dev_private; 2860 u32 tmp; 2861 2862 if (acthd != ring->hangcheck.acthd) { 2863 if (acthd > ring->hangcheck.max_acthd) { 2864 ring->hangcheck.max_acthd = acthd; 2865 return HANGCHECK_ACTIVE; 2866 } 2867 2868 return HANGCHECK_ACTIVE_LOOP; 2869 } 2870 2871 if (IS_GEN2(dev)) 2872 return HANGCHECK_HUNG; 2873 2874 /* Is the chip hanging on a WAIT_FOR_EVENT? 2875 * If so we can simply poke the RB_WAIT bit 2876 * and break the hang. This should work on 2877 * all but the second generation chipsets. 2878 */ 2879 tmp = I915_READ_CTL(ring); 2880 if (tmp & RING_WAIT) { 2881 i915_handle_error(dev, false, 2882 "Kicking stuck wait on %s", 2883 ring->name); 2884 I915_WRITE_CTL(ring, tmp); 2885 return HANGCHECK_KICK; 2886 } 2887 2888 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2889 switch (semaphore_passed(ring)) { 2890 default: 2891 return HANGCHECK_HUNG; 2892 case 1: 2893 i915_handle_error(dev, false, 2894 "Kicking stuck semaphore on %s", 2895 ring->name); 2896 I915_WRITE_CTL(ring, tmp); 2897 return HANGCHECK_KICK; 2898 case 0: 2899 return HANGCHECK_WAIT; 2900 } 2901 } 2902 2903 return HANGCHECK_HUNG; 2904 } 2905 2906 /* 2907 * This is called when the chip hasn't reported back with completed 2908 * batchbuffers in a long time. We keep track per ring seqno progress and 2909 * if there are no progress, hangcheck score for that ring is increased. 
2910 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2911 * we kick the ring. If we see no progress on three subsequent calls 2912 * we assume chip is wedged and try to fix it by resetting the chip. 2913 */ 2914 static void i915_hangcheck_elapsed(struct work_struct *work) 2915 { 2916 struct drm_i915_private *dev_priv = 2917 container_of(work, typeof(*dev_priv), 2918 gpu_error.hangcheck_work.work); 2919 struct drm_device *dev = dev_priv->dev; 2920 struct intel_engine_cs *ring; 2921 int i; 2922 int busy_count = 0, rings_hung = 0; 2923 bool stuck[I915_NUM_RINGS] = { 0 }; 2924 #define BUSY 1 2925 #define KICK 5 2926 #define HUNG 20 2927 2928 if (!i915.enable_hangcheck) 2929 return; 2930 2931 for_each_ring(ring, dev_priv, i) { 2932 u64 acthd; 2933 u32 seqno; 2934 bool busy = true; 2935 2936 semaphore_clear_deadlocks(dev_priv); 2937 2938 seqno = ring->get_seqno(ring, false); 2939 acthd = intel_ring_get_active_head(ring); 2940 2941 if (ring->hangcheck.seqno == seqno) { 2942 if (ring_idle(ring)) { 2943 ring->hangcheck.action = HANGCHECK_IDLE; 2944 2945 if (waitqueue_active(&ring->irq_queue)) { 2946 /* Issue a wake-up to catch stuck h/w. */ 2947 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2948 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2949 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2950 ring->name); 2951 else 2952 DRM_INFO("Fake missed irq on %s\n", 2953 ring->name); 2954 wake_up_all(&ring->irq_queue); 2955 } 2956 /* Safeguard against driver failure */ 2957 ring->hangcheck.score += BUSY; 2958 } else 2959 busy = false; 2960 } else { 2961 /* We always increment the hangcheck score 2962 * if the ring is busy and still processing 2963 * the same request, so that no single request 2964 * can run indefinitely (such as a chain of 2965 * batches). The only time we do not increment 2966 * the hangcheck score on this ring, if this 2967 * ring is in a legitimate wait for another 2968 * ring. In that case the waiting ring is a 2969 * victim and we want to be sure we catch the 2970 * right culprit. Then every time we do kick 2971 * the ring, add a small increment to the 2972 * score so that we can catch a batch that is 2973 * being repeatedly kicked and so responsible 2974 * for stalling the machine. 2975 */ 2976 ring->hangcheck.action = ring_stuck(ring, 2977 acthd); 2978 2979 switch (ring->hangcheck.action) { 2980 case HANGCHECK_IDLE: 2981 case HANGCHECK_WAIT: 2982 case HANGCHECK_ACTIVE: 2983 break; 2984 case HANGCHECK_ACTIVE_LOOP: 2985 ring->hangcheck.score += BUSY; 2986 break; 2987 case HANGCHECK_KICK: 2988 ring->hangcheck.score += KICK; 2989 break; 2990 case HANGCHECK_HUNG: 2991 ring->hangcheck.score += HUNG; 2992 stuck[i] = true; 2993 break; 2994 } 2995 } 2996 } else { 2997 ring->hangcheck.action = HANGCHECK_ACTIVE; 2998 2999 /* Gradually reduce the count so that we catch DoS 3000 * attempts across multiple batches. 3001 */ 3002 if (ring->hangcheck.score > 0) 3003 ring->hangcheck.score--; 3004 3005 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3006 } 3007 3008 ring->hangcheck.seqno = seqno; 3009 ring->hangcheck.acthd = acthd; 3010 busy_count += busy; 3011 } 3012 3013 for_each_ring(ring, dev_priv, i) { 3014 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3015 DRM_INFO("%s on %s\n", 3016 stuck[i] ? 
"stuck" : "no progress", 3017 ring->name); 3018 rings_hung++; 3019 } 3020 } 3021 3022 if (rings_hung) 3023 return i915_handle_error(dev, true, "Ring hung"); 3024 3025 if (busy_count) 3026 /* Reset timer case chip hangs without another request 3027 * being added */ 3028 i915_queue_hangcheck(dev); 3029 } 3030 3031 void i915_queue_hangcheck(struct drm_device *dev) 3032 { 3033 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3034 3035 if (!i915.enable_hangcheck) 3036 return; 3037 3038 /* Don't continually defer the hangcheck so that it is always run at 3039 * least once after work has been scheduled on any ring. Otherwise, 3040 * we will ignore a hung ring if a second ring is kept busy. 3041 */ 3042 3043 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 3044 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 3045 } 3046 3047 static void ibx_irq_reset(struct drm_device *dev) 3048 { 3049 struct drm_i915_private *dev_priv = dev->dev_private; 3050 3051 if (HAS_PCH_NOP(dev)) 3052 return; 3053 3054 GEN5_IRQ_RESET(SDE); 3055 3056 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3057 I915_WRITE(SERR_INT, 0xffffffff); 3058 } 3059 3060 /* 3061 * SDEIER is also touched by the interrupt handler to work around missed PCH 3062 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3063 * instead we unconditionally enable all PCH interrupt sources here, but then 3064 * only unmask them as needed with SDEIMR. 3065 * 3066 * This function needs to be called before interrupts are enabled. 3067 */ 3068 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3069 { 3070 struct drm_i915_private *dev_priv = dev->dev_private; 3071 3072 if (HAS_PCH_NOP(dev)) 3073 return; 3074 3075 WARN_ON(I915_READ(SDEIER) != 0); 3076 I915_WRITE(SDEIER, 0xffffffff); 3077 POSTING_READ(SDEIER); 3078 } 3079 3080 static void gen5_gt_irq_reset(struct drm_device *dev) 3081 { 3082 struct drm_i915_private *dev_priv = dev->dev_private; 3083 3084 GEN5_IRQ_RESET(GT); 3085 if (INTEL_INFO(dev)->gen >= 6) 3086 GEN5_IRQ_RESET(GEN6_PM); 3087 } 3088 3089 /* drm_dma.h hooks 3090 */ 3091 static void ironlake_irq_reset(struct drm_device *dev) 3092 { 3093 struct drm_i915_private *dev_priv = dev->dev_private; 3094 3095 I915_WRITE(HWSTAM, 0xffffffff); 3096 3097 GEN5_IRQ_RESET(DE); 3098 if (IS_GEN7(dev)) 3099 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3100 3101 gen5_gt_irq_reset(dev); 3102 3103 ibx_irq_reset(dev); 3104 } 3105 3106 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3107 { 3108 enum pipe pipe; 3109 3110 I915_WRITE(PORT_HOTPLUG_EN, 0); 3111 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3112 3113 for_each_pipe(dev_priv, pipe) 3114 I915_WRITE(PIPESTAT(pipe), 0xffff); 3115 3116 GEN5_IRQ_RESET(VLV_); 3117 } 3118 3119 static void valleyview_irq_preinstall(struct drm_device *dev) 3120 { 3121 struct drm_i915_private *dev_priv = dev->dev_private; 3122 3123 /* VLV magic */ 3124 I915_WRITE(VLV_IMR, 0); 3125 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3126 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3127 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3128 3129 gen5_gt_irq_reset(dev); 3130 3131 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3132 3133 vlv_display_irq_reset(dev_priv); 3134 } 3135 3136 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3137 { 3138 GEN8_IRQ_RESET_NDX(GT, 0); 3139 GEN8_IRQ_RESET_NDX(GT, 1); 3140 GEN8_IRQ_RESET_NDX(GT, 2); 3141 GEN8_IRQ_RESET_NDX(GT, 3); 3142 } 3143 3144 static void gen8_irq_reset(struct drm_device *dev) 3145 { 3146 struct drm_i915_private 
*dev_priv = dev->dev_private; 3147 int pipe; 3148 3149 I915_WRITE(GEN8_MASTER_IRQ, 0); 3150 POSTING_READ(GEN8_MASTER_IRQ); 3151 3152 gen8_gt_irq_reset(dev_priv); 3153 3154 for_each_pipe(dev_priv, pipe) 3155 if (intel_display_power_is_enabled(dev_priv, 3156 POWER_DOMAIN_PIPE(pipe))) 3157 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3158 3159 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3160 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3161 GEN5_IRQ_RESET(GEN8_PCU_); 3162 3163 if (HAS_PCH_SPLIT(dev)) 3164 ibx_irq_reset(dev); 3165 } 3166 3167 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3168 unsigned int pipe_mask) 3169 { 3170 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3171 3172 spin_lock_irq(&dev_priv->irq_lock); 3173 if (pipe_mask & 1 << PIPE_A) 3174 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 3175 dev_priv->de_irq_mask[PIPE_A], 3176 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); 3177 if (pipe_mask & 1 << PIPE_B) 3178 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, 3179 dev_priv->de_irq_mask[PIPE_B], 3180 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3181 if (pipe_mask & 1 << PIPE_C) 3182 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, 3183 dev_priv->de_irq_mask[PIPE_C], 3184 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3185 spin_unlock_irq(&dev_priv->irq_lock); 3186 } 3187 3188 static void cherryview_irq_preinstall(struct drm_device *dev) 3189 { 3190 struct drm_i915_private *dev_priv = dev->dev_private; 3191 3192 I915_WRITE(GEN8_MASTER_IRQ, 0); 3193 POSTING_READ(GEN8_MASTER_IRQ); 3194 3195 gen8_gt_irq_reset(dev_priv); 3196 3197 GEN5_IRQ_RESET(GEN8_PCU_); 3198 3199 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3200 3201 vlv_display_irq_reset(dev_priv); 3202 } 3203 3204 static void ibx_hpd_irq_setup(struct drm_device *dev) 3205 { 3206 struct drm_i915_private *dev_priv = dev->dev_private; 3207 struct intel_encoder *intel_encoder; 3208 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3209 3210 if (HAS_PCH_IBX(dev)) { 3211 hotplug_irqs = SDE_HOTPLUG_MASK; 3212 for_each_intel_encoder(dev, intel_encoder) 3213 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3214 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3215 } else { 3216 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3217 for_each_intel_encoder(dev, intel_encoder) 3218 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3219 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3220 } 3221 3222 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3223 3224 /* 3225 * Enable digital hotplug on the PCH, and configure the DP short pulse 3226 * duration to 2ms (which is the minimum in the Display Port spec) 3227 * 3228 * This register is the same on all known PCH chips. 
3229 */ 3230 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3231 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3232 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3233 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3234 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3235 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3236 } 3237 3238 static void bxt_hpd_irq_setup(struct drm_device *dev) 3239 { 3240 struct drm_i915_private *dev_priv = dev->dev_private; 3241 struct intel_encoder *intel_encoder; 3242 u32 hotplug_port = 0; 3243 u32 hotplug_ctrl; 3244 3245 /* Now, enable HPD */ 3246 for_each_intel_encoder(dev, intel_encoder) { 3247 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark 3248 == HPD_ENABLED) 3249 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; 3250 } 3251 3252 /* Mask all HPD control bits */ 3253 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK; 3254 3255 /* Enable requested port in hotplug control */ 3256 /* TODO: implement (short) HPD support on port A */ 3257 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA); 3258 if (hotplug_port & BXT_DE_PORT_HP_DDIB) 3259 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE; 3260 if (hotplug_port & BXT_DE_PORT_HP_DDIC) 3261 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE; 3262 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl); 3263 3264 /* Unmask DDI hotplug in IMR */ 3265 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port; 3266 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl); 3267 3268 /* Enable DDI hotplug in IER */ 3269 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port; 3270 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl); 3271 POSTING_READ(GEN8_DE_PORT_IER); 3272 } 3273 3274 static void ibx_irq_postinstall(struct drm_device *dev) 3275 { 3276 struct drm_i915_private *dev_priv = dev->dev_private; 3277 u32 mask; 3278 3279 if (HAS_PCH_NOP(dev)) 3280 return; 3281 3282 if (HAS_PCH_IBX(dev)) 3283 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3284 else 3285 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3286 3287 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3288 I915_WRITE(SDEIMR, ~mask); 3289 } 3290 3291 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3292 { 3293 struct drm_i915_private *dev_priv = dev->dev_private; 3294 u32 pm_irqs, gt_irqs; 3295 3296 pm_irqs = gt_irqs = 0; 3297 3298 dev_priv->gt_irq_mask = ~0; 3299 if (HAS_L3_DPF(dev)) { 3300 /* L3 parity interrupt is always unmasked. */ 3301 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3302 gt_irqs |= GT_PARITY_ERROR(dev); 3303 } 3304 3305 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3306 if (IS_GEN5(dev)) { 3307 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3308 ILK_BSD_USER_INTERRUPT; 3309 } else { 3310 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3311 } 3312 3313 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3314 3315 if (INTEL_INFO(dev)->gen >= 6) { 3316 /* 3317 * RPS interrupts will get enabled/disabled on demand when RPS 3318 * itself is enabled/disabled. 
3319 */ 3320 if (HAS_VEBOX(dev)) 3321 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3322 3323 dev_priv->pm_irq_mask = 0xffffffff; 3324 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3325 } 3326 } 3327 3328 static int ironlake_irq_postinstall(struct drm_device *dev) 3329 { 3330 struct drm_i915_private *dev_priv = dev->dev_private; 3331 u32 display_mask, extra_mask; 3332 3333 if (INTEL_INFO(dev)->gen >= 7) { 3334 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3335 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3336 DE_PLANEB_FLIP_DONE_IVB | 3337 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3338 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3339 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3340 } else { 3341 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3342 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3343 DE_AUX_CHANNEL_A | 3344 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3345 DE_POISON); 3346 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3347 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3348 } 3349 3350 dev_priv->irq_mask = ~display_mask; 3351 3352 I915_WRITE(HWSTAM, 0xeffe); 3353 3354 ibx_irq_pre_postinstall(dev); 3355 3356 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3357 3358 gen5_gt_irq_postinstall(dev); 3359 3360 ibx_irq_postinstall(dev); 3361 3362 if (IS_IRONLAKE_M(dev)) { 3363 /* Enable PCU event interrupts 3364 * 3365 * spinlocking not required here for correctness since interrupt 3366 * setup is guaranteed to run in single-threaded context. But we 3367 * need it to make the assert_spin_locked happy. */ 3368 spin_lock_irq(&dev_priv->irq_lock); 3369 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3370 spin_unlock_irq(&dev_priv->irq_lock); 3371 } 3372 3373 return 0; 3374 } 3375 3376 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3377 { 3378 u32 pipestat_mask; 3379 u32 iir_mask; 3380 enum pipe pipe; 3381 3382 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3383 PIPE_FIFO_UNDERRUN_STATUS; 3384 3385 for_each_pipe(dev_priv, pipe) 3386 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3387 POSTING_READ(PIPESTAT(PIPE_A)); 3388 3389 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3390 PIPE_CRC_DONE_INTERRUPT_STATUS; 3391 3392 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3393 for_each_pipe(dev_priv, pipe) 3394 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3395 3396 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3397 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3398 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3399 if (IS_CHERRYVIEW(dev_priv)) 3400 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3401 dev_priv->irq_mask &= ~iir_mask; 3402 3403 I915_WRITE(VLV_IIR, iir_mask); 3404 I915_WRITE(VLV_IIR, iir_mask); 3405 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3406 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3407 POSTING_READ(VLV_IMR); 3408 } 3409 3410 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3411 { 3412 u32 pipestat_mask; 3413 u32 iir_mask; 3414 enum pipe pipe; 3415 3416 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3417 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3418 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3419 if (IS_CHERRYVIEW(dev_priv)) 3420 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3421 3422 dev_priv->irq_mask |= iir_mask; 3423 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3424 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3425 I915_WRITE(VLV_IIR, iir_mask); 3426 I915_WRITE(VLV_IIR, iir_mask); 3427 POSTING_READ(VLV_IIR); 3428 3429 pipestat_mask = 
PLANE_FLIP_DONE_INT_STATUS_VLV | 3430 PIPE_CRC_DONE_INTERRUPT_STATUS; 3431 3432 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3433 for_each_pipe(dev_priv, pipe) 3434 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3435 3436 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3437 PIPE_FIFO_UNDERRUN_STATUS; 3438 3439 for_each_pipe(dev_priv, pipe) 3440 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3441 POSTING_READ(PIPESTAT(PIPE_A)); 3442 } 3443 3444 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3445 { 3446 assert_spin_locked(&dev_priv->irq_lock); 3447 3448 if (dev_priv->display_irqs_enabled) 3449 return; 3450 3451 dev_priv->display_irqs_enabled = true; 3452 3453 if (intel_irqs_enabled(dev_priv)) 3454 valleyview_display_irqs_install(dev_priv); 3455 } 3456 3457 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3458 { 3459 assert_spin_locked(&dev_priv->irq_lock); 3460 3461 if (!dev_priv->display_irqs_enabled) 3462 return; 3463 3464 dev_priv->display_irqs_enabled = false; 3465 3466 if (intel_irqs_enabled(dev_priv)) 3467 valleyview_display_irqs_uninstall(dev_priv); 3468 } 3469 3470 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3471 { 3472 dev_priv->irq_mask = ~0; 3473 3474 I915_WRITE(PORT_HOTPLUG_EN, 0); 3475 POSTING_READ(PORT_HOTPLUG_EN); 3476 3477 I915_WRITE(VLV_IIR, 0xffffffff); 3478 I915_WRITE(VLV_IIR, 0xffffffff); 3479 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3480 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3481 POSTING_READ(VLV_IMR); 3482 3483 /* Interrupt setup is already guaranteed to be single-threaded, this is 3484 * just to make the assert_spin_locked check happy. */ 3485 spin_lock_irq(&dev_priv->irq_lock); 3486 if (dev_priv->display_irqs_enabled) 3487 valleyview_display_irqs_install(dev_priv); 3488 spin_unlock_irq(&dev_priv->irq_lock); 3489 } 3490 3491 static int valleyview_irq_postinstall(struct drm_device *dev) 3492 { 3493 struct drm_i915_private *dev_priv = dev->dev_private; 3494 3495 vlv_display_irq_postinstall(dev_priv); 3496 3497 gen5_gt_irq_postinstall(dev); 3498 3499 /* ack & enable invalid PTE error interrupts */ 3500 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3501 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3502 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3503 #endif 3504 3505 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3506 3507 return 0; 3508 } 3509 3510 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3511 { 3512 /* These are interrupts we'll toggle with the ring mask register */ 3513 uint32_t gt_interrupts[] = { 3514 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3515 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3516 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3517 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3518 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3519 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3520 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3521 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3522 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3523 0, 3524 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3525 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3526 }; 3527 3528 dev_priv->pm_irq_mask = 0xffffffff; 3529 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3530 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3531 /* 3532 * RPS interrupts will get enabled/disabled on demand when RPS itself 3533 * is enabled/disabled. 
3534 */ 3535 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3536 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3537 } 3538 3539 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3540 { 3541 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3542 uint32_t de_pipe_enables; 3543 int pipe; 3544 u32 de_port_en = GEN8_AUX_CHANNEL_A; 3545 3546 if (IS_GEN9(dev_priv)) { 3547 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3548 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3549 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3550 GEN9_AUX_CHANNEL_D; 3551 3552 if (IS_BROXTON(dev_priv)) 3553 de_port_en |= BXT_DE_PORT_GMBUS; 3554 } else 3555 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3556 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3557 3558 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3559 GEN8_PIPE_FIFO_UNDERRUN; 3560 3561 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3562 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3563 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3564 3565 for_each_pipe(dev_priv, pipe) 3566 if (intel_display_power_is_enabled(dev_priv, 3567 POWER_DOMAIN_PIPE(pipe))) 3568 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3569 dev_priv->de_irq_mask[pipe], 3570 de_pipe_enables); 3571 3572 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en); 3573 } 3574 3575 static int gen8_irq_postinstall(struct drm_device *dev) 3576 { 3577 struct drm_i915_private *dev_priv = dev->dev_private; 3578 3579 if (HAS_PCH_SPLIT(dev)) 3580 ibx_irq_pre_postinstall(dev); 3581 3582 gen8_gt_irq_postinstall(dev_priv); 3583 gen8_de_irq_postinstall(dev_priv); 3584 3585 if (HAS_PCH_SPLIT(dev)) 3586 ibx_irq_postinstall(dev); 3587 3588 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3589 POSTING_READ(GEN8_MASTER_IRQ); 3590 3591 return 0; 3592 } 3593 3594 static int cherryview_irq_postinstall(struct drm_device *dev) 3595 { 3596 struct drm_i915_private *dev_priv = dev->dev_private; 3597 3598 vlv_display_irq_postinstall(dev_priv); 3599 3600 gen8_gt_irq_postinstall(dev_priv); 3601 3602 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3603 POSTING_READ(GEN8_MASTER_IRQ); 3604 3605 return 0; 3606 } 3607 3608 static void gen8_irq_uninstall(struct drm_device *dev) 3609 { 3610 struct drm_i915_private *dev_priv = dev->dev_private; 3611 3612 if (!dev_priv) 3613 return; 3614 3615 gen8_irq_reset(dev); 3616 } 3617 3618 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3619 { 3620 /* Interrupt setup is already guaranteed to be single-threaded, this is 3621 * just to make the assert_spin_locked check happy. 
*/ 3622 spin_lock_irq(&dev_priv->irq_lock); 3623 if (dev_priv->display_irqs_enabled) 3624 valleyview_display_irqs_uninstall(dev_priv); 3625 spin_unlock_irq(&dev_priv->irq_lock); 3626 3627 vlv_display_irq_reset(dev_priv); 3628 3629 dev_priv->irq_mask = ~0; 3630 } 3631 3632 static void valleyview_irq_uninstall(struct drm_device *dev) 3633 { 3634 struct drm_i915_private *dev_priv = dev->dev_private; 3635 3636 if (!dev_priv) 3637 return; 3638 3639 I915_WRITE(VLV_MASTER_IER, 0); 3640 3641 gen5_gt_irq_reset(dev); 3642 3643 I915_WRITE(HWSTAM, 0xffffffff); 3644 3645 vlv_display_irq_uninstall(dev_priv); 3646 } 3647 3648 static void cherryview_irq_uninstall(struct drm_device *dev) 3649 { 3650 struct drm_i915_private *dev_priv = dev->dev_private; 3651 3652 if (!dev_priv) 3653 return; 3654 3655 I915_WRITE(GEN8_MASTER_IRQ, 0); 3656 POSTING_READ(GEN8_MASTER_IRQ); 3657 3658 gen8_gt_irq_reset(dev_priv); 3659 3660 GEN5_IRQ_RESET(GEN8_PCU_); 3661 3662 vlv_display_irq_uninstall(dev_priv); 3663 } 3664 3665 static void ironlake_irq_uninstall(struct drm_device *dev) 3666 { 3667 struct drm_i915_private *dev_priv = dev->dev_private; 3668 3669 if (!dev_priv) 3670 return; 3671 3672 ironlake_irq_reset(dev); 3673 } 3674 3675 static void i8xx_irq_preinstall(struct drm_device * dev) 3676 { 3677 struct drm_i915_private *dev_priv = dev->dev_private; 3678 int pipe; 3679 3680 for_each_pipe(dev_priv, pipe) 3681 I915_WRITE(PIPESTAT(pipe), 0); 3682 I915_WRITE16(IMR, 0xffff); 3683 I915_WRITE16(IER, 0x0); 3684 POSTING_READ16(IER); 3685 } 3686 3687 static int i8xx_irq_postinstall(struct drm_device *dev) 3688 { 3689 struct drm_i915_private *dev_priv = dev->dev_private; 3690 3691 I915_WRITE16(EMR, 3692 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3693 3694 /* Unmask the interrupts that we always want on. */ 3695 dev_priv->irq_mask = 3696 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3697 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3698 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3699 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3700 I915_WRITE16(IMR, dev_priv->irq_mask); 3701 3702 I915_WRITE16(IER, 3703 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3704 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3705 I915_USER_INTERRUPT); 3706 POSTING_READ16(IER); 3707 3708 /* Interrupt setup is already guaranteed to be single-threaded, this is 3709 * just to make the assert_spin_locked check happy. */ 3710 spin_lock_irq(&dev_priv->irq_lock); 3711 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3712 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3713 spin_unlock_irq(&dev_priv->irq_lock); 3714 3715 return 0; 3716 } 3717 3718 /* 3719 * Returns true when a page flip has completed. 3720 */ 3721 static bool i8xx_handle_vblank(struct drm_device *dev, 3722 int plane, int pipe, u32 iir) 3723 { 3724 struct drm_i915_private *dev_priv = dev->dev_private; 3725 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3726 3727 if (!intel_pipe_handle_vblank(dev, pipe)) 3728 return false; 3729 3730 if ((iir & flip_pending) == 0) 3731 goto check_page_flip; 3732 3733 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3734 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3735 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3736 * the flip is completed (no longer pending). Since this doesn't raise 3737 * an interrupt per se, we watch for the change at vblank. 
3738 */ 3739 if (I915_READ16(ISR) & flip_pending) 3740 goto check_page_flip; 3741 3742 intel_prepare_page_flip(dev, plane); 3743 intel_finish_page_flip(dev, pipe); 3744 return true; 3745 3746 check_page_flip: 3747 intel_check_page_flip(dev, pipe); 3748 return false; 3749 } 3750 3751 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3752 { 3753 struct drm_device *dev = arg; 3754 struct drm_i915_private *dev_priv = dev->dev_private; 3755 u16 iir, new_iir; 3756 u32 pipe_stats[2]; 3757 int pipe; 3758 u16 flip_mask = 3759 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3760 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3761 3762 if (!intel_irqs_enabled(dev_priv)) 3763 return IRQ_NONE; 3764 3765 iir = I915_READ16(IIR); 3766 if (iir == 0) 3767 return IRQ_NONE; 3768 3769 while (iir & ~flip_mask) { 3770 /* Can't rely on pipestat interrupt bit in iir as it might 3771 * have been cleared after the pipestat interrupt was received. 3772 * It doesn't set the bit in iir again, but it still produces 3773 * interrupts (for non-MSI). 3774 */ 3775 spin_lock(&dev_priv->irq_lock); 3776 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3777 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3778 3779 for_each_pipe(dev_priv, pipe) { 3780 int reg = PIPESTAT(pipe); 3781 pipe_stats[pipe] = I915_READ(reg); 3782 3783 /* 3784 * Clear the PIPE*STAT regs before the IIR 3785 */ 3786 if (pipe_stats[pipe] & 0x8000ffff) 3787 I915_WRITE(reg, pipe_stats[pipe]); 3788 } 3789 spin_unlock(&dev_priv->irq_lock); 3790 3791 I915_WRITE16(IIR, iir & ~flip_mask); 3792 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3793 3794 if (iir & I915_USER_INTERRUPT) 3795 notify_ring(&dev_priv->ring[RCS]); 3796 3797 for_each_pipe(dev_priv, pipe) { 3798 int plane = pipe; 3799 if (HAS_FBC(dev)) 3800 plane = !plane; 3801 3802 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3803 i8xx_handle_vblank(dev, plane, pipe, iir)) 3804 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3805 3806 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3807 i9xx_pipe_crc_irq_handler(dev, pipe); 3808 3809 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3810 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3811 pipe); 3812 } 3813 3814 iir = new_iir; 3815 } 3816 3817 return IRQ_HANDLED; 3818 } 3819 3820 static void i8xx_irq_uninstall(struct drm_device * dev) 3821 { 3822 struct drm_i915_private *dev_priv = dev->dev_private; 3823 int pipe; 3824 3825 for_each_pipe(dev_priv, pipe) { 3826 /* Clear enable bits; then clear status bits */ 3827 I915_WRITE(PIPESTAT(pipe), 0); 3828 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3829 } 3830 I915_WRITE16(IMR, 0xffff); 3831 I915_WRITE16(IER, 0x0); 3832 I915_WRITE16(IIR, I915_READ16(IIR)); 3833 } 3834 3835 static void i915_irq_preinstall(struct drm_device * dev) 3836 { 3837 struct drm_i915_private *dev_priv = dev->dev_private; 3838 int pipe; 3839 3840 if (I915_HAS_HOTPLUG(dev)) { 3841 I915_WRITE(PORT_HOTPLUG_EN, 0); 3842 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3843 } 3844 3845 I915_WRITE16(HWSTAM, 0xeffe); 3846 for_each_pipe(dev_priv, pipe) 3847 I915_WRITE(PIPESTAT(pipe), 0); 3848 I915_WRITE(IMR, 0xffffffff); 3849 I915_WRITE(IER, 0x0); 3850 POSTING_READ(IER); 3851 } 3852 3853 static int i915_irq_postinstall(struct drm_device *dev) 3854 { 3855 struct drm_i915_private *dev_priv = dev->dev_private; 3856 u32 enable_mask; 3857 3858 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3859 3860 /* Unmask the interrupts that we always want on. 
*/ 3861 dev_priv->irq_mask = 3862 ~(I915_ASLE_INTERRUPT | 3863 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3864 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3865 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3866 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3867 3868 enable_mask = 3869 I915_ASLE_INTERRUPT | 3870 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3871 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3872 I915_USER_INTERRUPT; 3873 3874 if (I915_HAS_HOTPLUG(dev)) { 3875 I915_WRITE(PORT_HOTPLUG_EN, 0); 3876 POSTING_READ(PORT_HOTPLUG_EN); 3877 3878 /* Enable in IER... */ 3879 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3880 /* and unmask in IMR */ 3881 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3882 } 3883 3884 I915_WRITE(IMR, dev_priv->irq_mask); 3885 I915_WRITE(IER, enable_mask); 3886 POSTING_READ(IER); 3887 3888 i915_enable_asle_pipestat(dev); 3889 3890 /* Interrupt setup is already guaranteed to be single-threaded, this is 3891 * just to make the assert_spin_locked check happy. */ 3892 spin_lock_irq(&dev_priv->irq_lock); 3893 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3894 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3895 spin_unlock_irq(&dev_priv->irq_lock); 3896 3897 return 0; 3898 } 3899 3900 /* 3901 * Returns true when a page flip has completed. 3902 */ 3903 static bool i915_handle_vblank(struct drm_device *dev, 3904 int plane, int pipe, u32 iir) 3905 { 3906 struct drm_i915_private *dev_priv = dev->dev_private; 3907 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3908 3909 if (!intel_pipe_handle_vblank(dev, pipe)) 3910 return false; 3911 3912 if ((iir & flip_pending) == 0) 3913 goto check_page_flip; 3914 3915 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3916 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3917 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3918 * the flip is completed (no longer pending). Since this doesn't raise 3919 * an interrupt per se, we watch for the change at vblank. 3920 */ 3921 if (I915_READ(ISR) & flip_pending) 3922 goto check_page_flip; 3923 3924 intel_prepare_page_flip(dev, plane); 3925 intel_finish_page_flip(dev, pipe); 3926 return true; 3927 3928 check_page_flip: 3929 intel_check_page_flip(dev, pipe); 3930 return false; 3931 } 3932 3933 static irqreturn_t i915_irq_handler(int irq, void *arg) 3934 { 3935 struct drm_device *dev = arg; 3936 struct drm_i915_private *dev_priv = dev->dev_private; 3937 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3938 u32 flip_mask = 3939 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3940 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3941 int pipe, ret = IRQ_NONE; 3942 3943 if (!intel_irqs_enabled(dev_priv)) 3944 return IRQ_NONE; 3945 3946 iir = I915_READ(IIR); 3947 do { 3948 bool irq_received = (iir & ~flip_mask) != 0; 3949 bool blc_event = false; 3950 3951 /* Can't rely on pipestat interrupt bit in iir as it might 3952 * have been cleared after the pipestat interrupt was received. 3953 * It doesn't set the bit in iir again, but it still produces 3954 * interrupts (for non-MSI). 
3955 */ 3956 spin_lock(&dev_priv->irq_lock); 3957 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3958 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3959 3960 for_each_pipe(dev_priv, pipe) { 3961 int reg = PIPESTAT(pipe); 3962 pipe_stats[pipe] = I915_READ(reg); 3963 3964 /* Clear the PIPE*STAT regs before the IIR */ 3965 if (pipe_stats[pipe] & 0x8000ffff) { 3966 I915_WRITE(reg, pipe_stats[pipe]); 3967 irq_received = true; 3968 } 3969 } 3970 spin_unlock(&dev_priv->irq_lock); 3971 3972 if (!irq_received) 3973 break; 3974 3975 /* Consume port. Then clear IIR or we'll miss events */ 3976 if (I915_HAS_HOTPLUG(dev) && 3977 iir & I915_DISPLAY_PORT_INTERRUPT) 3978 i9xx_hpd_irq_handler(dev); 3979 3980 I915_WRITE(IIR, iir & ~flip_mask); 3981 new_iir = I915_READ(IIR); /* Flush posted writes */ 3982 3983 if (iir & I915_USER_INTERRUPT) 3984 notify_ring(&dev_priv->ring[RCS]); 3985 3986 for_each_pipe(dev_priv, pipe) { 3987 int plane = pipe; 3988 if (HAS_FBC(dev)) 3989 plane = !plane; 3990 3991 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3992 i915_handle_vblank(dev, plane, pipe, iir)) 3993 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3994 3995 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3996 blc_event = true; 3997 3998 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3999 i9xx_pipe_crc_irq_handler(dev, pipe); 4000 4001 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4002 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4003 pipe); 4004 } 4005 4006 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4007 intel_opregion_asle_intr(dev); 4008 4009 /* With MSI, interrupts are only generated when iir 4010 * transitions from zero to nonzero. If another bit got 4011 * set while we were handling the existing iir bits, then 4012 * we would never get another interrupt. 4013 * 4014 * This is fine on non-MSI as well, as if we hit this path 4015 * we avoid exiting the interrupt handler only to generate 4016 * another one. 4017 * 4018 * Note that for MSI this could cause a stray interrupt report 4019 * if an interrupt landed in the time between writing IIR and 4020 * the posting read. This should be rare enough to never 4021 * trigger the 99% of 100,000 interrupts test for disabling 4022 * stray interrupts. 
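 *
 * As a sketch of the resulting control flow (a restatement of the loop
 * above, not additional logic):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		handle pipe stats, hotplug and ring notifications;
 *		I915_WRITE(IIR, iir & ~flip_mask);
 *		new_iir = I915_READ(IIR);
 *		iir = new_iir;
 *	} while (iir & ~flip_mask);
 *
 * so any bit that latched while we were acking the previous value is
 * picked up on the next pass instead of being lost.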
4023 */ 4024 ret = IRQ_HANDLED; 4025 iir = new_iir; 4026 } while (iir & ~flip_mask); 4027 4028 return ret; 4029 } 4030 4031 static void i915_irq_uninstall(struct drm_device * dev) 4032 { 4033 struct drm_i915_private *dev_priv = dev->dev_private; 4034 int pipe; 4035 4036 if (I915_HAS_HOTPLUG(dev)) { 4037 I915_WRITE(PORT_HOTPLUG_EN, 0); 4038 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4039 } 4040 4041 I915_WRITE16(HWSTAM, 0xffff); 4042 for_each_pipe(dev_priv, pipe) { 4043 /* Clear enable bits; then clear status bits */ 4044 I915_WRITE(PIPESTAT(pipe), 0); 4045 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4046 } 4047 I915_WRITE(IMR, 0xffffffff); 4048 I915_WRITE(IER, 0x0); 4049 4050 I915_WRITE(IIR, I915_READ(IIR)); 4051 } 4052 4053 static void i965_irq_preinstall(struct drm_device * dev) 4054 { 4055 struct drm_i915_private *dev_priv = dev->dev_private; 4056 int pipe; 4057 4058 I915_WRITE(PORT_HOTPLUG_EN, 0); 4059 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4060 4061 I915_WRITE(HWSTAM, 0xeffe); 4062 for_each_pipe(dev_priv, pipe) 4063 I915_WRITE(PIPESTAT(pipe), 0); 4064 I915_WRITE(IMR, 0xffffffff); 4065 I915_WRITE(IER, 0x0); 4066 POSTING_READ(IER); 4067 } 4068 4069 static int i965_irq_postinstall(struct drm_device *dev) 4070 { 4071 struct drm_i915_private *dev_priv = dev->dev_private; 4072 u32 enable_mask; 4073 u32 error_mask; 4074 4075 /* Unmask the interrupts that we always want on. */ 4076 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4077 I915_DISPLAY_PORT_INTERRUPT | 4078 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4079 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4080 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4081 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4082 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4083 4084 enable_mask = ~dev_priv->irq_mask; 4085 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4086 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4087 enable_mask |= I915_USER_INTERRUPT; 4088 4089 if (IS_G4X(dev)) 4090 enable_mask |= I915_BSD_USER_INTERRUPT; 4091 4092 /* Interrupt setup is already guaranteed to be single-threaded, this is 4093 * just to make the assert_spin_locked check happy. */ 4094 spin_lock_irq(&dev_priv->irq_lock); 4095 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4096 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4097 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4098 spin_unlock_irq(&dev_priv->irq_lock); 4099 4100 /* 4101 * Enable some error detection, note the instruction error mask 4102 * bit is reserved, so we leave it masked. 
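 *
 * (For reference: EMR is a mask register, so a bit set to 1 suppresses that
 * error source. The ~(...) values written below therefore unmask only the
 * listed errors and leave everything else, including the reserved
 * instruction error bit, masked.)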
4103 */ 4104 if (IS_G4X(dev)) { 4105 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4106 GM45_ERROR_MEM_PRIV | 4107 GM45_ERROR_CP_PRIV | 4108 I915_ERROR_MEMORY_REFRESH); 4109 } else { 4110 error_mask = ~(I915_ERROR_PAGE_TABLE | 4111 I915_ERROR_MEMORY_REFRESH); 4112 } 4113 I915_WRITE(EMR, error_mask); 4114 4115 I915_WRITE(IMR, dev_priv->irq_mask); 4116 I915_WRITE(IER, enable_mask); 4117 POSTING_READ(IER); 4118 4119 I915_WRITE(PORT_HOTPLUG_EN, 0); 4120 POSTING_READ(PORT_HOTPLUG_EN); 4121 4122 i915_enable_asle_pipestat(dev); 4123 4124 return 0; 4125 } 4126 4127 static void i915_hpd_irq_setup(struct drm_device *dev) 4128 { 4129 struct drm_i915_private *dev_priv = dev->dev_private; 4130 struct intel_encoder *intel_encoder; 4131 u32 hotplug_en; 4132 4133 assert_spin_locked(&dev_priv->irq_lock); 4134 4135 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4136 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4137 /* Note HDMI and DP share hotplug bits */ 4138 /* enable bits are the same for all generations */ 4139 for_each_intel_encoder(dev, intel_encoder) 4140 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4141 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4142 /* Programming the CRT detection parameters tends 4143 to generate a spurious hotplug event about three 4144 seconds later. So just do it once. 4145 */ 4146 if (IS_G4X(dev)) 4147 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4148 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4149 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4150 4151 /* Ignore TV since it's buggy */ 4152 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4153 } 4154 4155 static irqreturn_t i965_irq_handler(int irq, void *arg) 4156 { 4157 struct drm_device *dev = arg; 4158 struct drm_i915_private *dev_priv = dev->dev_private; 4159 u32 iir, new_iir; 4160 u32 pipe_stats[I915_MAX_PIPES]; 4161 int ret = IRQ_NONE, pipe; 4162 u32 flip_mask = 4163 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4164 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4165 4166 if (!intel_irqs_enabled(dev_priv)) 4167 return IRQ_NONE; 4168 4169 iir = I915_READ(IIR); 4170 4171 for (;;) { 4172 bool irq_received = (iir & ~flip_mask) != 0; 4173 bool blc_event = false; 4174 4175 /* Can't rely on pipestat interrupt bit in iir as it might 4176 * have been cleared after the pipestat interrupt was received. 4177 * It doesn't set the bit in iir again, but it still produces 4178 * interrupts (for non-MSI). 4179 */ 4180 spin_lock(&dev_priv->irq_lock); 4181 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4182 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4183 4184 for_each_pipe(dev_priv, pipe) { 4185 int reg = PIPESTAT(pipe); 4186 pipe_stats[pipe] = I915_READ(reg); 4187 4188 /* 4189 * Clear the PIPE*STAT regs before the IIR 4190 */ 4191 if (pipe_stats[pipe] & 0x8000ffff) { 4192 I915_WRITE(reg, pipe_stats[pipe]); 4193 irq_received = true; 4194 } 4195 } 4196 spin_unlock(&dev_priv->irq_lock); 4197 4198 if (!irq_received) 4199 break; 4200 4201 ret = IRQ_HANDLED; 4202 4203 /* Consume port. 
Then clear IIR or we'll miss events */ 4204 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4205 i9xx_hpd_irq_handler(dev); 4206 4207 I915_WRITE(IIR, iir & ~flip_mask); 4208 new_iir = I915_READ(IIR); /* Flush posted writes */ 4209 4210 if (iir & I915_USER_INTERRUPT) 4211 notify_ring(&dev_priv->ring[RCS]); 4212 if (iir & I915_BSD_USER_INTERRUPT) 4213 notify_ring(&dev_priv->ring[VCS]); 4214 4215 for_each_pipe(dev_priv, pipe) { 4216 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4217 i915_handle_vblank(dev, pipe, pipe, iir)) 4218 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4219 4220 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4221 blc_event = true; 4222 4223 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4224 i9xx_pipe_crc_irq_handler(dev, pipe); 4225 4226 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4227 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4228 } 4229 4230 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4231 intel_opregion_asle_intr(dev); 4232 4233 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4234 gmbus_irq_handler(dev); 4235 4236 /* With MSI, interrupts are only generated when iir 4237 * transitions from zero to nonzero. If another bit got 4238 * set while we were handling the existing iir bits, then 4239 * we would never get another interrupt. 4240 * 4241 * This is fine on non-MSI as well, as if we hit this path 4242 * we avoid exiting the interrupt handler only to generate 4243 * another one. 4244 * 4245 * Note that for MSI this could cause a stray interrupt report 4246 * if an interrupt landed in the time between writing IIR and 4247 * the posting read. This should be rare enough to never 4248 * trigger the 99% of 100,000 interrupts test for disabling 4249 * stray interrupts. 4250 */ 4251 iir = new_iir; 4252 } 4253 4254 return ret; 4255 } 4256 4257 static void i965_irq_uninstall(struct drm_device * dev) 4258 { 4259 struct drm_i915_private *dev_priv = dev->dev_private; 4260 int pipe; 4261 4262 if (!dev_priv) 4263 return; 4264 4265 I915_WRITE(PORT_HOTPLUG_EN, 0); 4266 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4267 4268 I915_WRITE(HWSTAM, 0xffffffff); 4269 for_each_pipe(dev_priv, pipe) 4270 I915_WRITE(PIPESTAT(pipe), 0); 4271 I915_WRITE(IMR, 0xffffffff); 4272 I915_WRITE(IER, 0x0); 4273 4274 for_each_pipe(dev_priv, pipe) 4275 I915_WRITE(PIPESTAT(pipe), 4276 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4277 I915_WRITE(IIR, I915_READ(IIR)); 4278 } 4279 4280 static void intel_hpd_irq_reenable_work(struct work_struct *work) 4281 { 4282 struct drm_i915_private *dev_priv = 4283 container_of(work, typeof(*dev_priv), 4284 hotplug_reenable_work.work); 4285 struct drm_device *dev = dev_priv->dev; 4286 struct drm_mode_config *mode_config = &dev->mode_config; 4287 int i; 4288 4289 intel_runtime_pm_get(dev_priv); 4290 4291 spin_lock_irq(&dev_priv->irq_lock); 4292 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4293 struct drm_connector *connector; 4294 4295 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4296 continue; 4297 4298 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4299 4300 list_for_each_entry(connector, &mode_config->connector_list, head) { 4301 struct intel_connector *intel_connector = to_intel_connector(connector); 4302 4303 if (intel_connector->encoder->hpd_pin == i) { 4304 if (connector->polled != intel_connector->polled) 4305 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4306 connector->name); 4307 connector->polled = intel_connector->polled; 4308 if (!connector->polled) 4309 connector->polled = 
DRM_CONNECTOR_POLL_HPD;
4310 }
4311 }
4312 }
4313 if (dev_priv->display.hpd_irq_setup)
4314 dev_priv->display.hpd_irq_setup(dev);
4315 spin_unlock_irq(&dev_priv->irq_lock);
4316
4317 intel_runtime_pm_put(dev_priv);
4318 }
4319
4320 /**
4321 * intel_irq_init - initializes irq support
4322 * @dev_priv: i915 device instance
4323 *
4324 * This function initializes all the irq support including work items, timers
4325 * and all the vtables. It does not set up the interrupt itself though.
4326 */
4327 void intel_irq_init(struct drm_i915_private *dev_priv)
4328 {
4329 struct drm_device *dev = dev_priv->dev;
4330
4331 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4332 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4333 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4334 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4335
4336 /* Let's track the enabled rps events */
4337 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4338 /* WaGsvRC0ResidencyMethod:vlv */
4339 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4340 else
4341 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4342
4343 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4344 i915_hangcheck_elapsed);
4345 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4346 intel_hpd_irq_reenable_work);
4347
4348 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4349
4350 if (IS_GEN2(dev_priv)) {
4351 dev->max_vblank_count = 0;
4352 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4353 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4354 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4355 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4356 } else {
4357 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4358 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4359 }
4360
4361 /*
4362 * Opt out of the vblank disable timer on everything except gen2.
4363 * Gen2 doesn't have a hardware frame counter and so depends on
4364 * vblank interrupts to produce sane vblank sequence numbers.
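 *
 * (Illustrative note: this pairs with the gen2 setup above, where
 * max_vblank_count stays 0 and ->get_vblank_counter is
 * i8xx_get_vblank_counter, which cannot report a real frame count, so the
 * interrupt-driven software count is the only usable sequence source.)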
4365 */
4366 if (!IS_GEN2(dev_priv))
4367 dev->vblank_disable_immediate = true;
4368
4369 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4370 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4371
4372 if (IS_CHERRYVIEW(dev_priv)) {
4373 dev->driver->irq_handler = cherryview_irq_handler;
4374 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4375 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4376 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4377 dev->driver->enable_vblank = valleyview_enable_vblank;
4378 dev->driver->disable_vblank = valleyview_disable_vblank;
4379 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4380 } else if (IS_VALLEYVIEW(dev_priv)) {
4381 dev->driver->irq_handler = valleyview_irq_handler;
4382 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4383 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4384 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4385 dev->driver->enable_vblank = valleyview_enable_vblank;
4386 dev->driver->disable_vblank = valleyview_disable_vblank;
4387 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4388 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4389 dev->driver->irq_handler = gen8_irq_handler;
4390 dev->driver->irq_preinstall = gen8_irq_reset;
4391 dev->driver->irq_postinstall = gen8_irq_postinstall;
4392 dev->driver->irq_uninstall = gen8_irq_uninstall;
4393 dev->driver->enable_vblank = gen8_enable_vblank;
4394 dev->driver->disable_vblank = gen8_disable_vblank;
4395 if (HAS_PCH_SPLIT(dev))
4396 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4397 else
4398 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4399 } else if (HAS_PCH_SPLIT(dev)) {
4400 dev->driver->irq_handler = ironlake_irq_handler;
4401 dev->driver->irq_preinstall = ironlake_irq_reset;
4402 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4403 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4404 dev->driver->enable_vblank = ironlake_enable_vblank;
4405 dev->driver->disable_vblank = ironlake_disable_vblank;
4406 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4407 } else {
4408 if (INTEL_INFO(dev_priv)->gen == 2) {
4409 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4410 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4411 dev->driver->irq_handler = i8xx_irq_handler;
4412 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4413 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4414 dev->driver->irq_preinstall = i915_irq_preinstall;
4415 dev->driver->irq_postinstall = i915_irq_postinstall;
4416 dev->driver->irq_uninstall = i915_irq_uninstall;
4417 dev->driver->irq_handler = i915_irq_handler;
4418 } else {
4419 dev->driver->irq_preinstall = i965_irq_preinstall;
4420 dev->driver->irq_postinstall = i965_irq_postinstall;
4421 dev->driver->irq_uninstall = i965_irq_uninstall;
4422 dev->driver->irq_handler = i965_irq_handler;
4423 }
4424 if (I915_HAS_HOTPLUG(dev_priv))
4425 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4426 dev->driver->enable_vblank = i915_enable_vblank;
4427 dev->driver->disable_vblank = i915_disable_vblank;
4428 }
4429 }
4430
4431 /**
4432 * intel_hpd_init - initializes and enables hpd support
4433 * @dev_priv: i915 device instance
4434 *
4435 * This function enables the hotplug support. It requires that interrupts have
4436 * already been enabled with intel_irq_install(). From this point on hotplug and
4437 * poll requests can run concurrently with other code, so locking rules must be
4438 * obeyed.
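 *
 * An illustrative load/resume ordering (the actual call sites live in the
 * driver load and resume paths, which are not part of this file) would be:
 *
 *	intel_irq_init(dev_priv);	- vtables, work items, timers
 *	intel_irq_install(dev_priv);	- enable the interrupt itself
 *	intel_hpd_init(dev_priv);	- enable hotplug support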
4439 *
4440 * This is a separate step from interrupt enabling to simplify the locking rules
4441 * in the driver load and resume code.
4442 */
4443 void intel_hpd_init(struct drm_i915_private *dev_priv)
4444 {
4445 struct drm_device *dev = dev_priv->dev;
4446 struct drm_mode_config *mode_config = &dev->mode_config;
4447 struct drm_connector *connector;
4448 int i;
4449
4450 for (i = 1; i < HPD_NUM_PINS; i++) {
4451 dev_priv->hpd_stats[i].hpd_cnt = 0;
4452 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4453 }
4454 list_for_each_entry(connector, &mode_config->connector_list, head) {
4455 struct intel_connector *intel_connector = to_intel_connector(connector);
4456 connector->polled = intel_connector->polled;
4457 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4458 connector->polled = DRM_CONNECTOR_POLL_HPD;
4459 if (intel_connector->mst_port)
4460 connector->polled = DRM_CONNECTOR_POLL_HPD;
4461 }
4462
4463 /* Interrupt setup is already guaranteed to be single-threaded, this is
4464 * just to make the assert_spin_locked checks happy. */
4465 spin_lock_irq(&dev_priv->irq_lock);
4466 if (dev_priv->display.hpd_irq_setup)
4467 dev_priv->display.hpd_irq_setup(dev);
4468 spin_unlock_irq(&dev_priv->irq_lock);
4469 }
4470
4471 /**
4472 * intel_irq_install - enables the hardware interrupt
4473 * @dev_priv: i915 device instance
4474 *
4475 * This function enables the hardware interrupt handling, but leaves the hotplug
4476 * handling still disabled. It is called after intel_irq_init().
4477 *
4478 * In the driver load and resume code we need working interrupts in a few places
4479 * but don't want to deal with the hassle of concurrent probe and hotplug
4480 * workers. Hence the split into this two-stage approach.
4481 */
4482 int intel_irq_install(struct drm_i915_private *dev_priv)
4483 {
4484 /*
4485 * We enable some interrupt sources in our postinstall hooks, so mark
4486 * interrupts as enabled _before_ actually enabling them to avoid
4487 * special cases in our ordering checks.
4488 */
4489 dev_priv->pm.irqs_enabled = true;
4490
4491 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4492 }
4493
4494 /**
4495 * intel_irq_uninstall - finalizes all irq handling
4496 * @dev_priv: i915 device instance
4497 *
4498 * This stops interrupt and hotplug handling and unregisters and frees all
4499 * resources acquired in the init functions.
4500 */
4501 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4502 {
4503 drm_irq_uninstall(dev_priv->dev);
4504 intel_hpd_cancel_work(dev_priv);
4505 dev_priv->pm.irqs_enabled = false;
4506 }
4507
4508 /**
4509 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4510 * @dev_priv: i915 device instance
4511 *
4512 * This function is used to disable interrupts at runtime, both in the runtime
4513 * pm and the system suspend/resume code.
4514 */
4515 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4516 {
4517 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4518 dev_priv->pm.irqs_enabled = false;
4519 synchronize_irq(dev_priv->dev->irq);
4520 }
4521
4522 /**
4523 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4524 * @dev_priv: i915 device instance
4525 *
4526 * This function is used to enable interrupts at runtime, both in the runtime
4527 * pm and the system suspend/resume code.
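 *
 * It is the counterpart of intel_runtime_pm_disable_interrupts() above; an
 * illustrative runtime suspend/resume sequence (the call sites are in the
 * runtime PM code, not shown here) would be:
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... device powered down and later powered back up ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);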
4528 */ 4529 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4530 { 4531 dev_priv->pm.irqs_enabled = true; 4532 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4533 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4534 } 4535