/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

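/*
 * Per-platform tables mapping each HPD pin to the hotplug trigger/status
 * bit in the relevant hardware register. intel_get_hpd_pins() further down
 * in this file uses them to translate raw hotplug status into pin masks.
 */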
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

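/*
 * The *_IRQ_RESET macros above mask everything (IMR all ones), disable IER
 * and clear IIR twice, since IIR can hold a second queued event. The
 * *_IRQ_INIT macros below assume IIR has already been cleared that way and
 * then program IER/IMR with the bits we actually want.
 */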
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

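/*
 * From gen8 onwards the PM (RPS) interrupts live in GT interrupt group 2,
 * so the register to poke depends on the generation. These helpers pick
 * the right IIR/IMR/IER register for the running hardware.
 */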
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

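/*
 * RPS interrupt enable/disable: the rps.pm_iir bookkeeping and the PM
 * IER/IMR updates are all done under irq_lock, so the RPS worker and the
 * interrupt handler always see a consistent enabled/disabled state.
 */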
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

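/*
 * PIPESTAT keeps the interrupt enable bits in the high 16 bits and the
 * matching status bits in the low 16 bits, so for most bits the enable
 * mask is simply status_mask << 16. VLV/CHV have a few bits that don't
 * follow that 1:1 mapping, which the helper below fixes up.
 */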
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/        .    \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

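/*
 * Frame counter variants above: gen2 has no hardware frame counter at all
 * (i8xx_get_vblank_counter() returns 0), gen3/4 cook one up from the pixel
 * counter relative to vblank_start, and gm45 and newer parts read the
 * dedicated free-running frame counter register.
 */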
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
721 */ 722 if (position >= vtotal) 723 position = vtotal - 1; 724 725 /* 726 * Start of vblank interrupt is triggered at start of hsync, 727 * just prior to the first active line of vblank. However we 728 * consider lines to start at the leading edge of horizontal 729 * active. So, should we get here before we've crossed into 730 * the horizontal active of the first line in vblank, we would 731 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 732 * always add htotal-hsync_start to the current pixel position. 733 */ 734 position = (position + htotal - hsync_start) % vtotal; 735 } 736 737 /* Get optional system timestamp after query. */ 738 if (etime) 739 *etime = ktime_get(); 740 741 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 742 743 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 744 745 in_vbl = position >= vbl_start && position < vbl_end; 746 747 /* 748 * While in vblank, position will be negative 749 * counting up towards 0 at vbl_end. And outside 750 * vblank, position will be positive counting 751 * up since vbl_end. 752 */ 753 if (position >= vbl_start) 754 position -= vbl_end; 755 else 756 position += vtotal - vbl_end; 757 758 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 759 *vpos = position; 760 *hpos = 0; 761 } else { 762 *vpos = position / htotal; 763 *hpos = position - (*vpos * htotal); 764 } 765 766 /* In vblank? */ 767 if (in_vbl) 768 ret |= DRM_SCANOUTPOS_IN_VBLANK; 769 770 return ret; 771 } 772 773 int intel_get_crtc_scanline(struct intel_crtc *crtc) 774 { 775 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 776 unsigned long irqflags; 777 int position; 778 779 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 780 position = __intel_get_crtc_scanline(crtc); 781 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 782 783 return position; 784 } 785 786 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 787 int *max_error, 788 struct timeval *vblank_time, 789 unsigned flags) 790 { 791 struct drm_crtc *crtc; 792 793 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 794 DRM_ERROR("Invalid crtc %d\n", pipe); 795 return -EINVAL; 796 } 797 798 /* Get drm_crtc to timestamp: */ 799 crtc = intel_get_crtc_for_pipe(dev, pipe); 800 if (crtc == NULL) { 801 DRM_ERROR("Invalid crtc %d\n", pipe); 802 return -EINVAL; 803 } 804 805 if (!crtc->hwmode.crtc_clock) { 806 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 807 return -EBUSY; 808 } 809 810 /* Helper routine in DRM core does all the work: */ 811 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 812 vblank_time, flags, 813 crtc, 814 &crtc->hwmode); 815 } 816 817 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 818 { 819 struct drm_i915_private *dev_priv = dev->dev_private; 820 u32 busy_up, busy_down, max_avg, min_avg; 821 u8 new_delay; 822 823 spin_lock(&mchdev_lock); 824 825 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 826 827 new_delay = dev_priv->ips.cur_delay; 828 829 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 830 busy_up = I915_READ(RCPREVBSYTUPAVG); 831 busy_down = I915_READ(RCPREVBSYTDNAVG); 832 max_avg = I915_READ(RCBMAXAVG); 833 min_avg = I915_READ(RCBMINAVG); 834 835 /* Handle RCS change request from hw */ 836 if (busy_up > max_avg) { 837 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 838 new_delay = dev_priv->ips.cur_delay - 1; 839 if (new_delay < dev_priv->ips.max_delay) 840 new_delay = dev_priv->ips.max_delay; 841 } else if (busy_down < min_avg) { 842 if 
(dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 843 new_delay = dev_priv->ips.cur_delay + 1; 844 if (new_delay > dev_priv->ips.min_delay) 845 new_delay = dev_priv->ips.min_delay; 846 } 847 848 if (ironlake_set_drps(dev, new_delay)) 849 dev_priv->ips.cur_delay = new_delay; 850 851 spin_unlock(&mchdev_lock); 852 853 return; 854 } 855 856 static void notify_ring(struct intel_engine_cs *ring) 857 { 858 if (!intel_ring_initialized(ring)) 859 return; 860 861 trace_i915_gem_request_notify(ring); 862 863 wake_up_all(&ring->irq_queue); 864 } 865 866 static void vlv_c0_read(struct drm_i915_private *dev_priv, 867 struct intel_rps_ei *ei) 868 { 869 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 870 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 871 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 872 } 873 874 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 875 const struct intel_rps_ei *old, 876 const struct intel_rps_ei *now, 877 int threshold) 878 { 879 u64 time, c0; 880 881 if (old->cz_clock == 0) 882 return false; 883 884 time = now->cz_clock - old->cz_clock; 885 time *= threshold * dev_priv->mem_freq; 886 887 /* Workload can be split between render + media, e.g. SwapBuffers 888 * being blitted in X after being rendered in mesa. To account for 889 * this we need to combine both engines into our activity counter. 890 */ 891 c0 = now->render_c0 - old->render_c0; 892 c0 += now->media_c0 - old->media_c0; 893 c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000; 894 895 return c0 >= time; 896 } 897 898 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 899 { 900 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 901 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 902 } 903 904 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 905 { 906 struct intel_rps_ei now; 907 u32 events = 0; 908 909 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 910 return 0; 911 912 vlv_c0_read(dev_priv, &now); 913 if (now.cz_clock == 0) 914 return 0; 915 916 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 917 if (!vlv_c0_above(dev_priv, 918 &dev_priv->rps.down_ei, &now, 919 dev_priv->rps.down_threshold)) 920 events |= GEN6_PM_RP_DOWN_THRESHOLD; 921 dev_priv->rps.down_ei = now; 922 } 923 924 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 925 if (vlv_c0_above(dev_priv, 926 &dev_priv->rps.up_ei, &now, 927 dev_priv->rps.up_threshold)) 928 events |= GEN6_PM_RP_UP_THRESHOLD; 929 dev_priv->rps.up_ei = now; 930 } 931 932 return events; 933 } 934 935 static bool any_waiters(struct drm_i915_private *dev_priv) 936 { 937 struct intel_engine_cs *ring; 938 int i; 939 940 for_each_ring(ring, dev_priv, i) 941 if (ring->irq_refcount) 942 return true; 943 944 return false; 945 } 946 947 static void gen6_pm_rps_work(struct work_struct *work) 948 { 949 struct drm_i915_private *dev_priv = 950 container_of(work, struct drm_i915_private, rps.work); 951 bool client_boost; 952 int new_delay, adj, min, max; 953 u32 pm_iir; 954 955 spin_lock_irq(&dev_priv->irq_lock); 956 /* Speed up work cancelation during disabling rps interrupts. 
*/ 957 if (!dev_priv->rps.interrupts_enabled) { 958 spin_unlock_irq(&dev_priv->irq_lock); 959 return; 960 } 961 pm_iir = dev_priv->rps.pm_iir; 962 dev_priv->rps.pm_iir = 0; 963 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 964 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 965 client_boost = dev_priv->rps.client_boost; 966 dev_priv->rps.client_boost = false; 967 spin_unlock_irq(&dev_priv->irq_lock); 968 969 /* Make sure we didn't queue anything we're not going to process. */ 970 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 971 972 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 973 return; 974 975 mutex_lock(&dev_priv->rps.hw_lock); 976 977 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 978 979 adj = dev_priv->rps.last_adj; 980 new_delay = dev_priv->rps.cur_freq; 981 min = dev_priv->rps.min_freq_softlimit; 982 max = dev_priv->rps.max_freq_softlimit; 983 984 if (client_boost) { 985 new_delay = dev_priv->rps.max_freq_softlimit; 986 adj = 0; 987 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 988 if (adj > 0) 989 adj *= 2; 990 else /* CHV needs even encode values */ 991 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 992 /* 993 * For better performance, jump directly 994 * to RPe if we're below it. 995 */ 996 if (new_delay < dev_priv->rps.efficient_freq - adj) { 997 new_delay = dev_priv->rps.efficient_freq; 998 adj = 0; 999 } 1000 } else if (any_waiters(dev_priv)) { 1001 adj = 0; 1002 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1003 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1004 new_delay = dev_priv->rps.efficient_freq; 1005 else 1006 new_delay = dev_priv->rps.min_freq_softlimit; 1007 adj = 0; 1008 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1009 if (adj < 0) 1010 adj *= 2; 1011 else /* CHV needs even encode values */ 1012 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1013 } else { /* unknown event */ 1014 adj = 0; 1015 } 1016 1017 dev_priv->rps.last_adj = adj; 1018 1019 /* sysfs frequency interfaces may have snuck in while servicing the 1020 * interrupt 1021 */ 1022 new_delay += adj; 1023 new_delay = clamp_t(int, new_delay, min, max); 1024 1025 intel_set_rps(dev_priv->dev, new_delay); 1026 1027 mutex_unlock(&dev_priv->rps.hw_lock); 1028 } 1029 1030 1031 /** 1032 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1033 * occurred. 1034 * @work: workqueue struct 1035 * 1036 * Doesn't actually do anything except notify userspace. As a consequence of 1037 * this event, userspace should try to remap the bad rows since statistically 1038 * it is likely the same row is more likely to go bad again. 1039 */ 1040 static void ivybridge_parity_work(struct work_struct *work) 1041 { 1042 struct drm_i915_private *dev_priv = 1043 container_of(work, struct drm_i915_private, l3_parity.error_work); 1044 u32 error_status, row, bank, subbank; 1045 char *parity_event[6]; 1046 uint32_t misccpctl; 1047 uint8_t slice = 0; 1048 1049 /* We must turn off DOP level clock gating to access the L3 registers. 1050 * In order to prevent a get/put style interface, acquire struct mutex 1051 * any time we access those registers. 
1052 */ 1053 mutex_lock(&dev_priv->dev->struct_mutex); 1054 1055 /* If we've screwed up tracking, just let the interrupt fire again */ 1056 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1057 goto out; 1058 1059 misccpctl = I915_READ(GEN7_MISCCPCTL); 1060 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1061 POSTING_READ(GEN7_MISCCPCTL); 1062 1063 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1064 u32 reg; 1065 1066 slice--; 1067 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1068 break; 1069 1070 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1071 1072 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1073 1074 error_status = I915_READ(reg); 1075 row = GEN7_PARITY_ERROR_ROW(error_status); 1076 bank = GEN7_PARITY_ERROR_BANK(error_status); 1077 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1078 1079 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1080 POSTING_READ(reg); 1081 1082 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1083 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1084 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1085 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1086 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1087 parity_event[5] = NULL; 1088 1089 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1090 KOBJ_CHANGE, parity_event); 1091 1092 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1093 slice, row, bank, subbank); 1094 1095 kfree(parity_event[4]); 1096 kfree(parity_event[3]); 1097 kfree(parity_event[2]); 1098 kfree(parity_event[1]); 1099 } 1100 1101 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1102 1103 out: 1104 WARN_ON(dev_priv->l3_parity.which_slice); 1105 spin_lock_irq(&dev_priv->irq_lock); 1106 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1107 spin_unlock_irq(&dev_priv->irq_lock); 1108 1109 mutex_unlock(&dev_priv->dev->struct_mutex); 1110 } 1111 1112 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1113 { 1114 struct drm_i915_private *dev_priv = dev->dev_private; 1115 1116 if (!HAS_L3_DPF(dev)) 1117 return; 1118 1119 spin_lock(&dev_priv->irq_lock); 1120 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1121 spin_unlock(&dev_priv->irq_lock); 1122 1123 iir &= GT_PARITY_ERROR(dev); 1124 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1125 dev_priv->l3_parity.which_slice |= 1 << 1; 1126 1127 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1128 dev_priv->l3_parity.which_slice |= 1 << 0; 1129 1130 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1131 } 1132 1133 static void ilk_gt_irq_handler(struct drm_device *dev, 1134 struct drm_i915_private *dev_priv, 1135 u32 gt_iir) 1136 { 1137 if (gt_iir & 1138 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1139 notify_ring(&dev_priv->ring[RCS]); 1140 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1141 notify_ring(&dev_priv->ring[VCS]); 1142 } 1143 1144 static void snb_gt_irq_handler(struct drm_device *dev, 1145 struct drm_i915_private *dev_priv, 1146 u32 gt_iir) 1147 { 1148 1149 if (gt_iir & 1150 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1151 notify_ring(&dev_priv->ring[RCS]); 1152 if (gt_iir & GT_BSD_USER_INTERRUPT) 1153 notify_ring(&dev_priv->ring[VCS]); 1154 if (gt_iir & GT_BLT_USER_INTERRUPT) 1155 notify_ring(&dev_priv->ring[BCS]); 1156 1157 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1158 GT_BSD_CS_ERROR_INTERRUPT | 1159 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1160 DRM_DEBUG("Command parser 
error, gt_iir 0x%08x\n", gt_iir); 1161 1162 if (gt_iir & GT_PARITY_ERROR(dev)) 1163 ivybridge_parity_error_irq_handler(dev, gt_iir); 1164 } 1165 1166 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1167 u32 master_ctl) 1168 { 1169 irqreturn_t ret = IRQ_NONE; 1170 1171 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1172 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); 1173 if (tmp) { 1174 I915_WRITE_FW(GEN8_GT_IIR(0), tmp); 1175 ret = IRQ_HANDLED; 1176 1177 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) 1178 intel_lrc_irq_handler(&dev_priv->ring[RCS]); 1179 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) 1180 notify_ring(&dev_priv->ring[RCS]); 1181 1182 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) 1183 intel_lrc_irq_handler(&dev_priv->ring[BCS]); 1184 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) 1185 notify_ring(&dev_priv->ring[BCS]); 1186 } else 1187 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1188 } 1189 1190 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1191 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); 1192 if (tmp) { 1193 I915_WRITE_FW(GEN8_GT_IIR(1), tmp); 1194 ret = IRQ_HANDLED; 1195 1196 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) 1197 intel_lrc_irq_handler(&dev_priv->ring[VCS]); 1198 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) 1199 notify_ring(&dev_priv->ring[VCS]); 1200 1201 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) 1202 intel_lrc_irq_handler(&dev_priv->ring[VCS2]); 1203 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) 1204 notify_ring(&dev_priv->ring[VCS2]); 1205 } else 1206 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1207 } 1208 1209 if (master_ctl & GEN8_GT_VECS_IRQ) { 1210 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); 1211 if (tmp) { 1212 I915_WRITE_FW(GEN8_GT_IIR(3), tmp); 1213 ret = IRQ_HANDLED; 1214 1215 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) 1216 intel_lrc_irq_handler(&dev_priv->ring[VECS]); 1217 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) 1218 notify_ring(&dev_priv->ring[VECS]); 1219 } else 1220 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1221 } 1222 1223 if (master_ctl & GEN8_GT_PM_IRQ) { 1224 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); 1225 if (tmp & dev_priv->pm_rps_events) { 1226 I915_WRITE_FW(GEN8_GT_IIR(2), 1227 tmp & dev_priv->pm_rps_events); 1228 ret = IRQ_HANDLED; 1229 gen6_rps_irq_handler(dev_priv, tmp); 1230 } else 1231 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1232 } 1233 1234 return ret; 1235 } 1236 1237 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1238 { 1239 switch (port) { 1240 case PORT_A: 1241 return val & BXT_PORTA_HOTPLUG_LONG_DETECT; 1242 case PORT_B: 1243 return val & PORTB_HOTPLUG_LONG_DETECT; 1244 case PORT_C: 1245 return val & PORTC_HOTPLUG_LONG_DETECT; 1246 case PORT_D: 1247 return val & PORTD_HOTPLUG_LONG_DETECT; 1248 default: 1249 return false; 1250 } 1251 } 1252 1253 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1254 { 1255 switch (port) { 1256 case PORT_B: 1257 return val & PORTB_HOTPLUG_LONG_DETECT; 1258 case PORT_C: 1259 return val & PORTC_HOTPLUG_LONG_DETECT; 1260 case PORT_D: 1261 return val & PORTD_HOTPLUG_LONG_DETECT; 1262 case PORT_E: 1263 return val & PORTE_HOTPLUG_LONG_DETECT; 1264 default: 1265 return false; 1266 } 1267 } 1268 1269 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1270 { 1271 switch (port) { 1272 case PORT_B: 1273 
return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1274 case PORT_C: 1275 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1276 case PORT_D: 1277 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1278 default: 1279 return false; 1280 } 1281 } 1282 1283 /* Get a bit mask of pins that have triggered, and which ones may be long. */ 1284 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1285 u32 hotplug_trigger, u32 dig_hotplug_reg, 1286 const u32 hpd[HPD_NUM_PINS], 1287 bool long_pulse_detect(enum port port, u32 val)) 1288 { 1289 enum port port; 1290 int i; 1291 1292 *pin_mask = 0; 1293 *long_mask = 0; 1294 1295 for_each_hpd_pin(i) { 1296 if ((hpd[i] & hotplug_trigger) == 0) 1297 continue; 1298 1299 *pin_mask |= BIT(i); 1300 1301 if (!intel_hpd_pin_to_port(i, &port)) 1302 continue; 1303 1304 if (long_pulse_detect(port, dig_hotplug_reg)) 1305 *long_mask |= BIT(i); 1306 } 1307 1308 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1309 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1310 1311 } 1312 1313 static void gmbus_irq_handler(struct drm_device *dev) 1314 { 1315 struct drm_i915_private *dev_priv = dev->dev_private; 1316 1317 wake_up_all(&dev_priv->gmbus_wait_queue); 1318 } 1319 1320 static void dp_aux_irq_handler(struct drm_device *dev) 1321 { 1322 struct drm_i915_private *dev_priv = dev->dev_private; 1323 1324 wake_up_all(&dev_priv->gmbus_wait_queue); 1325 } 1326 1327 #if defined(CONFIG_DEBUG_FS) 1328 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1329 uint32_t crc0, uint32_t crc1, 1330 uint32_t crc2, uint32_t crc3, 1331 uint32_t crc4) 1332 { 1333 struct drm_i915_private *dev_priv = dev->dev_private; 1334 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1335 struct intel_pipe_crc_entry *entry; 1336 int head, tail; 1337 1338 spin_lock(&pipe_crc->lock); 1339 1340 if (!pipe_crc->entries) { 1341 spin_unlock(&pipe_crc->lock); 1342 DRM_DEBUG_KMS("spurious interrupt\n"); 1343 return; 1344 } 1345 1346 head = pipe_crc->head; 1347 tail = pipe_crc->tail; 1348 1349 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1350 spin_unlock(&pipe_crc->lock); 1351 DRM_ERROR("CRC buffer overflowing\n"); 1352 return; 1353 } 1354 1355 entry = &pipe_crc->entries[head]; 1356 1357 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1358 entry->crc[0] = crc0; 1359 entry->crc[1] = crc1; 1360 entry->crc[2] = crc2; 1361 entry->crc[3] = crc3; 1362 entry->crc[4] = crc4; 1363 1364 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1365 pipe_crc->head = head; 1366 1367 spin_unlock(&pipe_crc->lock); 1368 1369 wake_up_interruptible(&pipe_crc->wq); 1370 } 1371 #else 1372 static inline void 1373 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1374 uint32_t crc0, uint32_t crc1, 1375 uint32_t crc2, uint32_t crc3, 1376 uint32_t crc4) {} 1377 #endif 1378 1379 1380 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1381 { 1382 struct drm_i915_private *dev_priv = dev->dev_private; 1383 1384 display_pipe_crc_irq_handler(dev, pipe, 1385 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1386 0, 0, 0, 0); 1387 } 1388 1389 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1390 { 1391 struct drm_i915_private *dev_priv = dev->dev_private; 1392 1393 display_pipe_crc_irq_handler(dev, pipe, 1394 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1395 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1396 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1397 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1398 
I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1399 } 1400 1401 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1402 { 1403 struct drm_i915_private *dev_priv = dev->dev_private; 1404 uint32_t res1, res2; 1405 1406 if (INTEL_INFO(dev)->gen >= 3) 1407 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1408 else 1409 res1 = 0; 1410 1411 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1412 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1413 else 1414 res2 = 0; 1415 1416 display_pipe_crc_irq_handler(dev, pipe, 1417 I915_READ(PIPE_CRC_RES_RED(pipe)), 1418 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1419 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1420 res1, res2); 1421 } 1422 1423 /* The RPS events need forcewake, so we add them to a work queue and mask their 1424 * IMR bits until the work is done. Other interrupts can be processed without 1425 * the work queue. */ 1426 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1427 { 1428 if (pm_iir & dev_priv->pm_rps_events) { 1429 spin_lock(&dev_priv->irq_lock); 1430 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1431 if (dev_priv->rps.interrupts_enabled) { 1432 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1433 queue_work(dev_priv->wq, &dev_priv->rps.work); 1434 } 1435 spin_unlock(&dev_priv->irq_lock); 1436 } 1437 1438 if (INTEL_INFO(dev_priv)->gen >= 8) 1439 return; 1440 1441 if (HAS_VEBOX(dev_priv->dev)) { 1442 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1443 notify_ring(&dev_priv->ring[VECS]); 1444 1445 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1446 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1447 } 1448 } 1449 1450 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 1451 { 1452 if (!drm_handle_vblank(dev, pipe)) 1453 return false; 1454 1455 return true; 1456 } 1457 1458 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 1459 { 1460 struct drm_i915_private *dev_priv = dev->dev_private; 1461 u32 pipe_stats[I915_MAX_PIPES] = { }; 1462 int pipe; 1463 1464 spin_lock(&dev_priv->irq_lock); 1465 for_each_pipe(dev_priv, pipe) { 1466 int reg; 1467 u32 mask, iir_bit = 0; 1468 1469 /* 1470 * PIPESTAT bits get signalled even when the interrupt is 1471 * disabled with the mask bits, and some of the status bits do 1472 * not generate interrupts at all (like the underrun bit). Hence 1473 * we need to be careful that we only handle what we want to 1474 * handle. 1475 */ 1476 1477 /* fifo underruns are filterered in the underrun handler. 
*/ 1478 mask = PIPE_FIFO_UNDERRUN_STATUS; 1479 1480 switch (pipe) { 1481 case PIPE_A: 1482 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1483 break; 1484 case PIPE_B: 1485 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1486 break; 1487 case PIPE_C: 1488 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1489 break; 1490 } 1491 if (iir & iir_bit) 1492 mask |= dev_priv->pipestat_irq_mask[pipe]; 1493 1494 if (!mask) 1495 continue; 1496 1497 reg = PIPESTAT(pipe); 1498 mask |= PIPESTAT_INT_ENABLE_MASK; 1499 pipe_stats[pipe] = I915_READ(reg) & mask; 1500 1501 /* 1502 * Clear the PIPE*STAT regs before the IIR 1503 */ 1504 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1505 PIPESTAT_INT_STATUS_MASK)) 1506 I915_WRITE(reg, pipe_stats[pipe]); 1507 } 1508 spin_unlock(&dev_priv->irq_lock); 1509 1510 for_each_pipe(dev_priv, pipe) { 1511 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1512 intel_pipe_handle_vblank(dev, pipe)) 1513 intel_check_page_flip(dev, pipe); 1514 1515 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1516 intel_prepare_page_flip(dev, pipe); 1517 intel_finish_page_flip(dev, pipe); 1518 } 1519 1520 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1521 i9xx_pipe_crc_irq_handler(dev, pipe); 1522 1523 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1524 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1525 } 1526 1527 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1528 gmbus_irq_handler(dev); 1529 } 1530 1531 static void i9xx_hpd_irq_handler(struct drm_device *dev) 1532 { 1533 struct drm_i915_private *dev_priv = dev->dev_private; 1534 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1535 u32 pin_mask, long_mask; 1536 1537 if (!hotplug_status) 1538 return; 1539 1540 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1541 /* 1542 * Make sure hotplug status is cleared before we clear IIR, or else we 1543 * may miss hotplug events. 
1544 */ 1545 POSTING_READ(PORT_HOTPLUG_STAT); 1546 1547 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 1548 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1549 1550 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1551 hotplug_trigger, hpd_status_g4x, 1552 i9xx_port_hotplug_long_detect); 1553 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1554 1555 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1556 dp_aux_irq_handler(dev); 1557 } else { 1558 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1559 1560 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1561 hotplug_trigger, hpd_status_g4x, 1562 i9xx_port_hotplug_long_detect); 1563 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1564 } 1565 } 1566 1567 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1568 { 1569 struct drm_device *dev = arg; 1570 struct drm_i915_private *dev_priv = dev->dev_private; 1571 u32 iir, gt_iir, pm_iir; 1572 irqreturn_t ret = IRQ_NONE; 1573 1574 if (!intel_irqs_enabled(dev_priv)) 1575 return IRQ_NONE; 1576 1577 while (true) { 1578 /* Find, clear, then process each source of interrupt */ 1579 1580 gt_iir = I915_READ(GTIIR); 1581 if (gt_iir) 1582 I915_WRITE(GTIIR, gt_iir); 1583 1584 pm_iir = I915_READ(GEN6_PMIIR); 1585 if (pm_iir) 1586 I915_WRITE(GEN6_PMIIR, pm_iir); 1587 1588 iir = I915_READ(VLV_IIR); 1589 if (iir) { 1590 /* Consume port before clearing IIR or we'll miss events */ 1591 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1592 i9xx_hpd_irq_handler(dev); 1593 I915_WRITE(VLV_IIR, iir); 1594 } 1595 1596 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1597 goto out; 1598 1599 ret = IRQ_HANDLED; 1600 1601 if (gt_iir) 1602 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1603 if (pm_iir) 1604 gen6_rps_irq_handler(dev_priv, pm_iir); 1605 /* Call regardless, as some status bits might not be 1606 * signalled in iir */ 1607 valleyview_pipestat_irq_handler(dev, iir); 1608 } 1609 1610 out: 1611 return ret; 1612 } 1613 1614 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1615 { 1616 struct drm_device *dev = arg; 1617 struct drm_i915_private *dev_priv = dev->dev_private; 1618 u32 master_ctl, iir; 1619 irqreturn_t ret = IRQ_NONE; 1620 1621 if (!intel_irqs_enabled(dev_priv)) 1622 return IRQ_NONE; 1623 1624 for (;;) { 1625 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1626 iir = I915_READ(VLV_IIR); 1627 1628 if (master_ctl == 0 && iir == 0) 1629 break; 1630 1631 ret = IRQ_HANDLED; 1632 1633 I915_WRITE(GEN8_MASTER_IRQ, 0); 1634 1635 /* Find, clear, then process each source of interrupt */ 1636 1637 if (iir) { 1638 /* Consume port before clearing IIR or we'll miss events */ 1639 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1640 i9xx_hpd_irq_handler(dev); 1641 I915_WRITE(VLV_IIR, iir); 1642 } 1643 1644 gen8_gt_irq_handler(dev_priv, master_ctl); 1645 1646 /* Call regardless, as some status bits might not be 1647 * signalled in iir */ 1648 valleyview_pipestat_irq_handler(dev, iir); 1649 1650 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1651 POSTING_READ(GEN8_MASTER_IRQ); 1652 } 1653 1654 return ret; 1655 } 1656 1657 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1658 { 1659 struct drm_i915_private *dev_priv = dev->dev_private; 1660 int pipe; 1661 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1662 1663 if (hotplug_trigger) { 1664 u32 dig_hotplug_reg, pin_mask, long_mask; 1665 1666 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1667 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1668 1669 intel_get_hpd_pins(&pin_mask, 
&long_mask, hotplug_trigger, 1670 dig_hotplug_reg, hpd_ibx, 1671 pch_port_hotplug_long_detect); 1672 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1673 } 1674 1675 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1676 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1677 SDE_AUDIO_POWER_SHIFT); 1678 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1679 port_name(port)); 1680 } 1681 1682 if (pch_iir & SDE_AUX_MASK) 1683 dp_aux_irq_handler(dev); 1684 1685 if (pch_iir & SDE_GMBUS) 1686 gmbus_irq_handler(dev); 1687 1688 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1689 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1690 1691 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1692 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1693 1694 if (pch_iir & SDE_POISON) 1695 DRM_ERROR("PCH poison interrupt\n"); 1696 1697 if (pch_iir & SDE_FDI_MASK) 1698 for_each_pipe(dev_priv, pipe) 1699 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1700 pipe_name(pipe), 1701 I915_READ(FDI_RX_IIR(pipe))); 1702 1703 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1704 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1705 1706 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1707 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1708 1709 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1710 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1711 1712 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1713 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1714 } 1715 1716 static void ivb_err_int_handler(struct drm_device *dev) 1717 { 1718 struct drm_i915_private *dev_priv = dev->dev_private; 1719 u32 err_int = I915_READ(GEN7_ERR_INT); 1720 enum pipe pipe; 1721 1722 if (err_int & ERR_INT_POISON) 1723 DRM_ERROR("Poison interrupt\n"); 1724 1725 for_each_pipe(dev_priv, pipe) { 1726 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1727 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1728 1729 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1730 if (IS_IVYBRIDGE(dev)) 1731 ivb_pipe_crc_irq_handler(dev, pipe); 1732 else 1733 hsw_pipe_crc_irq_handler(dev, pipe); 1734 } 1735 } 1736 1737 I915_WRITE(GEN7_ERR_INT, err_int); 1738 } 1739 1740 static void cpt_serr_int_handler(struct drm_device *dev) 1741 { 1742 struct drm_i915_private *dev_priv = dev->dev_private; 1743 u32 serr_int = I915_READ(SERR_INT); 1744 1745 if (serr_int & SERR_INT_POISON) 1746 DRM_ERROR("PCH poison interrupt\n"); 1747 1748 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1749 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1750 1751 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1752 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1753 1754 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1755 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 1756 1757 I915_WRITE(SERR_INT, serr_int); 1758 } 1759 1760 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1761 { 1762 struct drm_i915_private *dev_priv = dev->dev_private; 1763 int pipe; 1764 u32 hotplug_trigger; 1765 1766 if (HAS_PCH_SPT(dev)) 1767 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT; 1768 else 1769 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1770 1771 if (hotplug_trigger) { 1772 u32 dig_hotplug_reg, pin_mask, long_mask; 1773 1774 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1775 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1776 1777 if (HAS_PCH_SPT(dev)) { 1778 intel_get_hpd_pins(&pin_mask, &long_mask, 1779 hotplug_trigger, 1780 dig_hotplug_reg, hpd_spt, 1781 pch_port_hotplug_long_detect); 1782 1783 /* detect PORTE HP event */ 1784 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 1785 if (pch_port_hotplug_long_detect(PORT_E, 1786 dig_hotplug_reg)) 1787 long_mask |= 1 << HPD_PORT_E; 1788 } else 1789 intel_get_hpd_pins(&pin_mask, &long_mask, 1790 hotplug_trigger, 1791 dig_hotplug_reg, hpd_cpt, 1792 pch_port_hotplug_long_detect); 1793 1794 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1795 } 1796 1797 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1798 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1799 SDE_AUDIO_POWER_SHIFT_CPT); 1800 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1801 port_name(port)); 1802 } 1803 1804 if (pch_iir & SDE_AUX_MASK_CPT) 1805 dp_aux_irq_handler(dev); 1806 1807 if (pch_iir & SDE_GMBUS_CPT) 1808 gmbus_irq_handler(dev); 1809 1810 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1811 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1812 1813 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1814 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1815 1816 if (pch_iir & SDE_FDI_MASK_CPT) 1817 for_each_pipe(dev_priv, pipe) 1818 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1819 pipe_name(pipe), 1820 I915_READ(FDI_RX_IIR(pipe))); 1821 1822 if (pch_iir & SDE_ERROR_CPT) 1823 cpt_serr_int_handler(dev); 1824 } 1825 1826 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1827 { 1828 struct drm_i915_private *dev_priv = dev->dev_private; 1829 enum pipe pipe; 1830 1831 if (de_iir & DE_AUX_CHANNEL_A) 1832 dp_aux_irq_handler(dev); 1833 1834 if (de_iir & DE_GSE) 1835 intel_opregion_asle_intr(dev); 1836 1837 if (de_iir & DE_POISON) 1838 DRM_ERROR("Poison interrupt\n"); 1839 1840 for_each_pipe(dev_priv, pipe) { 1841 if (de_iir & DE_PIPE_VBLANK(pipe) && 1842 intel_pipe_handle_vblank(dev, pipe)) 1843 intel_check_page_flip(dev, pipe); 1844 1845 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1846 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1847 1848 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1849 i9xx_pipe_crc_irq_handler(dev, pipe); 1850 1851 /* plane/pipes map 1:1 on ilk+ */ 1852 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 1853 intel_prepare_page_flip(dev, pipe); 1854 intel_finish_page_flip_plane(dev, pipe); 1855 } 1856 } 1857 1858 /* check event from PCH */ 1859 if (de_iir & DE_PCH_EVENT) { 1860 u32 pch_iir = I915_READ(SDEIIR); 1861 1862 if (HAS_PCH_CPT(dev)) 1863 cpt_irq_handler(dev, pch_iir); 1864 else 1865 ibx_irq_handler(dev, pch_iir); 1866 1867 /* should clear PCH hotplug event before clear CPU irq */ 1868 I915_WRITE(SDEIIR, pch_iir); 1869 } 1870 1871 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1872 ironlake_rps_change_irq_handler(dev); 1873 } 1874 1875 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1876 { 1877 struct drm_i915_private *dev_priv = dev->dev_private; 1878 enum pipe pipe; 1879 1880 if (de_iir & DE_ERR_INT_IVB) 1881 ivb_err_int_handler(dev); 1882 1883 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1884 dp_aux_irq_handler(dev); 1885 1886 if (de_iir & DE_GSE_IVB) 1887 intel_opregion_asle_intr(dev); 1888 1889 for_each_pipe(dev_priv, pipe) { 1890 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 1891 intel_pipe_handle_vblank(dev, pipe)) 1892 intel_check_page_flip(dev, pipe); 1893 1894 /* plane/pipes map 1:1 on ilk+ */ 1895 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 1896 intel_prepare_page_flip(dev, pipe); 1897 intel_finish_page_flip_plane(dev, pipe); 1898 } 1899 } 1900 1901 /* check event from PCH */ 1902 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 1903 u32 pch_iir = I915_READ(SDEIIR); 1904 1905 cpt_irq_handler(dev, pch_iir); 1906 1907 /* clear PCH hotplug event before clear 
CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
         * able to process them after we restore SDEIER (as soon as we restore
         * it, we'll get an interrupt if SDEIIR still has something to process
         * due to its back queue). */
        if (!HAS_PCH_NOP(dev)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
                POSTING_READ(SDEIER);
        }

        /* Find, clear, then process each source of interrupt */

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 6)
                        snb_gt_irq_handler(dev, dev_priv, gt_iir);
                else
                        ilk_gt_irq_handler(dev, dev_priv, gt_iir);
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 7)
                        ivb_display_irq_handler(dev, de_iir);
                else
                        ilk_display_irq_handler(dev, de_iir);
        }

        if (INTEL_INFO(dev)->gen >= 6) {
                u32 pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir) {
                        I915_WRITE(GEN6_PMIIR, pm_iir);
                        ret = IRQ_HANDLED;
                        gen6_rps_irq_handler(dev_priv, pm_iir);
                }
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev)) {
                I915_WRITE(SDEIER, sde_ier);
                POSTING_READ(SDEIER);
        }

        return ret;
}

static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hp_control, hp_trigger;
        u32 pin_mask, long_mask;

        /* Get the status */
        hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
        hp_control = I915_READ(BXT_HOTPLUG_CTL);

        /* Hotplug not enabled?
*/ 2002 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) { 2003 DRM_ERROR("Interrupt when HPD disabled\n"); 2004 return; 2005 } 2006 2007 /* Clear sticky bits in hpd status */ 2008 I915_WRITE(BXT_HOTPLUG_CTL, hp_control); 2009 2010 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control, 2011 hpd_bxt, bxt_port_hotplug_long_detect); 2012 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2013 } 2014 2015 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2016 { 2017 struct drm_device *dev = arg; 2018 struct drm_i915_private *dev_priv = dev->dev_private; 2019 u32 master_ctl; 2020 irqreturn_t ret = IRQ_NONE; 2021 uint32_t tmp = 0; 2022 enum pipe pipe; 2023 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2024 2025 if (!intel_irqs_enabled(dev_priv)) 2026 return IRQ_NONE; 2027 2028 if (IS_GEN9(dev)) 2029 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2030 GEN9_AUX_CHANNEL_D; 2031 2032 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2033 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2034 if (!master_ctl) 2035 return IRQ_NONE; 2036 2037 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2038 2039 /* Find, clear, then process each source of interrupt */ 2040 2041 ret = gen8_gt_irq_handler(dev_priv, master_ctl); 2042 2043 if (master_ctl & GEN8_DE_MISC_IRQ) { 2044 tmp = I915_READ(GEN8_DE_MISC_IIR); 2045 if (tmp) { 2046 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2047 ret = IRQ_HANDLED; 2048 if (tmp & GEN8_DE_MISC_GSE) 2049 intel_opregion_asle_intr(dev); 2050 else 2051 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2052 } 2053 else 2054 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2055 } 2056 2057 if (master_ctl & GEN8_DE_PORT_IRQ) { 2058 tmp = I915_READ(GEN8_DE_PORT_IIR); 2059 if (tmp) { 2060 bool found = false; 2061 2062 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2063 ret = IRQ_HANDLED; 2064 2065 if (tmp & aux_mask) { 2066 dp_aux_irq_handler(dev); 2067 found = true; 2068 } 2069 2070 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) { 2071 bxt_hpd_handler(dev, tmp); 2072 found = true; 2073 } 2074 2075 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) { 2076 gmbus_irq_handler(dev); 2077 found = true; 2078 } 2079 2080 if (!found) 2081 DRM_ERROR("Unexpected DE Port interrupt\n"); 2082 } 2083 else 2084 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2085 } 2086 2087 for_each_pipe(dev_priv, pipe) { 2088 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2089 2090 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2091 continue; 2092 2093 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2094 if (pipe_iir) { 2095 ret = IRQ_HANDLED; 2096 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2097 2098 if (pipe_iir & GEN8_PIPE_VBLANK && 2099 intel_pipe_handle_vblank(dev, pipe)) 2100 intel_check_page_flip(dev, pipe); 2101 2102 if (IS_GEN9(dev)) 2103 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2104 else 2105 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2106 2107 if (flip_done) { 2108 intel_prepare_page_flip(dev, pipe); 2109 intel_finish_page_flip_plane(dev, pipe); 2110 } 2111 2112 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2113 hsw_pipe_crc_irq_handler(dev, pipe); 2114 2115 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2116 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2117 pipe); 2118 2119 2120 if (IS_GEN9(dev)) 2121 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2122 else 2123 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2124 2125 if (fault_errors) 2126 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2127 pipe_name(pipe), 2128 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2129 } else 2130 DRM_ERROR("The master 
control interrupt lied (DE PIPE)!\n");
        }

        if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
            master_ctl & GEN8_DE_PCH_IRQ) {
                /*
                 * FIXME(BDW): Assume for now that the new interrupt handling
                 * scheme also closed the SDE interrupt handling race we've seen
                 * on older pch-split platforms. But this needs testing.
                 */
                u32 pch_iir = I915_READ(SDEIIR);
                if (pch_iir) {
                        I915_WRITE(SDEIIR, pch_iir);
                        ret = IRQ_HANDLED;
                        cpt_irq_handler(dev, pch_iir);
                } else
                        DRM_ERROR("The master control interrupt lied (SDE)!\n");

        }

        I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        POSTING_READ_FW(GEN8_MASTER_IRQ);

        return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
                               bool reset_completed)
{
        struct intel_engine_cs *ring;
        int i;

        /*
         * Notify all waiters for GPU completion events that reset state has
         * been changed, and that they need to restart their wait after
         * checking for potential errors (and bail out to drop locks if there is
         * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
         */

        /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
        for_each_ring(ring, dev_priv, i)
                wake_up_all(&ring->irq_queue);

        /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
        wake_up_all(&dev_priv->pending_flip_queue);

        /*
         * Signal tasks blocked in i915_gem_wait_for_error that the pending
         * reset state is cleared.
         */
        if (reset_completed)
                wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_gpu_error *error = &dev_priv->gpu_error;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
        int ret;

        kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

        /*
         * Note that there's only one work item which does gpu resets, so we
         * need not worry about concurrent gpu resets potentially incrementing
         * error->reset_counter twice. We only need to take care of another
         * racing irq/hangcheck declaring the gpu dead for a second time. A
         * quick check for that is good enough: schedule_work ensures the
         * correct ordering between hang detection and this work item, and since
         * the reset in-progress bit is only ever set by code outside of this
         * work we don't need to worry about any other races.
         */
        if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
                kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
                                   reset_event);

                /*
                 * In most cases it's guaranteed that we get here with an RPM
                 * reference held, for example because there is a pending GPU
                 * request that won't finish until the reset is done. This
                 * isn't the case at least when we get here by doing a
                 * simulated reset via debugfs, so get an RPM reference.
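                 * The reference is dropped again with intel_runtime_pm_put()
                 * once the prepare/reset/finish sequence below has completed.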
2222 */ 2223 intel_runtime_pm_get(dev_priv); 2224 2225 intel_prepare_reset(dev); 2226 2227 /* 2228 * All state reset _must_ be completed before we update the 2229 * reset counter, for otherwise waiters might miss the reset 2230 * pending state and not properly drop locks, resulting in 2231 * deadlocks with the reset work. 2232 */ 2233 ret = i915_reset(dev); 2234 2235 intel_finish_reset(dev); 2236 2237 intel_runtime_pm_put(dev_priv); 2238 2239 if (ret == 0) { 2240 /* 2241 * After all the gem state is reset, increment the reset 2242 * counter and wake up everyone waiting for the reset to 2243 * complete. 2244 * 2245 * Since unlock operations are a one-sided barrier only, 2246 * we need to insert a barrier here to order any seqno 2247 * updates before 2248 * the counter increment. 2249 */ 2250 smp_mb__before_atomic(); 2251 atomic_inc(&dev_priv->gpu_error.reset_counter); 2252 2253 kobject_uevent_env(&dev->primary->kdev->kobj, 2254 KOBJ_CHANGE, reset_done_event); 2255 } else { 2256 atomic_or(I915_WEDGED, &error->reset_counter); 2257 } 2258 2259 /* 2260 * Note: The wake_up also serves as a memory barrier so that 2261 * waiters see the update value of the reset counter atomic_t. 2262 */ 2263 i915_error_wake_up(dev_priv, true); 2264 } 2265 } 2266 2267 static void i915_report_and_clear_eir(struct drm_device *dev) 2268 { 2269 struct drm_i915_private *dev_priv = dev->dev_private; 2270 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2271 u32 eir = I915_READ(EIR); 2272 int pipe, i; 2273 2274 if (!eir) 2275 return; 2276 2277 pr_err("render error detected, EIR: 0x%08x\n", eir); 2278 2279 i915_get_extra_instdone(dev, instdone); 2280 2281 if (IS_G4X(dev)) { 2282 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2283 u32 ipeir = I915_READ(IPEIR_I965); 2284 2285 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2286 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2287 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2288 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2289 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2290 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2291 I915_WRITE(IPEIR_I965, ipeir); 2292 POSTING_READ(IPEIR_I965); 2293 } 2294 if (eir & GM45_ERROR_PAGE_TABLE) { 2295 u32 pgtbl_err = I915_READ(PGTBL_ER); 2296 pr_err("page table error\n"); 2297 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2298 I915_WRITE(PGTBL_ER, pgtbl_err); 2299 POSTING_READ(PGTBL_ER); 2300 } 2301 } 2302 2303 if (!IS_GEN2(dev)) { 2304 if (eir & I915_ERROR_PAGE_TABLE) { 2305 u32 pgtbl_err = I915_READ(PGTBL_ER); 2306 pr_err("page table error\n"); 2307 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2308 I915_WRITE(PGTBL_ER, pgtbl_err); 2309 POSTING_READ(PGTBL_ER); 2310 } 2311 } 2312 2313 if (eir & I915_ERROR_MEMORY_REFRESH) { 2314 pr_err("memory refresh error:\n"); 2315 for_each_pipe(dev_priv, pipe) 2316 pr_err("pipe %c stat: 0x%08x\n", 2317 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2318 /* pipestat has already been acked */ 2319 } 2320 if (eir & I915_ERROR_INSTRUCTION) { 2321 pr_err("instruction error\n"); 2322 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2323 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2324 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2325 if (INTEL_INFO(dev)->gen < 4) { 2326 u32 ipeir = I915_READ(IPEIR); 2327 2328 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2329 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2330 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2331 I915_WRITE(IPEIR, ipeir); 2332 POSTING_READ(IPEIR); 2333 } else { 2334 u32 ipeir = I915_READ(IPEIR_I965); 2335 2336 pr_err(" IPEIR: 
0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
        }

        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 * @wedged: whether the GPU is considered hung, in which case a reset is kicked off
 * @fmt: printf-style format string describing the error
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
                       const char *fmt, ...)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        va_list args;
        char error_msg[80];

        va_start(args, fmt);
        vscnprintf(error_msg, sizeof(error_msg), fmt, args);
        va_end(args);

        i915_capture_error_state(dev, wedged, error_msg);
        i915_report_and_clear_eir(dev);

        if (wedged) {
                atomic_or(I915_RESET_IN_PROGRESS_FLAG,
                          &dev_priv->gpu_error.reset_counter);

                /*
                 * Wake up waiting processes so that the reset function
                 * i915_reset_and_wakeup doesn't deadlock trying to grab
                 * various locks. By bumping the reset counter first, the woken
                 * processes will see a reset in progress and back off,
                 * releasing their locks and then waiting for the reset completion.
                 * We must do this for _all_ gpu waiters that might hold locks
                 * that the reset work needs to acquire.
                 *
                 * Note: The wake_up serves as the required memory barrier to
                 * ensure that the waiters see the updated value of the reset
                 * counter atomic_t.
                 */
                i915_error_wake_up(dev_priv, false);
        }

        i915_reset_and_wakeup(dev);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_STATUS);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ?
DE_PIPE_VBLANK_IVB(pipe) : 2431 DE_PIPE_VBLANK(pipe); 2432 2433 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2434 ironlake_enable_display_irq(dev_priv, bit); 2435 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2436 2437 return 0; 2438 } 2439 2440 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2441 { 2442 struct drm_i915_private *dev_priv = dev->dev_private; 2443 unsigned long irqflags; 2444 2445 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2446 i915_enable_pipestat(dev_priv, pipe, 2447 PIPE_START_VBLANK_INTERRUPT_STATUS); 2448 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2449 2450 return 0; 2451 } 2452 2453 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2454 { 2455 struct drm_i915_private *dev_priv = dev->dev_private; 2456 unsigned long irqflags; 2457 2458 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2459 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2460 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2461 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2462 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2463 return 0; 2464 } 2465 2466 /* Called from drm generic code, passed 'crtc' which 2467 * we use as a pipe index 2468 */ 2469 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2470 { 2471 struct drm_i915_private *dev_priv = dev->dev_private; 2472 unsigned long irqflags; 2473 2474 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2475 i915_disable_pipestat(dev_priv, pipe, 2476 PIPE_VBLANK_INTERRUPT_STATUS | 2477 PIPE_START_VBLANK_INTERRUPT_STATUS); 2478 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2479 } 2480 2481 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2482 { 2483 struct drm_i915_private *dev_priv = dev->dev_private; 2484 unsigned long irqflags; 2485 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2486 DE_PIPE_VBLANK(pipe); 2487 2488 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2489 ironlake_disable_display_irq(dev_priv, bit); 2490 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2491 } 2492 2493 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2494 { 2495 struct drm_i915_private *dev_priv = dev->dev_private; 2496 unsigned long irqflags; 2497 2498 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2499 i915_disable_pipestat(dev_priv, pipe, 2500 PIPE_START_VBLANK_INTERRUPT_STATUS); 2501 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2502 } 2503 2504 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2505 { 2506 struct drm_i915_private *dev_priv = dev->dev_private; 2507 unsigned long irqflags; 2508 2509 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2510 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2511 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2512 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2513 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2514 } 2515 2516 static bool 2517 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2518 { 2519 return (list_empty(&ring->request_list) || 2520 i915_seqno_passed(seqno, ring->last_submitted_seqno)); 2521 } 2522 2523 static bool 2524 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2525 { 2526 if (INTEL_INFO(dev)->gen >= 8) { 2527 return (ipehr >> 23) == 0x1c; 2528 } else { 2529 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2530 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2531 MI_SEMAPHORE_REGISTER); 2532 } 2533 } 2534 2535 static struct intel_engine_cs * 2536 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2537 { 2538 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2539 struct intel_engine_cs *signaller; 2540 int i; 2541 2542 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2543 for_each_ring(signaller, dev_priv, i) { 2544 if (ring == signaller) 2545 continue; 2546 2547 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2548 return signaller; 2549 } 2550 } else { 2551 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2552 2553 for_each_ring(signaller, dev_priv, i) { 2554 if(ring == signaller) 2555 continue; 2556 2557 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2558 return signaller; 2559 } 2560 } 2561 2562 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2563 ring->id, ipehr, offset); 2564 2565 return NULL; 2566 } 2567 2568 static struct intel_engine_cs * 2569 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2570 { 2571 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2572 u32 cmd, ipehr, head; 2573 u64 offset = 0; 2574 int i, backwards; 2575 2576 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2577 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2578 return NULL; 2579 2580 /* 2581 * HEAD is likely pointing to the dword after the actual command, 2582 * so scan backwards until we find the MBOX. But limit it to just 3 2583 * or 4 dwords depending on the semaphore wait command size. 2584 * Note that we don't care about ACTHD here since that might 2585 * point at at batch, and semaphores are always emitted into the 2586 * ringbuffer itself. 2587 */ 2588 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2589 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 
5 : 4;

        for (i = backwards; i; --i) {
                /*
                 * Be paranoid and presume the hw has gone off into the wild -
                 * our ring is smaller than what the hardware (and hence
                 * HEAD_ADDR) allows. Also handles wrap-around.
                 */
                head &= ring->buffer->size - 1;

                /* This here seems to blow up */
                cmd = ioread32(ring->buffer->virtual_start + head);
                if (cmd == ipehr)
                        break;

                head -= 4;
        }

        if (!i)
                return NULL;

        *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
        if (INTEL_INFO(ring->dev)->gen >= 8) {
                offset = ioread32(ring->buffer->virtual_start + head + 12);
                offset <<= 32;
                offset |= ioread32(ring->buffer->virtual_start + head + 8);
        }
        return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        u32 seqno;

        ring->hangcheck.deadlock++;

        signaller = semaphore_waits_for(ring, &seqno);
        if (signaller == NULL)
                return -1;

        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;

        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;

        /* cursory check for an unkickable deadlock */
        if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
            semaphore_passed(signaller) < 0)
                return -1;

        return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;

        if (acthd != ring->hangcheck.acthd) {
                if (acthd > ring->hangcheck.max_acthd) {
                        ring->hangcheck.max_acthd = acthd;
                        return HANGCHECK_ACTIVE;
                }

                return HANGCHECK_ACTIVE_LOOP;
        }

        if (IS_GEN2(dev))
                return HANGCHECK_HUNG;

        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
         * and break the hang. This should work on
         * all but the second generation chipsets.
         */
        tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
                i915_handle_error(dev, false,
                                  "Kicking stuck wait on %s",
                                  ring->name);
                I915_WRITE_CTL(ring, tmp);
                return HANGCHECK_KICK;
        }

        if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(ring)) {
                default:
                        return HANGCHECK_HUNG;
                case 1:
                        i915_handle_error(dev, false,
                                          "Kicking stuck semaphore on %s",
                                          ring->name);
                        I915_WRITE_CTL(ring, tmp);
                        return HANGCHECK_KICK;
                case 0:
                        return HANGCHECK_WAIT;
                }
        }

        return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress, and
 * if there is no progress the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring.
If we see no progress on three subsequent calls 2712 * we assume chip is wedged and try to fix it by resetting the chip. 2713 */ 2714 static void i915_hangcheck_elapsed(struct work_struct *work) 2715 { 2716 struct drm_i915_private *dev_priv = 2717 container_of(work, typeof(*dev_priv), 2718 gpu_error.hangcheck_work.work); 2719 struct drm_device *dev = dev_priv->dev; 2720 struct intel_engine_cs *ring; 2721 int i; 2722 int busy_count = 0, rings_hung = 0; 2723 bool stuck[I915_NUM_RINGS] = { 0 }; 2724 #define BUSY 1 2725 #define KICK 5 2726 #define HUNG 20 2727 2728 if (!i915.enable_hangcheck) 2729 return; 2730 2731 for_each_ring(ring, dev_priv, i) { 2732 u64 acthd; 2733 u32 seqno; 2734 bool busy = true; 2735 2736 semaphore_clear_deadlocks(dev_priv); 2737 2738 seqno = ring->get_seqno(ring, false); 2739 acthd = intel_ring_get_active_head(ring); 2740 2741 if (ring->hangcheck.seqno == seqno) { 2742 if (ring_idle(ring, seqno)) { 2743 ring->hangcheck.action = HANGCHECK_IDLE; 2744 2745 if (waitqueue_active(&ring->irq_queue)) { 2746 /* Issue a wake-up to catch stuck h/w. */ 2747 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2748 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2749 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2750 ring->name); 2751 else 2752 DRM_INFO("Fake missed irq on %s\n", 2753 ring->name); 2754 wake_up_all(&ring->irq_queue); 2755 } 2756 /* Safeguard against driver failure */ 2757 ring->hangcheck.score += BUSY; 2758 } else 2759 busy = false; 2760 } else { 2761 /* We always increment the hangcheck score 2762 * if the ring is busy and still processing 2763 * the same request, so that no single request 2764 * can run indefinitely (such as a chain of 2765 * batches). The only time we do not increment 2766 * the hangcheck score on this ring, if this 2767 * ring is in a legitimate wait for another 2768 * ring. In that case the waiting ring is a 2769 * victim and we want to be sure we catch the 2770 * right culprit. Then every time we do kick 2771 * the ring, add a small increment to the 2772 * score so that we can catch a batch that is 2773 * being repeatedly kicked and so responsible 2774 * for stalling the machine. 2775 */ 2776 ring->hangcheck.action = ring_stuck(ring, 2777 acthd); 2778 2779 switch (ring->hangcheck.action) { 2780 case HANGCHECK_IDLE: 2781 case HANGCHECK_WAIT: 2782 case HANGCHECK_ACTIVE: 2783 break; 2784 case HANGCHECK_ACTIVE_LOOP: 2785 ring->hangcheck.score += BUSY; 2786 break; 2787 case HANGCHECK_KICK: 2788 ring->hangcheck.score += KICK; 2789 break; 2790 case HANGCHECK_HUNG: 2791 ring->hangcheck.score += HUNG; 2792 stuck[i] = true; 2793 break; 2794 } 2795 } 2796 } else { 2797 ring->hangcheck.action = HANGCHECK_ACTIVE; 2798 2799 /* Gradually reduce the count so that we catch DoS 2800 * attempts across multiple batches. 2801 */ 2802 if (ring->hangcheck.score > 0) 2803 ring->hangcheck.score--; 2804 2805 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 2806 } 2807 2808 ring->hangcheck.seqno = seqno; 2809 ring->hangcheck.acthd = acthd; 2810 busy_count += busy; 2811 } 2812 2813 for_each_ring(ring, dev_priv, i) { 2814 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 2815 DRM_INFO("%s on %s\n", 2816 stuck[i] ? 
"stuck" : "no progress", 2817 ring->name); 2818 rings_hung++; 2819 } 2820 } 2821 2822 if (rings_hung) 2823 return i915_handle_error(dev, true, "Ring hung"); 2824 2825 if (busy_count) 2826 /* Reset timer case chip hangs without another request 2827 * being added */ 2828 i915_queue_hangcheck(dev); 2829 } 2830 2831 void i915_queue_hangcheck(struct drm_device *dev) 2832 { 2833 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 2834 2835 if (!i915.enable_hangcheck) 2836 return; 2837 2838 /* Don't continually defer the hangcheck so that it is always run at 2839 * least once after work has been scheduled on any ring. Otherwise, 2840 * we will ignore a hung ring if a second ring is kept busy. 2841 */ 2842 2843 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 2844 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 2845 } 2846 2847 static void ibx_irq_reset(struct drm_device *dev) 2848 { 2849 struct drm_i915_private *dev_priv = dev->dev_private; 2850 2851 if (HAS_PCH_NOP(dev)) 2852 return; 2853 2854 GEN5_IRQ_RESET(SDE); 2855 2856 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 2857 I915_WRITE(SERR_INT, 0xffffffff); 2858 } 2859 2860 /* 2861 * SDEIER is also touched by the interrupt handler to work around missed PCH 2862 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2863 * instead we unconditionally enable all PCH interrupt sources here, but then 2864 * only unmask them as needed with SDEIMR. 2865 * 2866 * This function needs to be called before interrupts are enabled. 2867 */ 2868 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2869 { 2870 struct drm_i915_private *dev_priv = dev->dev_private; 2871 2872 if (HAS_PCH_NOP(dev)) 2873 return; 2874 2875 WARN_ON(I915_READ(SDEIER) != 0); 2876 I915_WRITE(SDEIER, 0xffffffff); 2877 POSTING_READ(SDEIER); 2878 } 2879 2880 static void gen5_gt_irq_reset(struct drm_device *dev) 2881 { 2882 struct drm_i915_private *dev_priv = dev->dev_private; 2883 2884 GEN5_IRQ_RESET(GT); 2885 if (INTEL_INFO(dev)->gen >= 6) 2886 GEN5_IRQ_RESET(GEN6_PM); 2887 } 2888 2889 /* drm_dma.h hooks 2890 */ 2891 static void ironlake_irq_reset(struct drm_device *dev) 2892 { 2893 struct drm_i915_private *dev_priv = dev->dev_private; 2894 2895 I915_WRITE(HWSTAM, 0xffffffff); 2896 2897 GEN5_IRQ_RESET(DE); 2898 if (IS_GEN7(dev)) 2899 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2900 2901 gen5_gt_irq_reset(dev); 2902 2903 ibx_irq_reset(dev); 2904 } 2905 2906 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2907 { 2908 enum pipe pipe; 2909 2910 I915_WRITE(PORT_HOTPLUG_EN, 0); 2911 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2912 2913 for_each_pipe(dev_priv, pipe) 2914 I915_WRITE(PIPESTAT(pipe), 0xffff); 2915 2916 GEN5_IRQ_RESET(VLV_); 2917 } 2918 2919 static void valleyview_irq_preinstall(struct drm_device *dev) 2920 { 2921 struct drm_i915_private *dev_priv = dev->dev_private; 2922 2923 /* VLV magic */ 2924 I915_WRITE(VLV_IMR, 0); 2925 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2926 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2927 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2928 2929 gen5_gt_irq_reset(dev); 2930 2931 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2932 2933 vlv_display_irq_reset(dev_priv); 2934 } 2935 2936 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 2937 { 2938 GEN8_IRQ_RESET_NDX(GT, 0); 2939 GEN8_IRQ_RESET_NDX(GT, 1); 2940 GEN8_IRQ_RESET_NDX(GT, 2); 2941 GEN8_IRQ_RESET_NDX(GT, 3); 2942 } 2943 2944 static void gen8_irq_reset(struct drm_device *dev) 2945 { 2946 struct drm_i915_private 
*dev_priv = dev->dev_private; 2947 int pipe; 2948 2949 I915_WRITE(GEN8_MASTER_IRQ, 0); 2950 POSTING_READ(GEN8_MASTER_IRQ); 2951 2952 gen8_gt_irq_reset(dev_priv); 2953 2954 for_each_pipe(dev_priv, pipe) 2955 if (intel_display_power_is_enabled(dev_priv, 2956 POWER_DOMAIN_PIPE(pipe))) 2957 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 2958 2959 GEN5_IRQ_RESET(GEN8_DE_PORT_); 2960 GEN5_IRQ_RESET(GEN8_DE_MISC_); 2961 GEN5_IRQ_RESET(GEN8_PCU_); 2962 2963 if (HAS_PCH_SPLIT(dev)) 2964 ibx_irq_reset(dev); 2965 } 2966 2967 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 2968 unsigned int pipe_mask) 2969 { 2970 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 2971 2972 spin_lock_irq(&dev_priv->irq_lock); 2973 if (pipe_mask & 1 << PIPE_A) 2974 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 2975 dev_priv->de_irq_mask[PIPE_A], 2976 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); 2977 if (pipe_mask & 1 << PIPE_B) 2978 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, 2979 dev_priv->de_irq_mask[PIPE_B], 2980 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 2981 if (pipe_mask & 1 << PIPE_C) 2982 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, 2983 dev_priv->de_irq_mask[PIPE_C], 2984 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 2985 spin_unlock_irq(&dev_priv->irq_lock); 2986 } 2987 2988 static void cherryview_irq_preinstall(struct drm_device *dev) 2989 { 2990 struct drm_i915_private *dev_priv = dev->dev_private; 2991 2992 I915_WRITE(GEN8_MASTER_IRQ, 0); 2993 POSTING_READ(GEN8_MASTER_IRQ); 2994 2995 gen8_gt_irq_reset(dev_priv); 2996 2997 GEN5_IRQ_RESET(GEN8_PCU_); 2998 2999 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3000 3001 vlv_display_irq_reset(dev_priv); 3002 } 3003 3004 static void ibx_hpd_irq_setup(struct drm_device *dev) 3005 { 3006 struct drm_i915_private *dev_priv = dev->dev_private; 3007 struct intel_encoder *intel_encoder; 3008 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3009 3010 if (HAS_PCH_IBX(dev)) { 3011 hotplug_irqs = SDE_HOTPLUG_MASK; 3012 for_each_intel_encoder(dev, intel_encoder) 3013 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) 3014 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3015 } else if (HAS_PCH_SPT(dev)) { 3016 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3017 for_each_intel_encoder(dev, intel_encoder) 3018 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) 3019 enabled_irqs |= hpd_spt[intel_encoder->hpd_pin]; 3020 } else { 3021 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3022 for_each_intel_encoder(dev, intel_encoder) 3023 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) 3024 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3025 } 3026 3027 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3028 3029 /* 3030 * Enable digital hotplug on the PCH, and configure the DP short pulse 3031 * duration to 2ms (which is the minimum in the Display Port spec) 3032 * 3033 * This register is the same on all known PCH chips. 
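         * (A short pulse is how a DisplayPort sink requests attention, e.g.
         * for a link-status change, without signalling a full unplug.)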
3034 */ 3035 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3036 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3037 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3038 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3039 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3040 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3041 3042 /* enable SPT PORTE hot plug */ 3043 if (HAS_PCH_SPT(dev)) { 3044 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3045 hotplug |= PORTE_HOTPLUG_ENABLE; 3046 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3047 } 3048 } 3049 3050 static void bxt_hpd_irq_setup(struct drm_device *dev) 3051 { 3052 struct drm_i915_private *dev_priv = dev->dev_private; 3053 struct intel_encoder *intel_encoder; 3054 u32 hotplug_port = 0; 3055 u32 hotplug_ctrl; 3056 3057 /* Now, enable HPD */ 3058 for_each_intel_encoder(dev, intel_encoder) { 3059 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state 3060 == HPD_ENABLED) 3061 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; 3062 } 3063 3064 /* Mask all HPD control bits */ 3065 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK; 3066 3067 /* Enable requested port in hotplug control */ 3068 /* TODO: implement (short) HPD support on port A */ 3069 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA); 3070 if (hotplug_port & BXT_DE_PORT_HP_DDIB) 3071 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE; 3072 if (hotplug_port & BXT_DE_PORT_HP_DDIC) 3073 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE; 3074 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl); 3075 3076 /* Unmask DDI hotplug in IMR */ 3077 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port; 3078 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl); 3079 3080 /* Enable DDI hotplug in IER */ 3081 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port; 3082 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl); 3083 POSTING_READ(GEN8_DE_PORT_IER); 3084 } 3085 3086 static void ibx_irq_postinstall(struct drm_device *dev) 3087 { 3088 struct drm_i915_private *dev_priv = dev->dev_private; 3089 u32 mask; 3090 3091 if (HAS_PCH_NOP(dev)) 3092 return; 3093 3094 if (HAS_PCH_IBX(dev)) 3095 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3096 else 3097 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3098 3099 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3100 I915_WRITE(SDEIMR, ~mask); 3101 } 3102 3103 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3104 { 3105 struct drm_i915_private *dev_priv = dev->dev_private; 3106 u32 pm_irqs, gt_irqs; 3107 3108 pm_irqs = gt_irqs = 0; 3109 3110 dev_priv->gt_irq_mask = ~0; 3111 if (HAS_L3_DPF(dev)) { 3112 /* L3 parity interrupt is always unmasked. */ 3113 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3114 gt_irqs |= GT_PARITY_ERROR(dev); 3115 } 3116 3117 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3118 if (IS_GEN5(dev)) { 3119 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3120 ILK_BSD_USER_INTERRUPT; 3121 } else { 3122 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3123 } 3124 3125 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3126 3127 if (INTEL_INFO(dev)->gen >= 6) { 3128 /* 3129 * RPS interrupts will get enabled/disabled on demand when RPS 3130 * itself is enabled/disabled. 
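                 * Here everything stays masked in the PM IMR (pm_irq_mask
                 * starts as all ones); only the VEBOX user interrupt, when
                 * present, is enabled in the PM IER below.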
3131 */ 3132 if (HAS_VEBOX(dev)) 3133 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3134 3135 dev_priv->pm_irq_mask = 0xffffffff; 3136 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3137 } 3138 } 3139 3140 static int ironlake_irq_postinstall(struct drm_device *dev) 3141 { 3142 struct drm_i915_private *dev_priv = dev->dev_private; 3143 u32 display_mask, extra_mask; 3144 3145 if (INTEL_INFO(dev)->gen >= 7) { 3146 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3147 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3148 DE_PLANEB_FLIP_DONE_IVB | 3149 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3150 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3151 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3152 } else { 3153 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3154 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3155 DE_AUX_CHANNEL_A | 3156 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3157 DE_POISON); 3158 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3159 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3160 } 3161 3162 dev_priv->irq_mask = ~display_mask; 3163 3164 I915_WRITE(HWSTAM, 0xeffe); 3165 3166 ibx_irq_pre_postinstall(dev); 3167 3168 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3169 3170 gen5_gt_irq_postinstall(dev); 3171 3172 ibx_irq_postinstall(dev); 3173 3174 if (IS_IRONLAKE_M(dev)) { 3175 /* Enable PCU event interrupts 3176 * 3177 * spinlocking not required here for correctness since interrupt 3178 * setup is guaranteed to run in single-threaded context. But we 3179 * need it to make the assert_spin_locked happy. */ 3180 spin_lock_irq(&dev_priv->irq_lock); 3181 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3182 spin_unlock_irq(&dev_priv->irq_lock); 3183 } 3184 3185 return 0; 3186 } 3187 3188 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3189 { 3190 u32 pipestat_mask; 3191 u32 iir_mask; 3192 enum pipe pipe; 3193 3194 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3195 PIPE_FIFO_UNDERRUN_STATUS; 3196 3197 for_each_pipe(dev_priv, pipe) 3198 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3199 POSTING_READ(PIPESTAT(PIPE_A)); 3200 3201 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3202 PIPE_CRC_DONE_INTERRUPT_STATUS; 3203 3204 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3205 for_each_pipe(dev_priv, pipe) 3206 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3207 3208 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3209 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3210 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3211 if (IS_CHERRYVIEW(dev_priv)) 3212 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3213 dev_priv->irq_mask &= ~iir_mask; 3214 3215 I915_WRITE(VLV_IIR, iir_mask); 3216 I915_WRITE(VLV_IIR, iir_mask); 3217 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3218 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3219 POSTING_READ(VLV_IMR); 3220 } 3221 3222 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3223 { 3224 u32 pipestat_mask; 3225 u32 iir_mask; 3226 enum pipe pipe; 3227 3228 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3229 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3230 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3231 if (IS_CHERRYVIEW(dev_priv)) 3232 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3233 3234 dev_priv->irq_mask |= iir_mask; 3235 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3236 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3237 I915_WRITE(VLV_IIR, iir_mask); 3238 I915_WRITE(VLV_IIR, iir_mask); 3239 POSTING_READ(VLV_IIR); 3240 3241 pipestat_mask = 
PLANE_FLIP_DONE_INT_STATUS_VLV | 3242 PIPE_CRC_DONE_INTERRUPT_STATUS; 3243 3244 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3245 for_each_pipe(dev_priv, pipe) 3246 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3247 3248 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3249 PIPE_FIFO_UNDERRUN_STATUS; 3250 3251 for_each_pipe(dev_priv, pipe) 3252 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3253 POSTING_READ(PIPESTAT(PIPE_A)); 3254 } 3255 3256 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3257 { 3258 assert_spin_locked(&dev_priv->irq_lock); 3259 3260 if (dev_priv->display_irqs_enabled) 3261 return; 3262 3263 dev_priv->display_irqs_enabled = true; 3264 3265 if (intel_irqs_enabled(dev_priv)) 3266 valleyview_display_irqs_install(dev_priv); 3267 } 3268 3269 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3270 { 3271 assert_spin_locked(&dev_priv->irq_lock); 3272 3273 if (!dev_priv->display_irqs_enabled) 3274 return; 3275 3276 dev_priv->display_irqs_enabled = false; 3277 3278 if (intel_irqs_enabled(dev_priv)) 3279 valleyview_display_irqs_uninstall(dev_priv); 3280 } 3281 3282 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3283 { 3284 dev_priv->irq_mask = ~0; 3285 3286 I915_WRITE(PORT_HOTPLUG_EN, 0); 3287 POSTING_READ(PORT_HOTPLUG_EN); 3288 3289 I915_WRITE(VLV_IIR, 0xffffffff); 3290 I915_WRITE(VLV_IIR, 0xffffffff); 3291 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3292 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3293 POSTING_READ(VLV_IMR); 3294 3295 /* Interrupt setup is already guaranteed to be single-threaded, this is 3296 * just to make the assert_spin_locked check happy. */ 3297 spin_lock_irq(&dev_priv->irq_lock); 3298 if (dev_priv->display_irqs_enabled) 3299 valleyview_display_irqs_install(dev_priv); 3300 spin_unlock_irq(&dev_priv->irq_lock); 3301 } 3302 3303 static int valleyview_irq_postinstall(struct drm_device *dev) 3304 { 3305 struct drm_i915_private *dev_priv = dev->dev_private; 3306 3307 vlv_display_irq_postinstall(dev_priv); 3308 3309 gen5_gt_irq_postinstall(dev); 3310 3311 /* ack & enable invalid PTE error interrupts */ 3312 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3313 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3314 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3315 #endif 3316 3317 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3318 3319 return 0; 3320 } 3321 3322 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3323 { 3324 /* These are interrupts we'll toggle with the ring mask register */ 3325 uint32_t gt_interrupts[] = { 3326 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3327 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3328 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3329 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3330 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3331 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3332 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3333 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3334 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3335 0, 3336 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3337 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3338 }; 3339 3340 dev_priv->pm_irq_mask = 0xffffffff; 3341 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3342 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3343 /* 3344 * RPS interrupts will get enabled/disabled on demand when RPS itself 3345 * is enabled/disabled. 
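         * GT interrupt index 2 (the PM interrupts) therefore starts out
         * fully masked and with nothing enabled.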
3346 */ 3347 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3348 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3349 } 3350 3351 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3352 { 3353 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3354 uint32_t de_pipe_enables; 3355 int pipe; 3356 u32 de_port_en = GEN8_AUX_CHANNEL_A; 3357 3358 if (IS_GEN9(dev_priv)) { 3359 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3360 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3361 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3362 GEN9_AUX_CHANNEL_D; 3363 3364 if (IS_BROXTON(dev_priv)) 3365 de_port_en |= BXT_DE_PORT_GMBUS; 3366 } else 3367 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3368 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3369 3370 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3371 GEN8_PIPE_FIFO_UNDERRUN; 3372 3373 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3374 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3375 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3376 3377 for_each_pipe(dev_priv, pipe) 3378 if (intel_display_power_is_enabled(dev_priv, 3379 POWER_DOMAIN_PIPE(pipe))) 3380 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3381 dev_priv->de_irq_mask[pipe], 3382 de_pipe_enables); 3383 3384 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en); 3385 } 3386 3387 static int gen8_irq_postinstall(struct drm_device *dev) 3388 { 3389 struct drm_i915_private *dev_priv = dev->dev_private; 3390 3391 if (HAS_PCH_SPLIT(dev)) 3392 ibx_irq_pre_postinstall(dev); 3393 3394 gen8_gt_irq_postinstall(dev_priv); 3395 gen8_de_irq_postinstall(dev_priv); 3396 3397 if (HAS_PCH_SPLIT(dev)) 3398 ibx_irq_postinstall(dev); 3399 3400 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3401 POSTING_READ(GEN8_MASTER_IRQ); 3402 3403 return 0; 3404 } 3405 3406 static int cherryview_irq_postinstall(struct drm_device *dev) 3407 { 3408 struct drm_i915_private *dev_priv = dev->dev_private; 3409 3410 vlv_display_irq_postinstall(dev_priv); 3411 3412 gen8_gt_irq_postinstall(dev_priv); 3413 3414 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3415 POSTING_READ(GEN8_MASTER_IRQ); 3416 3417 return 0; 3418 } 3419 3420 static void gen8_irq_uninstall(struct drm_device *dev) 3421 { 3422 struct drm_i915_private *dev_priv = dev->dev_private; 3423 3424 if (!dev_priv) 3425 return; 3426 3427 gen8_irq_reset(dev); 3428 } 3429 3430 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3431 { 3432 /* Interrupt setup is already guaranteed to be single-threaded, this is 3433 * just to make the assert_spin_locked check happy. 
*/ 3434 spin_lock_irq(&dev_priv->irq_lock); 3435 if (dev_priv->display_irqs_enabled) 3436 valleyview_display_irqs_uninstall(dev_priv); 3437 spin_unlock_irq(&dev_priv->irq_lock); 3438 3439 vlv_display_irq_reset(dev_priv); 3440 3441 dev_priv->irq_mask = ~0; 3442 } 3443 3444 static void valleyview_irq_uninstall(struct drm_device *dev) 3445 { 3446 struct drm_i915_private *dev_priv = dev->dev_private; 3447 3448 if (!dev_priv) 3449 return; 3450 3451 I915_WRITE(VLV_MASTER_IER, 0); 3452 3453 gen5_gt_irq_reset(dev); 3454 3455 I915_WRITE(HWSTAM, 0xffffffff); 3456 3457 vlv_display_irq_uninstall(dev_priv); 3458 } 3459 3460 static void cherryview_irq_uninstall(struct drm_device *dev) 3461 { 3462 struct drm_i915_private *dev_priv = dev->dev_private; 3463 3464 if (!dev_priv) 3465 return; 3466 3467 I915_WRITE(GEN8_MASTER_IRQ, 0); 3468 POSTING_READ(GEN8_MASTER_IRQ); 3469 3470 gen8_gt_irq_reset(dev_priv); 3471 3472 GEN5_IRQ_RESET(GEN8_PCU_); 3473 3474 vlv_display_irq_uninstall(dev_priv); 3475 } 3476 3477 static void ironlake_irq_uninstall(struct drm_device *dev) 3478 { 3479 struct drm_i915_private *dev_priv = dev->dev_private; 3480 3481 if (!dev_priv) 3482 return; 3483 3484 ironlake_irq_reset(dev); 3485 } 3486 3487 static void i8xx_irq_preinstall(struct drm_device * dev) 3488 { 3489 struct drm_i915_private *dev_priv = dev->dev_private; 3490 int pipe; 3491 3492 for_each_pipe(dev_priv, pipe) 3493 I915_WRITE(PIPESTAT(pipe), 0); 3494 I915_WRITE16(IMR, 0xffff); 3495 I915_WRITE16(IER, 0x0); 3496 POSTING_READ16(IER); 3497 } 3498 3499 static int i8xx_irq_postinstall(struct drm_device *dev) 3500 { 3501 struct drm_i915_private *dev_priv = dev->dev_private; 3502 3503 I915_WRITE16(EMR, 3504 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3505 3506 /* Unmask the interrupts that we always want on. */ 3507 dev_priv->irq_mask = 3508 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3509 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3510 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3511 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3512 I915_WRITE16(IMR, dev_priv->irq_mask); 3513 3514 I915_WRITE16(IER, 3515 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3516 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3517 I915_USER_INTERRUPT); 3518 POSTING_READ16(IER); 3519 3520 /* Interrupt setup is already guaranteed to be single-threaded, this is 3521 * just to make the assert_spin_locked check happy. */ 3522 spin_lock_irq(&dev_priv->irq_lock); 3523 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3524 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3525 spin_unlock_irq(&dev_priv->irq_lock); 3526 3527 return 0; 3528 } 3529 3530 /* 3531 * Returns true when a page flip has completed. 3532 */ 3533 static bool i8xx_handle_vblank(struct drm_device *dev, 3534 int plane, int pipe, u32 iir) 3535 { 3536 struct drm_i915_private *dev_priv = dev->dev_private; 3537 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3538 3539 if (!intel_pipe_handle_vblank(dev, pipe)) 3540 return false; 3541 3542 if ((iir & flip_pending) == 0) 3543 goto check_page_flip; 3544 3545 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3546 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3547 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3548 * the flip is completed (no longer pending). Since this doesn't raise 3549 * an interrupt per se, we watch for the change at vblank. 
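         * If ISR still shows the flip as pending it has not completed yet,
         * so we bail out to intel_check_page_flip() below instead of
         * completing the flip here.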
3550 */ 3551 if (I915_READ16(ISR) & flip_pending) 3552 goto check_page_flip; 3553 3554 intel_prepare_page_flip(dev, plane); 3555 intel_finish_page_flip(dev, pipe); 3556 return true; 3557 3558 check_page_flip: 3559 intel_check_page_flip(dev, pipe); 3560 return false; 3561 } 3562 3563 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3564 { 3565 struct drm_device *dev = arg; 3566 struct drm_i915_private *dev_priv = dev->dev_private; 3567 u16 iir, new_iir; 3568 u32 pipe_stats[2]; 3569 int pipe; 3570 u16 flip_mask = 3571 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3572 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3573 3574 if (!intel_irqs_enabled(dev_priv)) 3575 return IRQ_NONE; 3576 3577 iir = I915_READ16(IIR); 3578 if (iir == 0) 3579 return IRQ_NONE; 3580 3581 while (iir & ~flip_mask) { 3582 /* Can't rely on pipestat interrupt bit in iir as it might 3583 * have been cleared after the pipestat interrupt was received. 3584 * It doesn't set the bit in iir again, but it still produces 3585 * interrupts (for non-MSI). 3586 */ 3587 spin_lock(&dev_priv->irq_lock); 3588 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3589 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3590 3591 for_each_pipe(dev_priv, pipe) { 3592 int reg = PIPESTAT(pipe); 3593 pipe_stats[pipe] = I915_READ(reg); 3594 3595 /* 3596 * Clear the PIPE*STAT regs before the IIR 3597 */ 3598 if (pipe_stats[pipe] & 0x8000ffff) 3599 I915_WRITE(reg, pipe_stats[pipe]); 3600 } 3601 spin_unlock(&dev_priv->irq_lock); 3602 3603 I915_WRITE16(IIR, iir & ~flip_mask); 3604 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3605 3606 if (iir & I915_USER_INTERRUPT) 3607 notify_ring(&dev_priv->ring[RCS]); 3608 3609 for_each_pipe(dev_priv, pipe) { 3610 int plane = pipe; 3611 if (HAS_FBC(dev)) 3612 plane = !plane; 3613 3614 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3615 i8xx_handle_vblank(dev, plane, pipe, iir)) 3616 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3617 3618 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3619 i9xx_pipe_crc_irq_handler(dev, pipe); 3620 3621 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3622 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3623 pipe); 3624 } 3625 3626 iir = new_iir; 3627 } 3628 3629 return IRQ_HANDLED; 3630 } 3631 3632 static void i8xx_irq_uninstall(struct drm_device * dev) 3633 { 3634 struct drm_i915_private *dev_priv = dev->dev_private; 3635 int pipe; 3636 3637 for_each_pipe(dev_priv, pipe) { 3638 /* Clear enable bits; then clear status bits */ 3639 I915_WRITE(PIPESTAT(pipe), 0); 3640 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3641 } 3642 I915_WRITE16(IMR, 0xffff); 3643 I915_WRITE16(IER, 0x0); 3644 I915_WRITE16(IIR, I915_READ16(IIR)); 3645 } 3646 3647 static void i915_irq_preinstall(struct drm_device * dev) 3648 { 3649 struct drm_i915_private *dev_priv = dev->dev_private; 3650 int pipe; 3651 3652 if (I915_HAS_HOTPLUG(dev)) { 3653 I915_WRITE(PORT_HOTPLUG_EN, 0); 3654 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3655 } 3656 3657 I915_WRITE16(HWSTAM, 0xeffe); 3658 for_each_pipe(dev_priv, pipe) 3659 I915_WRITE(PIPESTAT(pipe), 0); 3660 I915_WRITE(IMR, 0xffffffff); 3661 I915_WRITE(IER, 0x0); 3662 POSTING_READ(IER); 3663 } 3664 3665 static int i915_irq_postinstall(struct drm_device *dev) 3666 { 3667 struct drm_i915_private *dev_priv = dev->dev_private; 3668 u32 enable_mask; 3669 3670 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3671 3672 /* Unmask the interrupts that we always want on. 
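         * Everything else stays masked here; further bits (e.g. the display
         * port interrupt for hotplug-capable platforms) are added below.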
*/ 3673 dev_priv->irq_mask = 3674 ~(I915_ASLE_INTERRUPT | 3675 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3676 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3677 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3678 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3679 3680 enable_mask = 3681 I915_ASLE_INTERRUPT | 3682 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3683 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3684 I915_USER_INTERRUPT; 3685 3686 if (I915_HAS_HOTPLUG(dev)) { 3687 I915_WRITE(PORT_HOTPLUG_EN, 0); 3688 POSTING_READ(PORT_HOTPLUG_EN); 3689 3690 /* Enable in IER... */ 3691 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3692 /* and unmask in IMR */ 3693 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3694 } 3695 3696 I915_WRITE(IMR, dev_priv->irq_mask); 3697 I915_WRITE(IER, enable_mask); 3698 POSTING_READ(IER); 3699 3700 i915_enable_asle_pipestat(dev); 3701 3702 /* Interrupt setup is already guaranteed to be single-threaded, this is 3703 * just to make the assert_spin_locked check happy. */ 3704 spin_lock_irq(&dev_priv->irq_lock); 3705 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3706 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3707 spin_unlock_irq(&dev_priv->irq_lock); 3708 3709 return 0; 3710 } 3711 3712 /* 3713 * Returns true when a page flip has completed. 3714 */ 3715 static bool i915_handle_vblank(struct drm_device *dev, 3716 int plane, int pipe, u32 iir) 3717 { 3718 struct drm_i915_private *dev_priv = dev->dev_private; 3719 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3720 3721 if (!intel_pipe_handle_vblank(dev, pipe)) 3722 return false; 3723 3724 if ((iir & flip_pending) == 0) 3725 goto check_page_flip; 3726 3727 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3728 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3729 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3730 * the flip is completed (no longer pending). Since this doesn't raise 3731 * an interrupt per se, we watch for the change at vblank. 3732 */ 3733 if (I915_READ(ISR) & flip_pending) 3734 goto check_page_flip; 3735 3736 intel_prepare_page_flip(dev, plane); 3737 intel_finish_page_flip(dev, pipe); 3738 return true; 3739 3740 check_page_flip: 3741 intel_check_page_flip(dev, pipe); 3742 return false; 3743 } 3744 3745 static irqreturn_t i915_irq_handler(int irq, void *arg) 3746 { 3747 struct drm_device *dev = arg; 3748 struct drm_i915_private *dev_priv = dev->dev_private; 3749 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3750 u32 flip_mask = 3751 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3752 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3753 int pipe, ret = IRQ_NONE; 3754 3755 if (!intel_irqs_enabled(dev_priv)) 3756 return IRQ_NONE; 3757 3758 iir = I915_READ(IIR); 3759 do { 3760 bool irq_received = (iir & ~flip_mask) != 0; 3761 bool blc_event = false; 3762 3763 /* Can't rely on pipestat interrupt bit in iir as it might 3764 * have been cleared after the pipestat interrupt was received. 3765 * It doesn't set the bit in iir again, but it still produces 3766 * interrupts (for non-MSI). 
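                 * So the PIPESTAT registers are read and cleared under the
                 * irq lock, and any set status bit is treated as a received
                 * interrupt even if IIR no longer shows one.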
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection; note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later. So just do it once.
	*/
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
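
/*
 * Illustrative sketch only (not additional driver code): the i8xx, i915 and
 * i965 handlers above all share the same loop shape. Condensed, using only
 * the calls that appear in those handlers, it looks roughly like this:
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		spin_lock(&dev_priv->irq_lock);
 *		// snapshot and clear PIPESTAT before acking IIR, since the
 *		// pipestat bit in IIR may already have been cleared
 *		for_each_pipe(dev_priv, pipe)
 *			pipe_stats[pipe] = I915_READ(PIPESTAT(pipe));
 *		spin_unlock(&dev_priv->irq_lock);
 *
 *		I915_WRITE(IIR, iir & ~flip_mask);
 *		new_iir = I915_READ(IIR);	// flush posted writes
 *
 *		// service user interrupts, vblanks/flips, CRC, underruns ...
 *
 *		iir = new_iir;	// pick up bits that arrived while servicing
 *	} while (iir & ~flip_mask);
 */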

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself, though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (HAS_PCH_SPLIT(dev))
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
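
/*
 * Usage sketch (illustrative only, not part of this file): the intended call
 * ordering of the helpers above during driver load/unload and runtime pm, as
 * described in their kerneldoc. The surrounding driver code and the error
 * label are hypothetical and shown only for context.
 *
 *	// driver load
 *	intel_irq_init(dev_priv);		// vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	// request the irq, run postinstall
 *	if (ret)
 *		goto err;			// hypothetical error path
 *
 *	// runtime pm / system suspend and resume
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 *	// driver unload
 *	intel_irq_uninstall(dev_priv);
 */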