1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 46 */ 47 48 static const u32 hpd_ibx[] = { 49 [HPD_CRT] = SDE_CRT_HOTPLUG, 50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 51 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 54 }; 55 56 static const u32 hpd_cpt[] = { 57 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 58 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 59 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 60 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 62 }; 63 64 static const u32 hpd_mask_i915[] = { 65 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 71 }; 72 73 static const u32 hpd_status_g4x[] = { 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 80 }; 81 82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 83 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 84 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 85 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 86 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 87 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 89 }; 90 91 /* IIR can theoretically queue up two events. Be paranoid. */ 92 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 93 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 94 POSTING_READ(GEN8_##type##_IMR(which)); \ 95 I915_WRITE(GEN8_##type##_IER(which), 0); \ 96 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 97 POSTING_READ(GEN8_##type##_IIR(which)); \ 98 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 99 POSTING_READ(GEN8_##type##_IIR(which)); \ 100 } while (0) 101 102 #define GEN5_IRQ_RESET(type) do { \ 103 I915_WRITE(type##IMR, 0xffffffff); \ 104 POSTING_READ(type##IMR); \ 105 I915_WRITE(type##IER, 0); \ 106 I915_WRITE(type##IIR, 0xffffffff); \ 107 POSTING_READ(type##IIR); \ 108 I915_WRITE(type##IIR, 0xffffffff); \ 109 POSTING_READ(type##IIR); \ 110 } while (0) 111 112 /* 113 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 114 */ 115 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \ 116 u32 val = I915_READ(reg); \ 117 if (val) { \ 118 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \ 119 (reg), val); \ 120 I915_WRITE((reg), 0xffffffff); \ 121 POSTING_READ(reg); \ 122 I915_WRITE((reg), 0xffffffff); \ 123 POSTING_READ(reg); \ 124 } \ 125 } while (0) 126 127 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 128 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ 129 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 130 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 131 POSTING_READ(GEN8_##type##_IMR(which)); \ 132 } while (0) 133 134 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 135 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ 136 I915_WRITE(type##IER, (ier_val)); \ 137 I915_WRITE(type##IMR, (imr_val)); \ 138 POSTING_READ(type##IMR); \ 139 } while (0) 140 141 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 142 143 /* For display hotplug interrupt */ 144 void 145 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 146 { 147 assert_spin_locked(&dev_priv->irq_lock); 148 149 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 150 return; 151 152 if ((dev_priv->irq_mask & mask) != 0) { 153 dev_priv->irq_mask &= ~mask; 154 I915_WRITE(DEIMR, dev_priv->irq_mask); 155 POSTING_READ(DEIMR); 156 } 157 } 158 159 void 160 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 161 { 162 assert_spin_locked(&dev_priv->irq_lock); 163 164 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 165 return; 166 167 if ((dev_priv->irq_mask & mask) != mask) { 168 dev_priv->irq_mask |= mask; 169 I915_WRITE(DEIMR, dev_priv->irq_mask); 170 POSTING_READ(DEIMR); 171 } 172 } 173 174 /** 175 * ilk_update_gt_irq - update GTIMR 176 * @dev_priv: driver private 177 * @interrupt_mask: mask of interrupt bits to update 178 * @enabled_irq_mask: mask of interrupt bits to enable 179 */ 180 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 181 uint32_t interrupt_mask, 182 uint32_t enabled_irq_mask) 183 { 184 assert_spin_locked(&dev_priv->irq_lock); 185 186 WARN_ON(enabled_irq_mask & ~interrupt_mask); 187 188 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 189 return; 190 191 dev_priv->gt_irq_mask &= ~interrupt_mask; 192 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 193 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 194 POSTING_READ(GTIMR); 195 } 196 197 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 198 { 199 ilk_update_gt_irq(dev_priv, mask, mask); 200 } 201 202 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 203 { 204 ilk_update_gt_irq(dev_priv, mask, 0); 205 } 206 207 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) 208 { 209 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 210 } 211 212 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) 213 { 214 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 215 } 216 217 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) 218 { 219 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; 220 } 221 222 /** 223 * snb_update_pm_irq - update GEN6_PMIMR 224 * @dev_priv: driver private 225 * @interrupt_mask: mask of interrupt bits to update 226 * @enabled_irq_mask: mask of interrupt bits to enable 227 */ 228 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 229 uint32_t interrupt_mask, 230 uint32_t enabled_irq_mask) 231 { 232 uint32_t new_val; 233 234 WARN_ON(enabled_irq_mask & ~interrupt_mask); 235 236 assert_spin_locked(&dev_priv->irq_lock); 237 238 new_val = dev_priv->pm_irq_mask; 239 new_val &= ~interrupt_mask; 240 new_val |= (~enabled_irq_mask & interrupt_mask); 241 242 if (new_val != dev_priv->pm_irq_mask) { 243 dev_priv->pm_irq_mask = new_val; 244 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); 245 POSTING_READ(gen6_pm_imr(dev_priv)); 246 } 247 } 248 249 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 250 { 251 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 252 return; 253 254 snb_update_pm_irq(dev_priv, mask, mask); 255 } 256 257 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, 258 uint32_t mask) 259 { 260 snb_update_pm_irq(dev_priv, mask, 0); 261 } 262 263 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 264 { 265 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 266 return; 267 268 __gen6_disable_pm_irq(dev_priv, mask); 269 } 270 271 void gen6_reset_rps_interrupts(struct drm_device *dev) 272 { 273 struct drm_i915_private *dev_priv = dev->dev_private; 274 uint32_t reg = gen6_pm_iir(dev_priv); 275 276 spin_lock_irq(&dev_priv->irq_lock); 277 I915_WRITE(reg, dev_priv->pm_rps_events); 278 I915_WRITE(reg, dev_priv->pm_rps_events); 279 POSTING_READ(reg); 280 spin_unlock_irq(&dev_priv->irq_lock); 281 } 282 283 void gen6_enable_rps_interrupts(struct drm_device *dev) 284 { 285 struct drm_i915_private *dev_priv = dev->dev_private; 286 287 spin_lock_irq(&dev_priv->irq_lock); 288 289 WARN_ON(dev_priv->rps.pm_iir); 290 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 291 dev_priv->rps.interrupts_enabled = true; 292 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 293 dev_priv->pm_rps_events); 294 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 295 296 spin_unlock_irq(&dev_priv->irq_lock); 297 } 298 299 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 300 { 301 /* 302 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer 303 * if GEN6_PM_UP_EI_EXPIRED is masked. 304 * 305 * TODO: verify if this can be reproduced on VLV,CHV. 306 */ 307 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) 308 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; 309 310 if (INTEL_INFO(dev_priv)->gen >= 8) 311 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; 312 313 return mask; 314 } 315 316 void gen6_disable_rps_interrupts(struct drm_device *dev) 317 { 318 struct drm_i915_private *dev_priv = dev->dev_private; 319 320 spin_lock_irq(&dev_priv->irq_lock); 321 dev_priv->rps.interrupts_enabled = false; 322 spin_unlock_irq(&dev_priv->irq_lock); 323 324 cancel_work_sync(&dev_priv->rps.work); 325 326 spin_lock_irq(&dev_priv->irq_lock); 327 328 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 329 330 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 331 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 332 ~dev_priv->pm_rps_events); 333 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); 334 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); 335 336 dev_priv->rps.pm_iir = 0; 337 338 spin_unlock_irq(&dev_priv->irq_lock); 339 } 340 341 /** 342 * ibx_display_interrupt_update - update SDEIMR 343 * @dev_priv: driver private 344 * @interrupt_mask: mask of interrupt bits to update 345 * @enabled_irq_mask: mask of interrupt bits to enable 346 */ 347 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 348 uint32_t interrupt_mask, 349 uint32_t enabled_irq_mask) 350 { 351 uint32_t sdeimr = I915_READ(SDEIMR); 352 sdeimr &= ~interrupt_mask; 353 sdeimr |= (~enabled_irq_mask & interrupt_mask); 354 355 WARN_ON(enabled_irq_mask & ~interrupt_mask); 356 357 assert_spin_locked(&dev_priv->irq_lock); 358 359 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 360 return; 361 362 I915_WRITE(SDEIMR, sdeimr); 363 POSTING_READ(SDEIMR); 364 } 365 366 static void 367 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 368 u32 enable_mask, u32 status_mask) 369 { 370 u32 reg = PIPESTAT(pipe); 371 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 372 373 assert_spin_locked(&dev_priv->irq_lock); 374 WARN_ON(!intel_irqs_enabled(dev_priv)); 375 376 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 377 status_mask & ~PIPESTAT_INT_STATUS_MASK, 378 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 379 pipe_name(pipe), enable_mask, status_mask)) 380 return; 381 382 if ((pipestat & enable_mask) == enable_mask) 383 return; 384 385 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 386 387 /* Enable the interrupt, clear any pending status */ 388 pipestat |= enable_mask | status_mask; 389 I915_WRITE(reg, pipestat); 390 POSTING_READ(reg); 391 } 392 393 static void 394 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 395 u32 enable_mask, u32 status_mask) 396 { 397 u32 reg = PIPESTAT(pipe); 398 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 399 400 assert_spin_locked(&dev_priv->irq_lock); 401 WARN_ON(!intel_irqs_enabled(dev_priv)); 402 403 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 404 status_mask & ~PIPESTAT_INT_STATUS_MASK, 405 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 406 pipe_name(pipe), enable_mask, status_mask)) 407 return; 408 409 if ((pipestat & enable_mask) == 0) 410 return; 411 412 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 413 414 pipestat &= ~enable_mask; 415 I915_WRITE(reg, pipestat); 416 POSTING_READ(reg); 417 } 418 419 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 420 { 421 u32 enable_mask = status_mask << 16; 422 423 /* 424 * On pipe A we don't support the PSR interrupt yet, 425 * on pipe B and C the same bit MBZ. 426 */ 427 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 428 return 0; 429 /* 430 * On pipe B and C we don't support the PSR interrupt yet, on pipe 431 * A the same bit is for perf counters which we don't use either. 432 */ 433 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 434 return 0; 435 436 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 437 SPRITE0_FLIP_DONE_INT_EN_VLV | 438 SPRITE1_FLIP_DONE_INT_EN_VLV); 439 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 440 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 441 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 442 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 443 444 return enable_mask; 445 } 446 447 void 448 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 449 u32 status_mask) 450 { 451 u32 enable_mask; 452 453 if (IS_VALLEYVIEW(dev_priv->dev)) 454 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 455 status_mask); 456 else 457 enable_mask = status_mask << 16; 458 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 459 } 460 461 void 462 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 463 u32 status_mask) 464 { 465 u32 enable_mask; 466 467 if (IS_VALLEYVIEW(dev_priv->dev)) 468 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 469 status_mask); 470 else 471 enable_mask = status_mask << 16; 472 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 473 } 474 475 /** 476 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 477 */ 478 static void i915_enable_asle_pipestat(struct drm_device *dev) 479 { 480 struct drm_i915_private *dev_priv = dev->dev_private; 481 482 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 483 return; 484 485 spin_lock_irq(&dev_priv->irq_lock); 486 487 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 488 if (INTEL_INFO(dev)->gen >= 4) 489 i915_enable_pipestat(dev_priv, PIPE_A, 490 PIPE_LEGACY_BLC_EVENT_STATUS); 491 492 spin_unlock_irq(&dev_priv->irq_lock); 493 } 494 495 /** 496 * i915_pipe_enabled - check if a pipe is enabled 497 * @dev: DRM device 498 * @pipe: pipe to check 499 * 500 * Reading certain registers when the pipe is disabled can hang the chip. 501 * Use this routine to make sure the PLL is running and the pipe is active 502 * before reading such registers if unsure. 503 */ 504 static int 505 i915_pipe_enabled(struct drm_device *dev, int pipe) 506 { 507 struct drm_i915_private *dev_priv = dev->dev_private; 508 509 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 510 /* Locking is horribly broken here, but whatever. */ 511 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 513 514 return intel_crtc->active; 515 } else { 516 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 517 } 518 } 519 520 /* 521 * This timing diagram depicts the video signal in and 522 * around the vertical blanking period. 523 * 524 * Assumptions about the fictitious mode used in this example: 525 * vblank_start >= 3 526 * vsync_start = vblank_start + 1 527 * vsync_end = vblank_start + 2 528 * vtotal = vblank_start + 3 529 * 530 * start of vblank: 531 * latch double buffered registers 532 * increment frame counter (ctg+) 533 * generate start of vblank interrupt (gen4+) 534 * | 535 * | frame start: 536 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 537 * | may be shifted forward 1-3 extra lines via PIPECONF 538 * | | 539 * | | start of vsync: 540 * | | generate vsync interrupt 541 * | | | 542 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 543 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 544 * ----va---> <-----------------vb--------------------> <--------va------------- 545 * | | <----vs-----> | 546 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 547 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 548 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 549 * | | | 550 * last visible pixel first visible pixel 551 * | increment frame counter (gen3/4) 552 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 553 * 554 * x = horizontal active 555 * _ = horizontal blanking 556 * hs = horizontal sync 557 * va = vertical active 558 * vb = vertical blanking 559 * vs = vertical sync 560 * vbs = vblank_start (number) 561 * 562 * Summary: 563 * - most events happen at the start of horizontal sync 564 * - frame start happens at the start of horizontal blank, 1-4 lines 565 * (depending on PIPECONF settings) after the start of vblank 566 * - gen3/4 pixel and frame counter are synchronized with the start 567 * of horizontal active on the first line of vertical active 568 */ 569 570 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 571 { 572 /* Gen2 doesn't have a hardware frame counter */ 573 return 0; 574 } 575 576 /* Called from drm generic code, passed a 'crtc', which 577 * we use as a pipe index 578 */ 579 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 580 { 581 struct drm_i915_private *dev_priv = dev->dev_private; 582 unsigned long high_frame; 583 unsigned long low_frame; 584 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 585 586 if (!i915_pipe_enabled(dev, pipe)) { 587 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 588 "pipe %c\n", pipe_name(pipe)); 589 return 0; 590 } 591 592 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 593 struct intel_crtc *intel_crtc = 594 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 595 const struct drm_display_mode *mode = 596 &intel_crtc->config.adjusted_mode; 597 598 htotal = mode->crtc_htotal; 599 hsync_start = mode->crtc_hsync_start; 600 vbl_start = mode->crtc_vblank_start; 601 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 602 vbl_start = DIV_ROUND_UP(vbl_start, 2); 603 } else { 604 enum transcoder cpu_transcoder = (enum transcoder) pipe; 605 606 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 607 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1; 608 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; 609 if ((I915_READ(PIPECONF(cpu_transcoder)) & 610 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE) 611 vbl_start = DIV_ROUND_UP(vbl_start, 2); 612 } 613 614 /* Convert to pixel count */ 615 vbl_start *= htotal; 616 617 /* Start of vblank event occurs at start of hsync */ 618 vbl_start -= htotal - hsync_start; 619 620 high_frame = PIPEFRAME(pipe); 621 low_frame = PIPEFRAMEPIXEL(pipe); 622 623 /* 624 * High & low register fields aren't synchronized, so make sure 625 * we get a low value that's stable across two reads of the high 626 * register. 627 */ 628 do { 629 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 630 low = I915_READ(low_frame); 631 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 632 } while (high1 != high2); 633 634 high1 >>= PIPE_FRAME_HIGH_SHIFT; 635 pixel = low & PIPE_PIXEL_MASK; 636 low >>= PIPE_FRAME_LOW_SHIFT; 637 638 /* 639 * The frame counter increments at beginning of active. 640 * Cook up a vblank counter by also checking the pixel 641 * counter against vblank start. 642 */ 643 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 644 } 645 646 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 647 { 648 struct drm_i915_private *dev_priv = dev->dev_private; 649 int reg = PIPE_FRMCOUNT_GM45(pipe); 650 651 if (!i915_pipe_enabled(dev, pipe)) { 652 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 653 "pipe %c\n", pipe_name(pipe)); 654 return 0; 655 } 656 657 return I915_READ(reg); 658 } 659 660 /* raw reads, only for fast reads of display block, no need for forcewake etc. */ 661 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) 662 663 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 664 { 665 struct drm_device *dev = crtc->base.dev; 666 struct drm_i915_private *dev_priv = dev->dev_private; 667 const struct drm_display_mode *mode = &crtc->config.adjusted_mode; 668 enum pipe pipe = crtc->pipe; 669 int position, vtotal; 670 671 vtotal = mode->crtc_vtotal; 672 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 673 vtotal /= 2; 674 675 if (IS_GEN2(dev)) 676 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 677 else 678 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 679 680 /* 681 * See update_scanline_offset() for the details on the 682 * scanline_offset adjustment. 683 */ 684 return (position + crtc->scanline_offset) % vtotal; 685 } 686 687 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 688 unsigned int flags, int *vpos, int *hpos, 689 ktime_t *stime, ktime_t *etime) 690 { 691 struct drm_i915_private *dev_priv = dev->dev_private; 692 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 693 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 694 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 695 int position; 696 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 697 bool in_vbl = true; 698 int ret = 0; 699 unsigned long irqflags; 700 701 if (!intel_crtc->active) { 702 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 703 "pipe %c\n", pipe_name(pipe)); 704 return 0; 705 } 706 707 htotal = mode->crtc_htotal; 708 hsync_start = mode->crtc_hsync_start; 709 vtotal = mode->crtc_vtotal; 710 vbl_start = mode->crtc_vblank_start; 711 vbl_end = mode->crtc_vblank_end; 712 713 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 714 vbl_start = DIV_ROUND_UP(vbl_start, 2); 715 vbl_end /= 2; 716 vtotal /= 2; 717 } 718 719 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 720 721 /* 722 * Lock uncore.lock, as we will do multiple timing critical raw 723 * register reads, potentially with preemption disabled, so the 724 * following code must not block on uncore.lock. 725 */ 726 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 727 728 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 729 730 /* Get optional system timestamp before query. */ 731 if (stime) 732 *stime = ktime_get(); 733 734 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 735 /* No obvious pixelcount register. Only query vertical 736 * scanout position from Display scan line register. 737 */ 738 position = __intel_get_crtc_scanline(intel_crtc); 739 } else { 740 /* Have access to pixelcount since start of frame. 741 * We can split this into vertical and horizontal 742 * scanout position. 743 */ 744 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 745 746 /* convert to pixel counts */ 747 vbl_start *= htotal; 748 vbl_end *= htotal; 749 vtotal *= htotal; 750 751 /* 752 * In interlaced modes, the pixel counter counts all pixels, 753 * so one field will have htotal more pixels. In order to avoid 754 * the reported position from jumping backwards when the pixel 755 * counter is beyond the length of the shorter field, just 756 * clamp the position the length of the shorter field. This 757 * matches how the scanline counter based position works since 758 * the scanline counter doesn't count the two half lines. 759 */ 760 if (position >= vtotal) 761 position = vtotal - 1; 762 763 /* 764 * Start of vblank interrupt is triggered at start of hsync, 765 * just prior to the first active line of vblank. However we 766 * consider lines to start at the leading edge of horizontal 767 * active. So, should we get here before we've crossed into 768 * the horizontal active of the first line in vblank, we would 769 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 770 * always add htotal-hsync_start to the current pixel position. 771 */ 772 position = (position + htotal - hsync_start) % vtotal; 773 } 774 775 /* Get optional system timestamp after query. */ 776 if (etime) 777 *etime = ktime_get(); 778 779 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 780 781 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 782 783 in_vbl = position >= vbl_start && position < vbl_end; 784 785 /* 786 * While in vblank, position will be negative 787 * counting up towards 0 at vbl_end. And outside 788 * vblank, position will be positive counting 789 * up since vbl_end. 790 */ 791 if (position >= vbl_start) 792 position -= vbl_end; 793 else 794 position += vtotal - vbl_end; 795 796 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 797 *vpos = position; 798 *hpos = 0; 799 } else { 800 *vpos = position / htotal; 801 *hpos = position - (*vpos * htotal); 802 } 803 804 /* In vblank? */ 805 if (in_vbl) 806 ret |= DRM_SCANOUTPOS_IN_VBLANK; 807 808 return ret; 809 } 810 811 int intel_get_crtc_scanline(struct intel_crtc *crtc) 812 { 813 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 814 unsigned long irqflags; 815 int position; 816 817 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 818 position = __intel_get_crtc_scanline(crtc); 819 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 820 821 return position; 822 } 823 824 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 825 int *max_error, 826 struct timeval *vblank_time, 827 unsigned flags) 828 { 829 struct drm_crtc *crtc; 830 831 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 832 DRM_ERROR("Invalid crtc %d\n", pipe); 833 return -EINVAL; 834 } 835 836 /* Get drm_crtc to timestamp: */ 837 crtc = intel_get_crtc_for_pipe(dev, pipe); 838 if (crtc == NULL) { 839 DRM_ERROR("Invalid crtc %d\n", pipe); 840 return -EINVAL; 841 } 842 843 if (!crtc->enabled) { 844 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 845 return -EBUSY; 846 } 847 848 /* Helper routine in DRM core does all the work: */ 849 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 850 vblank_time, flags, 851 crtc, 852 &to_intel_crtc(crtc)->config.adjusted_mode); 853 } 854 855 static bool intel_hpd_irq_event(struct drm_device *dev, 856 struct drm_connector *connector) 857 { 858 enum drm_connector_status old_status; 859 860 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 861 old_status = connector->status; 862 863 connector->status = connector->funcs->detect(connector, false); 864 if (old_status == connector->status) 865 return false; 866 867 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 868 connector->base.id, 869 connector->name, 870 drm_get_connector_status_name(old_status), 871 drm_get_connector_status_name(connector->status)); 872 873 return true; 874 } 875 876 static void i915_digport_work_func(struct work_struct *work) 877 { 878 struct drm_i915_private *dev_priv = 879 container_of(work, struct drm_i915_private, dig_port_work); 880 u32 long_port_mask, short_port_mask; 881 struct intel_digital_port *intel_dig_port; 882 int i, ret; 883 u32 old_bits = 0; 884 885 spin_lock_irq(&dev_priv->irq_lock); 886 long_port_mask = dev_priv->long_hpd_port_mask; 887 dev_priv->long_hpd_port_mask = 0; 888 short_port_mask = dev_priv->short_hpd_port_mask; 889 dev_priv->short_hpd_port_mask = 0; 890 spin_unlock_irq(&dev_priv->irq_lock); 891 892 for (i = 0; i < I915_MAX_PORTS; i++) { 893 bool valid = false; 894 bool long_hpd = false; 895 intel_dig_port = dev_priv->hpd_irq_port[i]; 896 if (!intel_dig_port || !intel_dig_port->hpd_pulse) 897 continue; 898 899 if (long_port_mask & (1 << i)) { 900 valid = true; 901 long_hpd = true; 902 } else if (short_port_mask & (1 << i)) 903 valid = true; 904 905 if (valid) { 906 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); 907 if (ret == true) { 908 /* if we get true fallback to old school hpd */ 909 old_bits |= (1 << intel_dig_port->base.hpd_pin); 910 } 911 } 912 } 913 914 if (old_bits) { 915 spin_lock_irq(&dev_priv->irq_lock); 916 dev_priv->hpd_event_bits |= old_bits; 917 spin_unlock_irq(&dev_priv->irq_lock); 918 schedule_work(&dev_priv->hotplug_work); 919 } 920 } 921 922 /* 923 * Handle hotplug events outside the interrupt handler proper. 924 */ 925 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 926 927 static void i915_hotplug_work_func(struct work_struct *work) 928 { 929 struct drm_i915_private *dev_priv = 930 container_of(work, struct drm_i915_private, hotplug_work); 931 struct drm_device *dev = dev_priv->dev; 932 struct drm_mode_config *mode_config = &dev->mode_config; 933 struct intel_connector *intel_connector; 934 struct intel_encoder *intel_encoder; 935 struct drm_connector *connector; 936 bool hpd_disabled = false; 937 bool changed = false; 938 u32 hpd_event_bits; 939 940 mutex_lock(&mode_config->mutex); 941 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 942 943 spin_lock_irq(&dev_priv->irq_lock); 944 945 hpd_event_bits = dev_priv->hpd_event_bits; 946 dev_priv->hpd_event_bits = 0; 947 list_for_each_entry(connector, &mode_config->connector_list, head) { 948 intel_connector = to_intel_connector(connector); 949 if (!intel_connector->encoder) 950 continue; 951 intel_encoder = intel_connector->encoder; 952 if (intel_encoder->hpd_pin > HPD_NONE && 953 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 954 connector->polled == DRM_CONNECTOR_POLL_HPD) { 955 DRM_INFO("HPD interrupt storm detected on connector %s: " 956 "switching from hotplug detection to polling\n", 957 connector->name); 958 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 959 connector->polled = DRM_CONNECTOR_POLL_CONNECT 960 | DRM_CONNECTOR_POLL_DISCONNECT; 961 hpd_disabled = true; 962 } 963 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 964 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 965 connector->name, intel_encoder->hpd_pin); 966 } 967 } 968 /* if there were no outputs to poll, poll was disabled, 969 * therefore make sure it's enabled when disabling HPD on 970 * some connectors */ 971 if (hpd_disabled) { 972 drm_kms_helper_poll_enable(dev); 973 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, 974 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 975 } 976 977 spin_unlock_irq(&dev_priv->irq_lock); 978 979 list_for_each_entry(connector, &mode_config->connector_list, head) { 980 intel_connector = to_intel_connector(connector); 981 if (!intel_connector->encoder) 982 continue; 983 intel_encoder = intel_connector->encoder; 984 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 985 if (intel_encoder->hot_plug) 986 intel_encoder->hot_plug(intel_encoder); 987 if (intel_hpd_irq_event(dev, connector)) 988 changed = true; 989 } 990 } 991 mutex_unlock(&mode_config->mutex); 992 993 if (changed) 994 drm_kms_helper_hotplug_event(dev); 995 } 996 997 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 998 { 999 struct drm_i915_private *dev_priv = dev->dev_private; 1000 u32 busy_up, busy_down, max_avg, min_avg; 1001 u8 new_delay; 1002 1003 spin_lock(&mchdev_lock); 1004 1005 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1006 1007 new_delay = dev_priv->ips.cur_delay; 1008 1009 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1010 busy_up = I915_READ(RCPREVBSYTUPAVG); 1011 busy_down = I915_READ(RCPREVBSYTDNAVG); 1012 max_avg = I915_READ(RCBMAXAVG); 1013 min_avg = I915_READ(RCBMINAVG); 1014 1015 /* Handle RCS change request from hw */ 1016 if (busy_up > max_avg) { 1017 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1018 new_delay = dev_priv->ips.cur_delay - 1; 1019 if (new_delay < dev_priv->ips.max_delay) 1020 new_delay = dev_priv->ips.max_delay; 1021 } else if (busy_down < min_avg) { 1022 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1023 new_delay = dev_priv->ips.cur_delay + 1; 1024 if (new_delay > dev_priv->ips.min_delay) 1025 new_delay = dev_priv->ips.min_delay; 1026 } 1027 1028 if (ironlake_set_drps(dev, new_delay)) 1029 dev_priv->ips.cur_delay = new_delay; 1030 1031 spin_unlock(&mchdev_lock); 1032 1033 return; 1034 } 1035 1036 static void notify_ring(struct drm_device *dev, 1037 struct intel_engine_cs *ring) 1038 { 1039 if (!intel_ring_initialized(ring)) 1040 return; 1041 1042 trace_i915_gem_request_notify(ring); 1043 1044 wake_up_all(&ring->irq_queue); 1045 } 1046 1047 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, 1048 struct intel_rps_ei *rps_ei) 1049 { 1050 u32 cz_ts, cz_freq_khz; 1051 u32 render_count, media_count; 1052 u32 elapsed_render, elapsed_media, elapsed_time; 1053 u32 residency = 0; 1054 1055 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 1056 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); 1057 1058 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); 1059 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); 1060 1061 if (rps_ei->cz_clock == 0) { 1062 rps_ei->cz_clock = cz_ts; 1063 rps_ei->render_c0 = render_count; 1064 rps_ei->media_c0 = media_count; 1065 1066 return dev_priv->rps.cur_freq; 1067 } 1068 1069 elapsed_time = cz_ts - rps_ei->cz_clock; 1070 rps_ei->cz_clock = cz_ts; 1071 1072 elapsed_render = render_count - rps_ei->render_c0; 1073 rps_ei->render_c0 = render_count; 1074 1075 elapsed_media = media_count - rps_ei->media_c0; 1076 rps_ei->media_c0 = media_count; 1077 1078 /* Convert all the counters into common unit of milli sec */ 1079 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; 1080 elapsed_render /= cz_freq_khz; 1081 elapsed_media /= cz_freq_khz; 1082 1083 /* 1084 * Calculate overall C0 residency percentage 1085 * only if elapsed time is non zero 1086 */ 1087 if (elapsed_time) { 1088 residency = 1089 ((max(elapsed_render, elapsed_media) * 100) 1090 / elapsed_time); 1091 } 1092 1093 return residency; 1094 } 1095 1096 /** 1097 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU 1098 * busy-ness calculated from C0 counters of render & media power wells 1099 * @dev_priv: DRM device private 1100 * 1101 */ 1102 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) 1103 { 1104 u32 residency_C0_up = 0, residency_C0_down = 0; 1105 int new_delay, adj; 1106 1107 dev_priv->rps.ei_interrupt_count++; 1108 1109 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 1110 1111 1112 if (dev_priv->rps.up_ei.cz_clock == 0) { 1113 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); 1114 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); 1115 return dev_priv->rps.cur_freq; 1116 } 1117 1118 1119 /* 1120 * To down throttle, C0 residency should be less than down threshold 1121 * for continous EI intervals. So calculate down EI counters 1122 * once in VLV_INT_COUNT_FOR_DOWN_EI 1123 */ 1124 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { 1125 1126 dev_priv->rps.ei_interrupt_count = 0; 1127 1128 residency_C0_down = vlv_c0_residency(dev_priv, 1129 &dev_priv->rps.down_ei); 1130 } else { 1131 residency_C0_up = vlv_c0_residency(dev_priv, 1132 &dev_priv->rps.up_ei); 1133 } 1134 1135 new_delay = dev_priv->rps.cur_freq; 1136 1137 adj = dev_priv->rps.last_adj; 1138 /* C0 residency is greater than UP threshold. Increase Frequency */ 1139 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { 1140 if (adj > 0) 1141 adj *= 2; 1142 else 1143 adj = 1; 1144 1145 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) 1146 new_delay = dev_priv->rps.cur_freq + adj; 1147 1148 /* 1149 * For better performance, jump directly 1150 * to RPe if we're below it. 1151 */ 1152 if (new_delay < dev_priv->rps.efficient_freq) 1153 new_delay = dev_priv->rps.efficient_freq; 1154 1155 } else if (!dev_priv->rps.ei_interrupt_count && 1156 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { 1157 if (adj < 0) 1158 adj *= 2; 1159 else 1160 adj = -1; 1161 /* 1162 * This means, C0 residency is less than down threshold over 1163 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq 1164 */ 1165 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) 1166 new_delay = dev_priv->rps.cur_freq + adj; 1167 } 1168 1169 return new_delay; 1170 } 1171 1172 static void gen6_pm_rps_work(struct work_struct *work) 1173 { 1174 struct drm_i915_private *dev_priv = 1175 container_of(work, struct drm_i915_private, rps.work); 1176 u32 pm_iir; 1177 int new_delay, adj; 1178 1179 spin_lock_irq(&dev_priv->irq_lock); 1180 /* Speed up work cancelation during disabling rps interrupts. */ 1181 if (!dev_priv->rps.interrupts_enabled) { 1182 spin_unlock_irq(&dev_priv->irq_lock); 1183 return; 1184 } 1185 pm_iir = dev_priv->rps.pm_iir; 1186 dev_priv->rps.pm_iir = 0; 1187 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1188 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1189 spin_unlock_irq(&dev_priv->irq_lock); 1190 1191 /* Make sure we didn't queue anything we're not going to process. */ 1192 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1193 1194 if ((pm_iir & dev_priv->pm_rps_events) == 0) 1195 return; 1196 1197 mutex_lock(&dev_priv->rps.hw_lock); 1198 1199 adj = dev_priv->rps.last_adj; 1200 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1201 if (adj > 0) 1202 adj *= 2; 1203 else { 1204 /* CHV needs even encode values */ 1205 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; 1206 } 1207 new_delay = dev_priv->rps.cur_freq + adj; 1208 1209 /* 1210 * For better performance, jump directly 1211 * to RPe if we're below it. 1212 */ 1213 if (new_delay < dev_priv->rps.efficient_freq) 1214 new_delay = dev_priv->rps.efficient_freq; 1215 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1216 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1217 new_delay = dev_priv->rps.efficient_freq; 1218 else 1219 new_delay = dev_priv->rps.min_freq_softlimit; 1220 adj = 0; 1221 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1222 new_delay = vlv_calc_delay_from_C0_counters(dev_priv); 1223 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1224 if (adj < 0) 1225 adj *= 2; 1226 else { 1227 /* CHV needs even encode values */ 1228 adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1; 1229 } 1230 new_delay = dev_priv->rps.cur_freq + adj; 1231 } else { /* unknown event */ 1232 new_delay = dev_priv->rps.cur_freq; 1233 } 1234 1235 /* sysfs frequency interfaces may have snuck in while servicing the 1236 * interrupt 1237 */ 1238 new_delay = clamp_t(int, new_delay, 1239 dev_priv->rps.min_freq_softlimit, 1240 dev_priv->rps.max_freq_softlimit); 1241 1242 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; 1243 1244 if (IS_VALLEYVIEW(dev_priv->dev)) 1245 valleyview_set_rps(dev_priv->dev, new_delay); 1246 else 1247 gen6_set_rps(dev_priv->dev, new_delay); 1248 1249 mutex_unlock(&dev_priv->rps.hw_lock); 1250 } 1251 1252 1253 /** 1254 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1255 * occurred. 1256 * @work: workqueue struct 1257 * 1258 * Doesn't actually do anything except notify userspace. As a consequence of 1259 * this event, userspace should try to remap the bad rows since statistically 1260 * it is likely the same row is more likely to go bad again. 1261 */ 1262 static void ivybridge_parity_work(struct work_struct *work) 1263 { 1264 struct drm_i915_private *dev_priv = 1265 container_of(work, struct drm_i915_private, l3_parity.error_work); 1266 u32 error_status, row, bank, subbank; 1267 char *parity_event[6]; 1268 uint32_t misccpctl; 1269 uint8_t slice = 0; 1270 1271 /* We must turn off DOP level clock gating to access the L3 registers. 1272 * In order to prevent a get/put style interface, acquire struct mutex 1273 * any time we access those registers. 1274 */ 1275 mutex_lock(&dev_priv->dev->struct_mutex); 1276 1277 /* If we've screwed up tracking, just let the interrupt fire again */ 1278 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1279 goto out; 1280 1281 misccpctl = I915_READ(GEN7_MISCCPCTL); 1282 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1283 POSTING_READ(GEN7_MISCCPCTL); 1284 1285 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1286 u32 reg; 1287 1288 slice--; 1289 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1290 break; 1291 1292 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1293 1294 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1295 1296 error_status = I915_READ(reg); 1297 row = GEN7_PARITY_ERROR_ROW(error_status); 1298 bank = GEN7_PARITY_ERROR_BANK(error_status); 1299 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1300 1301 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1302 POSTING_READ(reg); 1303 1304 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1305 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1306 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1307 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1308 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1309 parity_event[5] = NULL; 1310 1311 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1312 KOBJ_CHANGE, parity_event); 1313 1314 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1315 slice, row, bank, subbank); 1316 1317 kfree(parity_event[4]); 1318 kfree(parity_event[3]); 1319 kfree(parity_event[2]); 1320 kfree(parity_event[1]); 1321 } 1322 1323 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1324 1325 out: 1326 WARN_ON(dev_priv->l3_parity.which_slice); 1327 spin_lock_irq(&dev_priv->irq_lock); 1328 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1329 spin_unlock_irq(&dev_priv->irq_lock); 1330 1331 mutex_unlock(&dev_priv->dev->struct_mutex); 1332 } 1333 1334 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1335 { 1336 struct drm_i915_private *dev_priv = dev->dev_private; 1337 1338 if (!HAS_L3_DPF(dev)) 1339 return; 1340 1341 spin_lock(&dev_priv->irq_lock); 1342 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1343 spin_unlock(&dev_priv->irq_lock); 1344 1345 iir &= GT_PARITY_ERROR(dev); 1346 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1347 dev_priv->l3_parity.which_slice |= 1 << 1; 1348 1349 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1350 dev_priv->l3_parity.which_slice |= 1 << 0; 1351 1352 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1353 } 1354 1355 static void ilk_gt_irq_handler(struct drm_device *dev, 1356 struct drm_i915_private *dev_priv, 1357 u32 gt_iir) 1358 { 1359 if (gt_iir & 1360 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1361 notify_ring(dev, &dev_priv->ring[RCS]); 1362 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1363 notify_ring(dev, &dev_priv->ring[VCS]); 1364 } 1365 1366 static void snb_gt_irq_handler(struct drm_device *dev, 1367 struct drm_i915_private *dev_priv, 1368 u32 gt_iir) 1369 { 1370 1371 if (gt_iir & 1372 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1373 notify_ring(dev, &dev_priv->ring[RCS]); 1374 if (gt_iir & GT_BSD_USER_INTERRUPT) 1375 notify_ring(dev, &dev_priv->ring[VCS]); 1376 if (gt_iir & GT_BLT_USER_INTERRUPT) 1377 notify_ring(dev, &dev_priv->ring[BCS]); 1378 1379 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1380 GT_BSD_CS_ERROR_INTERRUPT | 1381 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1382 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1383 1384 if (gt_iir & GT_PARITY_ERROR(dev)) 1385 ivybridge_parity_error_irq_handler(dev, gt_iir); 1386 } 1387 1388 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1389 struct drm_i915_private *dev_priv, 1390 u32 master_ctl) 1391 { 1392 struct intel_engine_cs *ring; 1393 u32 rcs, bcs, vcs; 1394 uint32_t tmp = 0; 1395 irqreturn_t ret = IRQ_NONE; 1396 1397 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1398 tmp = I915_READ(GEN8_GT_IIR(0)); 1399 if (tmp) { 1400 I915_WRITE(GEN8_GT_IIR(0), tmp); 1401 ret = IRQ_HANDLED; 1402 1403 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1404 ring = &dev_priv->ring[RCS]; 1405 if (rcs & GT_RENDER_USER_INTERRUPT) 1406 notify_ring(dev, ring); 1407 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) 1408 intel_lrc_irq_handler(ring); 1409 1410 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1411 ring = &dev_priv->ring[BCS]; 1412 if (bcs & GT_RENDER_USER_INTERRUPT) 1413 notify_ring(dev, ring); 1414 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) 1415 intel_lrc_irq_handler(ring); 1416 } else 1417 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1418 } 1419 1420 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1421 tmp = I915_READ(GEN8_GT_IIR(1)); 1422 if (tmp) { 1423 I915_WRITE(GEN8_GT_IIR(1), tmp); 1424 ret = IRQ_HANDLED; 1425 1426 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1427 ring = &dev_priv->ring[VCS]; 1428 if (vcs & GT_RENDER_USER_INTERRUPT) 1429 notify_ring(dev, ring); 1430 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) 1431 intel_lrc_irq_handler(ring); 1432 1433 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; 1434 ring = &dev_priv->ring[VCS2]; 1435 if (vcs & GT_RENDER_USER_INTERRUPT) 1436 notify_ring(dev, ring); 1437 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) 1438 intel_lrc_irq_handler(ring); 1439 } else 1440 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1441 } 1442 1443 if (master_ctl & GEN8_GT_PM_IRQ) { 1444 tmp = I915_READ(GEN8_GT_IIR(2)); 1445 if (tmp & dev_priv->pm_rps_events) { 1446 I915_WRITE(GEN8_GT_IIR(2), 1447 tmp & dev_priv->pm_rps_events); 1448 ret = IRQ_HANDLED; 1449 gen6_rps_irq_handler(dev_priv, tmp); 1450 } else 1451 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1452 } 1453 1454 if (master_ctl & GEN8_GT_VECS_IRQ) { 1455 tmp = I915_READ(GEN8_GT_IIR(3)); 1456 if (tmp) { 1457 I915_WRITE(GEN8_GT_IIR(3), tmp); 1458 ret = IRQ_HANDLED; 1459 1460 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1461 ring = &dev_priv->ring[VECS]; 1462 if (vcs & GT_RENDER_USER_INTERRUPT) 1463 notify_ring(dev, ring); 1464 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) 1465 intel_lrc_irq_handler(ring); 1466 } else 1467 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1468 } 1469 1470 return ret; 1471 } 1472 1473 #define HPD_STORM_DETECT_PERIOD 1000 1474 #define HPD_STORM_THRESHOLD 5 1475 1476 static int pch_port_to_hotplug_shift(enum port port) 1477 { 1478 switch (port) { 1479 case PORT_A: 1480 case PORT_E: 1481 default: 1482 return -1; 1483 case PORT_B: 1484 return 0; 1485 case PORT_C: 1486 return 8; 1487 case PORT_D: 1488 return 16; 1489 } 1490 } 1491 1492 static int i915_port_to_hotplug_shift(enum port port) 1493 { 1494 switch (port) { 1495 case PORT_A: 1496 case PORT_E: 1497 default: 1498 return -1; 1499 case PORT_B: 1500 return 17; 1501 case PORT_C: 1502 return 19; 1503 case PORT_D: 1504 return 21; 1505 } 1506 } 1507 1508 static inline enum port get_port_from_pin(enum hpd_pin pin) 1509 { 1510 switch (pin) { 1511 case HPD_PORT_B: 1512 return PORT_B; 1513 case HPD_PORT_C: 1514 return PORT_C; 1515 case HPD_PORT_D: 1516 return PORT_D; 1517 default: 1518 return PORT_A; /* no hpd */ 1519 } 1520 } 1521 1522 static inline void intel_hpd_irq_handler(struct drm_device *dev, 1523 u32 hotplug_trigger, 1524 u32 dig_hotplug_reg, 1525 const u32 *hpd) 1526 { 1527 struct drm_i915_private *dev_priv = dev->dev_private; 1528 int i; 1529 enum port port; 1530 bool storm_detected = false; 1531 bool queue_dig = false, queue_hp = false; 1532 u32 dig_shift; 1533 u32 dig_port_mask = 0; 1534 1535 if (!hotplug_trigger) 1536 return; 1537 1538 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", 1539 hotplug_trigger, dig_hotplug_reg); 1540 1541 spin_lock(&dev_priv->irq_lock); 1542 for (i = 1; i < HPD_NUM_PINS; i++) { 1543 if (!(hpd[i] & hotplug_trigger)) 1544 continue; 1545 1546 port = get_port_from_pin(i); 1547 if (port && dev_priv->hpd_irq_port[port]) { 1548 bool long_hpd; 1549 1550 if (HAS_PCH_SPLIT(dev)) { 1551 dig_shift = pch_port_to_hotplug_shift(port); 1552 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1553 } else { 1554 dig_shift = i915_port_to_hotplug_shift(port); 1555 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1556 } 1557 1558 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", 1559 port_name(port), 1560 long_hpd ? "long" : "short"); 1561 /* for long HPD pulses we want to have the digital queue happen, 1562 but we still want HPD storm detection to function. */ 1563 if (long_hpd) { 1564 dev_priv->long_hpd_port_mask |= (1 << port); 1565 dig_port_mask |= hpd[i]; 1566 } else { 1567 /* for short HPD just trigger the digital queue */ 1568 dev_priv->short_hpd_port_mask |= (1 << port); 1569 hotplug_trigger &= ~hpd[i]; 1570 } 1571 queue_dig = true; 1572 } 1573 } 1574 1575 for (i = 1; i < HPD_NUM_PINS; i++) { 1576 if (hpd[i] & hotplug_trigger && 1577 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1578 /* 1579 * On GMCH platforms the interrupt mask bits only 1580 * prevent irq generation, not the setting of the 1581 * hotplug bits itself. So only WARN about unexpected 1582 * interrupts on saner platforms. 1583 */ 1584 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), 1585 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1586 hotplug_trigger, i, hpd[i]); 1587 1588 continue; 1589 } 1590 1591 if (!(hpd[i] & hotplug_trigger) || 1592 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1593 continue; 1594 1595 if (!(dig_port_mask & hpd[i])) { 1596 dev_priv->hpd_event_bits |= (1 << i); 1597 queue_hp = true; 1598 } 1599 1600 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1601 dev_priv->hpd_stats[i].hpd_last_jiffies 1602 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1603 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1604 dev_priv->hpd_stats[i].hpd_cnt = 0; 1605 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1606 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1607 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1608 dev_priv->hpd_event_bits &= ~(1 << i); 1609 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1610 storm_detected = true; 1611 } else { 1612 dev_priv->hpd_stats[i].hpd_cnt++; 1613 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1614 dev_priv->hpd_stats[i].hpd_cnt); 1615 } 1616 } 1617 1618 if (storm_detected) 1619 dev_priv->display.hpd_irq_setup(dev); 1620 spin_unlock(&dev_priv->irq_lock); 1621 1622 /* 1623 * Our hotplug handler can grab modeset locks (by calling down into the 1624 * fb helpers). Hence it must not be run on our own dev-priv->wq work 1625 * queue for otherwise the flush_work in the pageflip code will 1626 * deadlock. 1627 */ 1628 if (queue_dig) 1629 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); 1630 if (queue_hp) 1631 schedule_work(&dev_priv->hotplug_work); 1632 } 1633 1634 static void gmbus_irq_handler(struct drm_device *dev) 1635 { 1636 struct drm_i915_private *dev_priv = dev->dev_private; 1637 1638 wake_up_all(&dev_priv->gmbus_wait_queue); 1639 } 1640 1641 static void dp_aux_irq_handler(struct drm_device *dev) 1642 { 1643 struct drm_i915_private *dev_priv = dev->dev_private; 1644 1645 wake_up_all(&dev_priv->gmbus_wait_queue); 1646 } 1647 1648 #if defined(CONFIG_DEBUG_FS) 1649 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1650 uint32_t crc0, uint32_t crc1, 1651 uint32_t crc2, uint32_t crc3, 1652 uint32_t crc4) 1653 { 1654 struct drm_i915_private *dev_priv = dev->dev_private; 1655 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1656 struct intel_pipe_crc_entry *entry; 1657 int head, tail; 1658 1659 spin_lock(&pipe_crc->lock); 1660 1661 if (!pipe_crc->entries) { 1662 spin_unlock(&pipe_crc->lock); 1663 DRM_DEBUG_KMS("spurious interrupt\n"); 1664 return; 1665 } 1666 1667 head = pipe_crc->head; 1668 tail = pipe_crc->tail; 1669 1670 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1671 spin_unlock(&pipe_crc->lock); 1672 DRM_ERROR("CRC buffer overflowing\n"); 1673 return; 1674 } 1675 1676 entry = &pipe_crc->entries[head]; 1677 1678 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1679 entry->crc[0] = crc0; 1680 entry->crc[1] = crc1; 1681 entry->crc[2] = crc2; 1682 entry->crc[3] = crc3; 1683 entry->crc[4] = crc4; 1684 1685 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1686 pipe_crc->head = head; 1687 1688 spin_unlock(&pipe_crc->lock); 1689 1690 wake_up_interruptible(&pipe_crc->wq); 1691 } 1692 #else 1693 static inline void 1694 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1695 uint32_t crc0, uint32_t crc1, 1696 uint32_t crc2, uint32_t crc3, 1697 uint32_t crc4) {} 1698 #endif 1699 1700 1701 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1702 { 1703 struct drm_i915_private *dev_priv = dev->dev_private; 1704 1705 display_pipe_crc_irq_handler(dev, pipe, 1706 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1707 0, 0, 0, 0); 1708 } 1709 1710 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1711 { 1712 struct drm_i915_private *dev_priv = dev->dev_private; 1713 1714 display_pipe_crc_irq_handler(dev, pipe, 1715 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1716 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1717 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1718 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1719 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1720 } 1721 1722 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1723 { 1724 struct drm_i915_private *dev_priv = dev->dev_private; 1725 uint32_t res1, res2; 1726 1727 if (INTEL_INFO(dev)->gen >= 3) 1728 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1729 else 1730 res1 = 0; 1731 1732 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1733 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1734 else 1735 res2 = 0; 1736 1737 display_pipe_crc_irq_handler(dev, pipe, 1738 I915_READ(PIPE_CRC_RES_RED(pipe)), 1739 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1740 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1741 res1, res2); 1742 } 1743 1744 /* The RPS events need forcewake, so we add them to a work queue and mask their 1745 * IMR bits until the work is done. Other interrupts can be processed without 1746 * the work queue. */ 1747 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1748 { 1749 /* TODO: RPS on GEN9+ is not supported yet. */ 1750 if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, 1751 "GEN9+: unexpected RPS IRQ\n")) 1752 return; 1753 1754 if (pm_iir & dev_priv->pm_rps_events) { 1755 spin_lock(&dev_priv->irq_lock); 1756 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1757 if (dev_priv->rps.interrupts_enabled) { 1758 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1759 queue_work(dev_priv->wq, &dev_priv->rps.work); 1760 } 1761 spin_unlock(&dev_priv->irq_lock); 1762 } 1763 1764 if (INTEL_INFO(dev_priv)->gen >= 8) 1765 return; 1766 1767 if (HAS_VEBOX(dev_priv->dev)) { 1768 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1769 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1770 1771 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1772 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1773 } 1774 } 1775 1776 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 1777 { 1778 if (!drm_handle_vblank(dev, pipe)) 1779 return false; 1780 1781 return true; 1782 } 1783 1784 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 1785 { 1786 struct drm_i915_private *dev_priv = dev->dev_private; 1787 u32 pipe_stats[I915_MAX_PIPES] = { }; 1788 int pipe; 1789 1790 spin_lock(&dev_priv->irq_lock); 1791 for_each_pipe(dev_priv, pipe) { 1792 int reg; 1793 u32 mask, iir_bit = 0; 1794 1795 /* 1796 * PIPESTAT bits get signalled even when the interrupt is 1797 * disabled with the mask bits, and some of the status bits do 1798 * not generate interrupts at all (like the underrun bit). Hence 1799 * we need to be careful that we only handle what we want to 1800 * handle. 1801 */ 1802 1803 /* fifo underruns are filterered in the underrun handler. */ 1804 mask = PIPE_FIFO_UNDERRUN_STATUS; 1805 1806 switch (pipe) { 1807 case PIPE_A: 1808 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1809 break; 1810 case PIPE_B: 1811 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1812 break; 1813 case PIPE_C: 1814 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1815 break; 1816 } 1817 if (iir & iir_bit) 1818 mask |= dev_priv->pipestat_irq_mask[pipe]; 1819 1820 if (!mask) 1821 continue; 1822 1823 reg = PIPESTAT(pipe); 1824 mask |= PIPESTAT_INT_ENABLE_MASK; 1825 pipe_stats[pipe] = I915_READ(reg) & mask; 1826 1827 /* 1828 * Clear the PIPE*STAT regs before the IIR 1829 */ 1830 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1831 PIPESTAT_INT_STATUS_MASK)) 1832 I915_WRITE(reg, pipe_stats[pipe]); 1833 } 1834 spin_unlock(&dev_priv->irq_lock); 1835 1836 for_each_pipe(dev_priv, pipe) { 1837 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1838 intel_pipe_handle_vblank(dev, pipe)) 1839 intel_check_page_flip(dev, pipe); 1840 1841 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1842 intel_prepare_page_flip(dev, pipe); 1843 intel_finish_page_flip(dev, pipe); 1844 } 1845 1846 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1847 i9xx_pipe_crc_irq_handler(dev, pipe); 1848 1849 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1850 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1851 } 1852 1853 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1854 gmbus_irq_handler(dev); 1855 } 1856 1857 static void i9xx_hpd_irq_handler(struct drm_device *dev) 1858 { 1859 struct drm_i915_private *dev_priv = dev->dev_private; 1860 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1861 1862 if (hotplug_status) { 1863 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1864 /* 1865 * Make sure hotplug status is cleared before we clear IIR, or else we 1866 * may miss hotplug events. 1867 */ 1868 POSTING_READ(PORT_HOTPLUG_STAT); 1869 1870 if (IS_G4X(dev)) { 1871 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1872 1873 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 1874 } else { 1875 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1876 1877 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 1878 } 1879 1880 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 1881 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1882 dp_aux_irq_handler(dev); 1883 } 1884 } 1885 1886 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1887 { 1888 struct drm_device *dev = arg; 1889 struct drm_i915_private *dev_priv = dev->dev_private; 1890 u32 iir, gt_iir, pm_iir; 1891 irqreturn_t ret = IRQ_NONE; 1892 1893 while (true) { 1894 /* Find, clear, then process each source of interrupt */ 1895 1896 gt_iir = I915_READ(GTIIR); 1897 if (gt_iir) 1898 I915_WRITE(GTIIR, gt_iir); 1899 1900 pm_iir = I915_READ(GEN6_PMIIR); 1901 if (pm_iir) 1902 I915_WRITE(GEN6_PMIIR, pm_iir); 1903 1904 iir = I915_READ(VLV_IIR); 1905 if (iir) { 1906 /* Consume port before clearing IIR or we'll miss events */ 1907 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1908 i9xx_hpd_irq_handler(dev); 1909 I915_WRITE(VLV_IIR, iir); 1910 } 1911 1912 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1913 goto out; 1914 1915 ret = IRQ_HANDLED; 1916 1917 if (gt_iir) 1918 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1919 if (pm_iir) 1920 gen6_rps_irq_handler(dev_priv, pm_iir); 1921 /* Call regardless, as some status bits might not be 1922 * signalled in iir */ 1923 valleyview_pipestat_irq_handler(dev, iir); 1924 } 1925 1926 out: 1927 return ret; 1928 } 1929 1930 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1931 { 1932 struct drm_device *dev = arg; 1933 struct drm_i915_private *dev_priv = dev->dev_private; 1934 u32 master_ctl, iir; 1935 irqreturn_t ret = IRQ_NONE; 1936 1937 for (;;) { 1938 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1939 iir = I915_READ(VLV_IIR); 1940 1941 if (master_ctl == 0 && iir == 0) 1942 break; 1943 1944 ret = IRQ_HANDLED; 1945 1946 I915_WRITE(GEN8_MASTER_IRQ, 0); 1947 1948 /* Find, clear, then process each source of interrupt */ 1949 1950 if (iir) { 1951 /* Consume port before clearing IIR or we'll miss events */ 1952 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1953 i9xx_hpd_irq_handler(dev); 1954 I915_WRITE(VLV_IIR, iir); 1955 } 1956 1957 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1958 1959 /* Call regardless, as some status bits might not be 1960 * signalled in iir */ 1961 valleyview_pipestat_irq_handler(dev, iir); 1962 1963 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1964 POSTING_READ(GEN8_MASTER_IRQ); 1965 } 1966 1967 return ret; 1968 } 1969 1970 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1971 { 1972 struct drm_i915_private *dev_priv = dev->dev_private; 1973 int pipe; 1974 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1975 u32 dig_hotplug_reg; 1976 1977 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1978 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1979 1980 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 1981 1982 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1983 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1984 SDE_AUDIO_POWER_SHIFT); 1985 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1986 port_name(port)); 1987 } 1988 1989 if (pch_iir & SDE_AUX_MASK) 1990 dp_aux_irq_handler(dev); 1991 1992 if (pch_iir & SDE_GMBUS) 1993 gmbus_irq_handler(dev); 1994 1995 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1996 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1997 1998 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1999 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2000 2001 if (pch_iir & SDE_POISON) 2002 DRM_ERROR("PCH poison interrupt\n"); 2003 2004 if (pch_iir & SDE_FDI_MASK) 2005 for_each_pipe(dev_priv, pipe) 2006 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2007 pipe_name(pipe), 2008 I915_READ(FDI_RX_IIR(pipe))); 2009 2010 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2011 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2012 2013 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2014 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2015 2016 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2017 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2018 2019 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2020 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2021 } 2022 2023 static void ivb_err_int_handler(struct drm_device *dev) 2024 { 2025 struct drm_i915_private *dev_priv = dev->dev_private; 2026 u32 err_int = I915_READ(GEN7_ERR_INT); 2027 enum pipe pipe; 2028 2029 if (err_int & ERR_INT_POISON) 2030 DRM_ERROR("Poison interrupt\n"); 2031 2032 for_each_pipe(dev_priv, pipe) { 2033 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2034 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2035 2036 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2037 if (IS_IVYBRIDGE(dev)) 2038 ivb_pipe_crc_irq_handler(dev, pipe); 2039 else 2040 hsw_pipe_crc_irq_handler(dev, pipe); 2041 } 2042 } 2043 2044 I915_WRITE(GEN7_ERR_INT, err_int); 2045 } 2046 2047 static void cpt_serr_int_handler(struct drm_device *dev) 2048 { 2049 struct drm_i915_private *dev_priv = dev->dev_private; 2050 u32 serr_int = I915_READ(SERR_INT); 2051 2052 if (serr_int & SERR_INT_POISON) 2053 DRM_ERROR("PCH poison interrupt\n"); 2054 2055 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2056 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2057 2058 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2059 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2060 2061 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2062 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2063 2064 I915_WRITE(SERR_INT, serr_int); 2065 } 2066 2067 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2068 { 2069 struct drm_i915_private *dev_priv = dev->dev_private; 2070 int pipe; 2071 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2072 u32 dig_hotplug_reg; 2073 2074 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2075 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2076 2077 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 2078 2079 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2080 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2081 SDE_AUDIO_POWER_SHIFT_CPT); 2082 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2083 port_name(port)); 2084 } 2085 2086 if (pch_iir & SDE_AUX_MASK_CPT) 2087 dp_aux_irq_handler(dev); 2088 2089 if (pch_iir & SDE_GMBUS_CPT) 2090 gmbus_irq_handler(dev); 2091 2092 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2093 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2094 2095 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2096 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2097 2098 if (pch_iir & SDE_FDI_MASK_CPT) 2099 for_each_pipe(dev_priv, pipe) 2100 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2101 pipe_name(pipe), 2102 I915_READ(FDI_RX_IIR(pipe))); 2103 2104 if (pch_iir & SDE_ERROR_CPT) 2105 cpt_serr_int_handler(dev); 2106 } 2107 2108 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2109 { 2110 struct drm_i915_private *dev_priv = dev->dev_private; 2111 enum pipe pipe; 2112 2113 if (de_iir & DE_AUX_CHANNEL_A) 2114 dp_aux_irq_handler(dev); 2115 2116 if (de_iir & DE_GSE) 2117 intel_opregion_asle_intr(dev); 2118 2119 if (de_iir & DE_POISON) 2120 DRM_ERROR("Poison interrupt\n"); 2121 2122 for_each_pipe(dev_priv, pipe) { 2123 if (de_iir & DE_PIPE_VBLANK(pipe) && 2124 intel_pipe_handle_vblank(dev, pipe)) 2125 intel_check_page_flip(dev, pipe); 2126 2127 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2128 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2129 2130 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2131 i9xx_pipe_crc_irq_handler(dev, pipe); 2132 2133 /* plane/pipes map 1:1 on ilk+ */ 2134 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2135 intel_prepare_page_flip(dev, pipe); 2136 intel_finish_page_flip_plane(dev, pipe); 2137 } 2138 } 2139 2140 /* check event from PCH */ 2141 if (de_iir & DE_PCH_EVENT) { 2142 u32 pch_iir = I915_READ(SDEIIR); 2143 2144 if (HAS_PCH_CPT(dev)) 2145 cpt_irq_handler(dev, pch_iir); 2146 else 2147 ibx_irq_handler(dev, pch_iir); 2148 2149 /* should clear PCH hotplug event before clear CPU irq */ 2150 I915_WRITE(SDEIIR, pch_iir); 2151 } 2152 2153 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2154 ironlake_rps_change_irq_handler(dev); 2155 } 2156 2157 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2158 { 2159 struct drm_i915_private *dev_priv = dev->dev_private; 2160 enum pipe pipe; 2161 2162 if (de_iir & DE_ERR_INT_IVB) 2163 ivb_err_int_handler(dev); 2164 2165 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2166 dp_aux_irq_handler(dev); 2167 2168 if (de_iir & DE_GSE_IVB) 2169 intel_opregion_asle_intr(dev); 2170 2171 for_each_pipe(dev_priv, pipe) { 2172 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2173 intel_pipe_handle_vblank(dev, pipe)) 2174 intel_check_page_flip(dev, pipe); 2175 2176 /* plane/pipes map 1:1 on ilk+ */ 2177 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2178 intel_prepare_page_flip(dev, pipe); 2179 intel_finish_page_flip_plane(dev, pipe); 2180 } 2181 } 2182 2183 /* check event from PCH */ 2184 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2185 u32 pch_iir = I915_READ(SDEIIR); 2186 2187 cpt_irq_handler(dev, pch_iir); 2188 2189 /* clear PCH hotplug event before clear CPU irq */ 2190 I915_WRITE(SDEIIR, pch_iir); 2191 } 2192 } 2193 2194 /* 2195 * To handle irqs with the minimum potential races with fresh interrupts, we: 2196 * 1 - Disable Master Interrupt Control. 2197 * 2 - Find the source(s) of the interrupt. 2198 * 3 - Clear the Interrupt Identity bits (IIR). 2199 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2200 * 5 - Re-enable Master Interrupt Control. 2201 */ 2202 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2203 { 2204 struct drm_device *dev = arg; 2205 struct drm_i915_private *dev_priv = dev->dev_private; 2206 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2207 irqreturn_t ret = IRQ_NONE; 2208 2209 /* We get interrupts on unclaimed registers, so check for this before we 2210 * do any I915_{READ,WRITE}. */ 2211 intel_uncore_check_errors(dev); 2212 2213 /* disable master interrupt before clearing iir */ 2214 de_ier = I915_READ(DEIER); 2215 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2216 POSTING_READ(DEIER); 2217 2218 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2219 * interrupts will will be stored on its back queue, and then we'll be 2220 * able to process them after we restore SDEIER (as soon as we restore 2221 * it, we'll get an interrupt if SDEIIR still has something to process 2222 * due to its back queue). */ 2223 if (!HAS_PCH_NOP(dev)) { 2224 sde_ier = I915_READ(SDEIER); 2225 I915_WRITE(SDEIER, 0); 2226 POSTING_READ(SDEIER); 2227 } 2228 2229 /* Find, clear, then process each source of interrupt */ 2230 2231 gt_iir = I915_READ(GTIIR); 2232 if (gt_iir) { 2233 I915_WRITE(GTIIR, gt_iir); 2234 ret = IRQ_HANDLED; 2235 if (INTEL_INFO(dev)->gen >= 6) 2236 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2237 else 2238 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2239 } 2240 2241 de_iir = I915_READ(DEIIR); 2242 if (de_iir) { 2243 I915_WRITE(DEIIR, de_iir); 2244 ret = IRQ_HANDLED; 2245 if (INTEL_INFO(dev)->gen >= 7) 2246 ivb_display_irq_handler(dev, de_iir); 2247 else 2248 ilk_display_irq_handler(dev, de_iir); 2249 } 2250 2251 if (INTEL_INFO(dev)->gen >= 6) { 2252 u32 pm_iir = I915_READ(GEN6_PMIIR); 2253 if (pm_iir) { 2254 I915_WRITE(GEN6_PMIIR, pm_iir); 2255 ret = IRQ_HANDLED; 2256 gen6_rps_irq_handler(dev_priv, pm_iir); 2257 } 2258 } 2259 2260 I915_WRITE(DEIER, de_ier); 2261 POSTING_READ(DEIER); 2262 if (!HAS_PCH_NOP(dev)) { 2263 I915_WRITE(SDEIER, sde_ier); 2264 POSTING_READ(SDEIER); 2265 } 2266 2267 return ret; 2268 } 2269 2270 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2271 { 2272 struct drm_device *dev = arg; 2273 struct drm_i915_private *dev_priv = dev->dev_private; 2274 u32 master_ctl; 2275 irqreturn_t ret = IRQ_NONE; 2276 uint32_t tmp = 0; 2277 enum pipe pipe; 2278 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2279 2280 if (IS_GEN9(dev)) 2281 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2282 GEN9_AUX_CHANNEL_D; 2283 2284 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2285 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2286 if (!master_ctl) 2287 return IRQ_NONE; 2288 2289 I915_WRITE(GEN8_MASTER_IRQ, 0); 2290 POSTING_READ(GEN8_MASTER_IRQ); 2291 2292 /* Find, clear, then process each source of interrupt */ 2293 2294 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2295 2296 if (master_ctl & GEN8_DE_MISC_IRQ) { 2297 tmp = I915_READ(GEN8_DE_MISC_IIR); 2298 if (tmp) { 2299 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2300 ret = IRQ_HANDLED; 2301 if (tmp & GEN8_DE_MISC_GSE) 2302 intel_opregion_asle_intr(dev); 2303 else 2304 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2305 } 2306 else 2307 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2308 } 2309 2310 if (master_ctl & GEN8_DE_PORT_IRQ) { 2311 tmp = I915_READ(GEN8_DE_PORT_IIR); 2312 if (tmp) { 2313 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2314 ret = IRQ_HANDLED; 2315 2316 if (tmp & aux_mask) 2317 dp_aux_irq_handler(dev); 2318 else 2319 DRM_ERROR("Unexpected DE Port interrupt\n"); 2320 } 2321 else 2322 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2323 } 2324 2325 for_each_pipe(dev_priv, pipe) { 2326 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2327 2328 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2329 continue; 2330 2331 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2332 if (pipe_iir) { 2333 ret = IRQ_HANDLED; 2334 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2335 2336 if (pipe_iir & GEN8_PIPE_VBLANK && 2337 intel_pipe_handle_vblank(dev, pipe)) 2338 intel_check_page_flip(dev, pipe); 2339 2340 if (IS_GEN9(dev)) 2341 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2342 else 2343 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2344 2345 if (flip_done) { 2346 intel_prepare_page_flip(dev, pipe); 2347 intel_finish_page_flip_plane(dev, pipe); 2348 } 2349 2350 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2351 hsw_pipe_crc_irq_handler(dev, pipe); 2352 2353 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2354 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2355 pipe); 2356 2357 2358 if (IS_GEN9(dev)) 2359 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2360 else 2361 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2362 2363 if (fault_errors) 2364 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2365 pipe_name(pipe), 2366 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2367 } else 2368 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2369 } 2370 2371 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2372 /* 2373 * FIXME(BDW): Assume for now that the new interrupt handling 2374 * scheme also closed the SDE interrupt handling race we've seen 2375 * on older pch-split platforms. But this needs testing. 2376 */ 2377 u32 pch_iir = I915_READ(SDEIIR); 2378 if (pch_iir) { 2379 I915_WRITE(SDEIIR, pch_iir); 2380 ret = IRQ_HANDLED; 2381 cpt_irq_handler(dev, pch_iir); 2382 } else 2383 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2384 2385 } 2386 2387 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2388 POSTING_READ(GEN8_MASTER_IRQ); 2389 2390 return ret; 2391 } 2392 2393 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2394 bool reset_completed) 2395 { 2396 struct intel_engine_cs *ring; 2397 int i; 2398 2399 /* 2400 * Notify all waiters for GPU completion events that reset state has 2401 * been changed, and that they need to restart their wait after 2402 * checking for potential errors (and bail out to drop locks if there is 2403 * a gpu reset pending so that i915_error_work_func can acquire them). 2404 */ 2405 2406 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2407 for_each_ring(ring, dev_priv, i) 2408 wake_up_all(&ring->irq_queue); 2409 2410 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2411 wake_up_all(&dev_priv->pending_flip_queue); 2412 2413 /* 2414 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2415 * reset state is cleared. 2416 */ 2417 if (reset_completed) 2418 wake_up_all(&dev_priv->gpu_error.reset_queue); 2419 } 2420 2421 /** 2422 * i915_error_work_func - do process context error handling work 2423 * @work: work struct 2424 * 2425 * Fire an error uevent so userspace can see that a hang or error 2426 * was detected. 2427 */ 2428 static void i915_error_work_func(struct work_struct *work) 2429 { 2430 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2431 work); 2432 struct drm_i915_private *dev_priv = 2433 container_of(error, struct drm_i915_private, gpu_error); 2434 struct drm_device *dev = dev_priv->dev; 2435 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2436 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2437 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2438 int ret; 2439 2440 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2441 2442 /* 2443 * Note that there's only one work item which does gpu resets, so we 2444 * need not worry about concurrent gpu resets potentially incrementing 2445 * error->reset_counter twice. We only need to take care of another 2446 * racing irq/hangcheck declaring the gpu dead for a second time. A 2447 * quick check for that is good enough: schedule_work ensures the 2448 * correct ordering between hang detection and this work item, and since 2449 * the reset in-progress bit is only ever set by code outside of this 2450 * work we don't need to worry about any other races. 2451 */ 2452 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2453 DRM_DEBUG_DRIVER("resetting chip\n"); 2454 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2455 reset_event); 2456 2457 /* 2458 * In most cases it's guaranteed that we get here with an RPM 2459 * reference held, for example because there is a pending GPU 2460 * request that won't finish until the reset is done. This 2461 * isn't the case at least when we get here by doing a 2462 * simulated reset via debugs, so get an RPM reference. 2463 */ 2464 intel_runtime_pm_get(dev_priv); 2465 2466 intel_prepare_reset(dev); 2467 2468 /* 2469 * All state reset _must_ be completed before we update the 2470 * reset counter, for otherwise waiters might miss the reset 2471 * pending state and not properly drop locks, resulting in 2472 * deadlocks with the reset work. 2473 */ 2474 ret = i915_reset(dev); 2475 2476 intel_finish_reset(dev); 2477 2478 intel_runtime_pm_put(dev_priv); 2479 2480 if (ret == 0) { 2481 /* 2482 * After all the gem state is reset, increment the reset 2483 * counter and wake up everyone waiting for the reset to 2484 * complete. 2485 * 2486 * Since unlock operations are a one-sided barrier only, 2487 * we need to insert a barrier here to order any seqno 2488 * updates before 2489 * the counter increment. 2490 */ 2491 smp_mb__before_atomic(); 2492 atomic_inc(&dev_priv->gpu_error.reset_counter); 2493 2494 kobject_uevent_env(&dev->primary->kdev->kobj, 2495 KOBJ_CHANGE, reset_done_event); 2496 } else { 2497 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2498 } 2499 2500 /* 2501 * Note: The wake_up also serves as a memory barrier so that 2502 * waiters see the update value of the reset counter atomic_t. 2503 */ 2504 i915_error_wake_up(dev_priv, true); 2505 } 2506 } 2507 2508 static void i915_report_and_clear_eir(struct drm_device *dev) 2509 { 2510 struct drm_i915_private *dev_priv = dev->dev_private; 2511 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2512 u32 eir = I915_READ(EIR); 2513 int pipe, i; 2514 2515 if (!eir) 2516 return; 2517 2518 pr_err("render error detected, EIR: 0x%08x\n", eir); 2519 2520 i915_get_extra_instdone(dev, instdone); 2521 2522 if (IS_G4X(dev)) { 2523 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2524 u32 ipeir = I915_READ(IPEIR_I965); 2525 2526 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2527 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2528 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2529 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2530 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2531 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2532 I915_WRITE(IPEIR_I965, ipeir); 2533 POSTING_READ(IPEIR_I965); 2534 } 2535 if (eir & GM45_ERROR_PAGE_TABLE) { 2536 u32 pgtbl_err = I915_READ(PGTBL_ER); 2537 pr_err("page table error\n"); 2538 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2539 I915_WRITE(PGTBL_ER, pgtbl_err); 2540 POSTING_READ(PGTBL_ER); 2541 } 2542 } 2543 2544 if (!IS_GEN2(dev)) { 2545 if (eir & I915_ERROR_PAGE_TABLE) { 2546 u32 pgtbl_err = I915_READ(PGTBL_ER); 2547 pr_err("page table error\n"); 2548 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2549 I915_WRITE(PGTBL_ER, pgtbl_err); 2550 POSTING_READ(PGTBL_ER); 2551 } 2552 } 2553 2554 if (eir & I915_ERROR_MEMORY_REFRESH) { 2555 pr_err("memory refresh error:\n"); 2556 for_each_pipe(dev_priv, pipe) 2557 pr_err("pipe %c stat: 0x%08x\n", 2558 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2559 /* pipestat has already been acked */ 2560 } 2561 if (eir & I915_ERROR_INSTRUCTION) { 2562 pr_err("instruction error\n"); 2563 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2564 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2565 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2566 if (INTEL_INFO(dev)->gen < 4) { 2567 u32 ipeir = I915_READ(IPEIR); 2568 2569 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2570 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2571 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2572 I915_WRITE(IPEIR, ipeir); 2573 POSTING_READ(IPEIR); 2574 } else { 2575 u32 ipeir = I915_READ(IPEIR_I965); 2576 2577 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2578 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2579 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2580 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2581 I915_WRITE(IPEIR_I965, ipeir); 2582 POSTING_READ(IPEIR_I965); 2583 } 2584 } 2585 2586 I915_WRITE(EIR, eir); 2587 POSTING_READ(EIR); 2588 eir = I915_READ(EIR); 2589 if (eir) { 2590 /* 2591 * some errors might have become stuck, 2592 * mask them. 2593 */ 2594 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2595 I915_WRITE(EMR, I915_READ(EMR) | eir); 2596 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2597 } 2598 } 2599 2600 /** 2601 * i915_handle_error - handle an error interrupt 2602 * @dev: drm device 2603 * 2604 * Do some basic checking of regsiter state at error interrupt time and 2605 * dump it to the syslog. Also call i915_capture_error_state() to make 2606 * sure we get a record and make it available in debugfs. Fire a uevent 2607 * so userspace knows something bad happened (should trigger collection 2608 * of a ring dump etc.). 2609 */ 2610 void i915_handle_error(struct drm_device *dev, bool wedged, 2611 const char *fmt, ...) 2612 { 2613 struct drm_i915_private *dev_priv = dev->dev_private; 2614 va_list args; 2615 char error_msg[80]; 2616 2617 va_start(args, fmt); 2618 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2619 va_end(args); 2620 2621 i915_capture_error_state(dev, wedged, error_msg); 2622 i915_report_and_clear_eir(dev); 2623 2624 if (wedged) { 2625 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2626 &dev_priv->gpu_error.reset_counter); 2627 2628 /* 2629 * Wakeup waiting processes so that the reset work function 2630 * i915_error_work_func doesn't deadlock trying to grab various 2631 * locks. By bumping the reset counter first, the woken 2632 * processes will see a reset in progress and back off, 2633 * releasing their locks and then wait for the reset completion. 2634 * We must do this for _all_ gpu waiters that might hold locks 2635 * that the reset work needs to acquire. 2636 * 2637 * Note: The wake_up serves as the required memory barrier to 2638 * ensure that the waiters see the updated value of the reset 2639 * counter atomic_t. 2640 */ 2641 i915_error_wake_up(dev_priv, false); 2642 } 2643 2644 /* 2645 * Our reset work can grab modeset locks (since it needs to reset the 2646 * state of outstanding pagelips). Hence it must not be run on our own 2647 * dev-priv->wq work queue for otherwise the flush_work in the pageflip 2648 * code will deadlock. 2649 */ 2650 schedule_work(&dev_priv->gpu_error.work); 2651 } 2652 2653 /* Called from drm generic code, passed 'crtc' which 2654 * we use as a pipe index 2655 */ 2656 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2657 { 2658 struct drm_i915_private *dev_priv = dev->dev_private; 2659 unsigned long irqflags; 2660 2661 if (!i915_pipe_enabled(dev, pipe)) 2662 return -EINVAL; 2663 2664 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2665 if (INTEL_INFO(dev)->gen >= 4) 2666 i915_enable_pipestat(dev_priv, pipe, 2667 PIPE_START_VBLANK_INTERRUPT_STATUS); 2668 else 2669 i915_enable_pipestat(dev_priv, pipe, 2670 PIPE_VBLANK_INTERRUPT_STATUS); 2671 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2672 2673 return 0; 2674 } 2675 2676 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2677 { 2678 struct drm_i915_private *dev_priv = dev->dev_private; 2679 unsigned long irqflags; 2680 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2681 DE_PIPE_VBLANK(pipe); 2682 2683 if (!i915_pipe_enabled(dev, pipe)) 2684 return -EINVAL; 2685 2686 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2687 ironlake_enable_display_irq(dev_priv, bit); 2688 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2689 2690 return 0; 2691 } 2692 2693 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2694 { 2695 struct drm_i915_private *dev_priv = dev->dev_private; 2696 unsigned long irqflags; 2697 2698 if (!i915_pipe_enabled(dev, pipe)) 2699 return -EINVAL; 2700 2701 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2702 i915_enable_pipestat(dev_priv, pipe, 2703 PIPE_START_VBLANK_INTERRUPT_STATUS); 2704 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2705 2706 return 0; 2707 } 2708 2709 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2710 { 2711 struct drm_i915_private *dev_priv = dev->dev_private; 2712 unsigned long irqflags; 2713 2714 if (!i915_pipe_enabled(dev, pipe)) 2715 return -EINVAL; 2716 2717 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2718 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2719 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2720 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2721 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2722 return 0; 2723 } 2724 2725 /* Called from drm generic code, passed 'crtc' which 2726 * we use as a pipe index 2727 */ 2728 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2729 { 2730 struct drm_i915_private *dev_priv = dev->dev_private; 2731 unsigned long irqflags; 2732 2733 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2734 i915_disable_pipestat(dev_priv, pipe, 2735 PIPE_VBLANK_INTERRUPT_STATUS | 2736 PIPE_START_VBLANK_INTERRUPT_STATUS); 2737 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2738 } 2739 2740 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2741 { 2742 struct drm_i915_private *dev_priv = dev->dev_private; 2743 unsigned long irqflags; 2744 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2745 DE_PIPE_VBLANK(pipe); 2746 2747 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2748 ironlake_disable_display_irq(dev_priv, bit); 2749 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2750 } 2751 2752 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2753 { 2754 struct drm_i915_private *dev_priv = dev->dev_private; 2755 unsigned long irqflags; 2756 2757 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2758 i915_disable_pipestat(dev_priv, pipe, 2759 PIPE_START_VBLANK_INTERRUPT_STATUS); 2760 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2761 } 2762 2763 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2764 { 2765 struct drm_i915_private *dev_priv = dev->dev_private; 2766 unsigned long irqflags; 2767 2768 if (!i915_pipe_enabled(dev, pipe)) 2769 return; 2770 2771 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2772 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2773 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2774 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2775 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2776 } 2777 2778 static struct drm_i915_gem_request * 2779 ring_last_request(struct intel_engine_cs *ring) 2780 { 2781 return list_entry(ring->request_list.prev, 2782 struct drm_i915_gem_request, list); 2783 } 2784 2785 static bool 2786 ring_idle(struct intel_engine_cs *ring) 2787 { 2788 return (list_empty(&ring->request_list) || 2789 i915_gem_request_completed(ring_last_request(ring), false)); 2790 } 2791 2792 static bool 2793 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2794 { 2795 if (INTEL_INFO(dev)->gen >= 8) { 2796 return (ipehr >> 23) == 0x1c; 2797 } else { 2798 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2799 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2800 MI_SEMAPHORE_REGISTER); 2801 } 2802 } 2803 2804 static struct intel_engine_cs * 2805 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2806 { 2807 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2808 struct intel_engine_cs *signaller; 2809 int i; 2810 2811 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2812 for_each_ring(signaller, dev_priv, i) { 2813 if (ring == signaller) 2814 continue; 2815 2816 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2817 return signaller; 2818 } 2819 } else { 2820 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2821 2822 for_each_ring(signaller, dev_priv, i) { 2823 if(ring == signaller) 2824 continue; 2825 2826 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2827 return signaller; 2828 } 2829 } 2830 2831 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2832 ring->id, ipehr, offset); 2833 2834 return NULL; 2835 } 2836 2837 static struct intel_engine_cs * 2838 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2839 { 2840 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2841 u32 cmd, ipehr, head; 2842 u64 offset = 0; 2843 int i, backwards; 2844 2845 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2846 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2847 return NULL; 2848 2849 /* 2850 * HEAD is likely pointing to the dword after the actual command, 2851 * so scan backwards until we find the MBOX. But limit it to just 3 2852 * or 4 dwords depending on the semaphore wait command size. 2853 * Note that we don't care about ACTHD here since that might 2854 * point at at batch, and semaphores are always emitted into the 2855 * ringbuffer itself. 2856 */ 2857 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2858 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2859 2860 for (i = backwards; i; --i) { 2861 /* 2862 * Be paranoid and presume the hw has gone off into the wild - 2863 * our ring is smaller than what the hardware (and hence 2864 * HEAD_ADDR) allows. Also handles wrap-around. 2865 */ 2866 head &= ring->buffer->size - 1; 2867 2868 /* This here seems to blow up */ 2869 cmd = ioread32(ring->buffer->virtual_start + head); 2870 if (cmd == ipehr) 2871 break; 2872 2873 head -= 4; 2874 } 2875 2876 if (!i) 2877 return NULL; 2878 2879 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2880 if (INTEL_INFO(ring->dev)->gen >= 8) { 2881 offset = ioread32(ring->buffer->virtual_start + head + 12); 2882 offset <<= 32; 2883 offset = ioread32(ring->buffer->virtual_start + head + 8); 2884 } 2885 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2886 } 2887 2888 static int semaphore_passed(struct intel_engine_cs *ring) 2889 { 2890 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2891 struct intel_engine_cs *signaller; 2892 u32 seqno; 2893 2894 ring->hangcheck.deadlock++; 2895 2896 signaller = semaphore_waits_for(ring, &seqno); 2897 if (signaller == NULL) 2898 return -1; 2899 2900 /* Prevent pathological recursion due to driver bugs */ 2901 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2902 return -1; 2903 2904 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2905 return 1; 2906 2907 /* cursory check for an unkickable deadlock */ 2908 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2909 semaphore_passed(signaller) < 0) 2910 return -1; 2911 2912 return 0; 2913 } 2914 2915 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2916 { 2917 struct intel_engine_cs *ring; 2918 int i; 2919 2920 for_each_ring(ring, dev_priv, i) 2921 ring->hangcheck.deadlock = 0; 2922 } 2923 2924 static enum intel_ring_hangcheck_action 2925 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2926 { 2927 struct drm_device *dev = ring->dev; 2928 struct drm_i915_private *dev_priv = dev->dev_private; 2929 u32 tmp; 2930 2931 if (acthd != ring->hangcheck.acthd) { 2932 if (acthd > ring->hangcheck.max_acthd) { 2933 ring->hangcheck.max_acthd = acthd; 2934 return HANGCHECK_ACTIVE; 2935 } 2936 2937 return HANGCHECK_ACTIVE_LOOP; 2938 } 2939 2940 if (IS_GEN2(dev)) 2941 return HANGCHECK_HUNG; 2942 2943 /* Is the chip hanging on a WAIT_FOR_EVENT? 2944 * If so we can simply poke the RB_WAIT bit 2945 * and break the hang. This should work on 2946 * all but the second generation chipsets. 2947 */ 2948 tmp = I915_READ_CTL(ring); 2949 if (tmp & RING_WAIT) { 2950 i915_handle_error(dev, false, 2951 "Kicking stuck wait on %s", 2952 ring->name); 2953 I915_WRITE_CTL(ring, tmp); 2954 return HANGCHECK_KICK; 2955 } 2956 2957 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2958 switch (semaphore_passed(ring)) { 2959 default: 2960 return HANGCHECK_HUNG; 2961 case 1: 2962 i915_handle_error(dev, false, 2963 "Kicking stuck semaphore on %s", 2964 ring->name); 2965 I915_WRITE_CTL(ring, tmp); 2966 return HANGCHECK_KICK; 2967 case 0: 2968 return HANGCHECK_WAIT; 2969 } 2970 } 2971 2972 return HANGCHECK_HUNG; 2973 } 2974 2975 /** 2976 * This is called when the chip hasn't reported back with completed 2977 * batchbuffers in a long time. We keep track per ring seqno progress and 2978 * if there are no progress, hangcheck score for that ring is increased. 2979 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2980 * we kick the ring. If we see no progress on three subsequent calls 2981 * we assume chip is wedged and try to fix it by resetting the chip. 2982 */ 2983 static void i915_hangcheck_elapsed(unsigned long data) 2984 { 2985 struct drm_device *dev = (struct drm_device *)data; 2986 struct drm_i915_private *dev_priv = dev->dev_private; 2987 struct intel_engine_cs *ring; 2988 int i; 2989 int busy_count = 0, rings_hung = 0; 2990 bool stuck[I915_NUM_RINGS] = { 0 }; 2991 #define BUSY 1 2992 #define KICK 5 2993 #define HUNG 20 2994 2995 if (!i915.enable_hangcheck) 2996 return; 2997 2998 for_each_ring(ring, dev_priv, i) { 2999 u64 acthd; 3000 u32 seqno; 3001 bool busy = true; 3002 3003 semaphore_clear_deadlocks(dev_priv); 3004 3005 seqno = ring->get_seqno(ring, false); 3006 acthd = intel_ring_get_active_head(ring); 3007 3008 if (ring->hangcheck.seqno == seqno) { 3009 if (ring_idle(ring)) { 3010 ring->hangcheck.action = HANGCHECK_IDLE; 3011 3012 if (waitqueue_active(&ring->irq_queue)) { 3013 /* Issue a wake-up to catch stuck h/w. */ 3014 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 3015 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 3016 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3017 ring->name); 3018 else 3019 DRM_INFO("Fake missed irq on %s\n", 3020 ring->name); 3021 wake_up_all(&ring->irq_queue); 3022 } 3023 /* Safeguard against driver failure */ 3024 ring->hangcheck.score += BUSY; 3025 } else 3026 busy = false; 3027 } else { 3028 /* We always increment the hangcheck score 3029 * if the ring is busy and still processing 3030 * the same request, so that no single request 3031 * can run indefinitely (such as a chain of 3032 * batches). The only time we do not increment 3033 * the hangcheck score on this ring, if this 3034 * ring is in a legitimate wait for another 3035 * ring. In that case the waiting ring is a 3036 * victim and we want to be sure we catch the 3037 * right culprit. Then every time we do kick 3038 * the ring, add a small increment to the 3039 * score so that we can catch a batch that is 3040 * being repeatedly kicked and so responsible 3041 * for stalling the machine. 3042 */ 3043 ring->hangcheck.action = ring_stuck(ring, 3044 acthd); 3045 3046 switch (ring->hangcheck.action) { 3047 case HANGCHECK_IDLE: 3048 case HANGCHECK_WAIT: 3049 case HANGCHECK_ACTIVE: 3050 break; 3051 case HANGCHECK_ACTIVE_LOOP: 3052 ring->hangcheck.score += BUSY; 3053 break; 3054 case HANGCHECK_KICK: 3055 ring->hangcheck.score += KICK; 3056 break; 3057 case HANGCHECK_HUNG: 3058 ring->hangcheck.score += HUNG; 3059 stuck[i] = true; 3060 break; 3061 } 3062 } 3063 } else { 3064 ring->hangcheck.action = HANGCHECK_ACTIVE; 3065 3066 /* Gradually reduce the count so that we catch DoS 3067 * attempts across multiple batches. 3068 */ 3069 if (ring->hangcheck.score > 0) 3070 ring->hangcheck.score--; 3071 3072 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3073 } 3074 3075 ring->hangcheck.seqno = seqno; 3076 ring->hangcheck.acthd = acthd; 3077 busy_count += busy; 3078 } 3079 3080 for_each_ring(ring, dev_priv, i) { 3081 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3082 DRM_INFO("%s on %s\n", 3083 stuck[i] ? "stuck" : "no progress", 3084 ring->name); 3085 rings_hung++; 3086 } 3087 } 3088 3089 if (rings_hung) 3090 return i915_handle_error(dev, true, "Ring hung"); 3091 3092 if (busy_count) 3093 /* Reset timer case chip hangs without another request 3094 * being added */ 3095 i915_queue_hangcheck(dev); 3096 } 3097 3098 void i915_queue_hangcheck(struct drm_device *dev) 3099 { 3100 struct drm_i915_private *dev_priv = dev->dev_private; 3101 struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer; 3102 3103 if (!i915.enable_hangcheck) 3104 return; 3105 3106 /* Don't continually defer the hangcheck, but make sure it is active */ 3107 if (timer_pending(timer)) 3108 return; 3109 mod_timer(timer, 3110 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3111 } 3112 3113 static void ibx_irq_reset(struct drm_device *dev) 3114 { 3115 struct drm_i915_private *dev_priv = dev->dev_private; 3116 3117 if (HAS_PCH_NOP(dev)) 3118 return; 3119 3120 GEN5_IRQ_RESET(SDE); 3121 3122 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3123 I915_WRITE(SERR_INT, 0xffffffff); 3124 } 3125 3126 /* 3127 * SDEIER is also touched by the interrupt handler to work around missed PCH 3128 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3129 * instead we unconditionally enable all PCH interrupt sources here, but then 3130 * only unmask them as needed with SDEIMR. 3131 * 3132 * This function needs to be called before interrupts are enabled. 3133 */ 3134 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3135 { 3136 struct drm_i915_private *dev_priv = dev->dev_private; 3137 3138 if (HAS_PCH_NOP(dev)) 3139 return; 3140 3141 WARN_ON(I915_READ(SDEIER) != 0); 3142 I915_WRITE(SDEIER, 0xffffffff); 3143 POSTING_READ(SDEIER); 3144 } 3145 3146 static void gen5_gt_irq_reset(struct drm_device *dev) 3147 { 3148 struct drm_i915_private *dev_priv = dev->dev_private; 3149 3150 GEN5_IRQ_RESET(GT); 3151 if (INTEL_INFO(dev)->gen >= 6) 3152 GEN5_IRQ_RESET(GEN6_PM); 3153 } 3154 3155 /* drm_dma.h hooks 3156 */ 3157 static void ironlake_irq_reset(struct drm_device *dev) 3158 { 3159 struct drm_i915_private *dev_priv = dev->dev_private; 3160 3161 I915_WRITE(HWSTAM, 0xffffffff); 3162 3163 GEN5_IRQ_RESET(DE); 3164 if (IS_GEN7(dev)) 3165 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3166 3167 gen5_gt_irq_reset(dev); 3168 3169 ibx_irq_reset(dev); 3170 } 3171 3172 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3173 { 3174 enum pipe pipe; 3175 3176 I915_WRITE(PORT_HOTPLUG_EN, 0); 3177 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3178 3179 for_each_pipe(dev_priv, pipe) 3180 I915_WRITE(PIPESTAT(pipe), 0xffff); 3181 3182 GEN5_IRQ_RESET(VLV_); 3183 } 3184 3185 static void valleyview_irq_preinstall(struct drm_device *dev) 3186 { 3187 struct drm_i915_private *dev_priv = dev->dev_private; 3188 3189 /* VLV magic */ 3190 I915_WRITE(VLV_IMR, 0); 3191 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3192 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3193 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3194 3195 gen5_gt_irq_reset(dev); 3196 3197 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3198 3199 vlv_display_irq_reset(dev_priv); 3200 } 3201 3202 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3203 { 3204 GEN8_IRQ_RESET_NDX(GT, 0); 3205 GEN8_IRQ_RESET_NDX(GT, 1); 3206 GEN8_IRQ_RESET_NDX(GT, 2); 3207 GEN8_IRQ_RESET_NDX(GT, 3); 3208 } 3209 3210 static void gen8_irq_reset(struct drm_device *dev) 3211 { 3212 struct drm_i915_private *dev_priv = dev->dev_private; 3213 int pipe; 3214 3215 I915_WRITE(GEN8_MASTER_IRQ, 0); 3216 POSTING_READ(GEN8_MASTER_IRQ); 3217 3218 gen8_gt_irq_reset(dev_priv); 3219 3220 for_each_pipe(dev_priv, pipe) 3221 if (intel_display_power_is_enabled(dev_priv, 3222 POWER_DOMAIN_PIPE(pipe))) 3223 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3224 3225 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3226 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3227 GEN5_IRQ_RESET(GEN8_PCU_); 3228 3229 ibx_irq_reset(dev); 3230 } 3231 3232 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3233 { 3234 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3235 3236 spin_lock_irq(&dev_priv->irq_lock); 3237 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3238 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3239 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3240 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3241 spin_unlock_irq(&dev_priv->irq_lock); 3242 } 3243 3244 static void cherryview_irq_preinstall(struct drm_device *dev) 3245 { 3246 struct drm_i915_private *dev_priv = dev->dev_private; 3247 3248 I915_WRITE(GEN8_MASTER_IRQ, 0); 3249 POSTING_READ(GEN8_MASTER_IRQ); 3250 3251 gen8_gt_irq_reset(dev_priv); 3252 3253 GEN5_IRQ_RESET(GEN8_PCU_); 3254 3255 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3256 3257 vlv_display_irq_reset(dev_priv); 3258 } 3259 3260 static void ibx_hpd_irq_setup(struct drm_device *dev) 3261 { 3262 struct drm_i915_private *dev_priv = dev->dev_private; 3263 struct intel_encoder *intel_encoder; 3264 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3265 3266 if (HAS_PCH_IBX(dev)) { 3267 hotplug_irqs = SDE_HOTPLUG_MASK; 3268 for_each_intel_encoder(dev, intel_encoder) 3269 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3270 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3271 } else { 3272 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3273 for_each_intel_encoder(dev, intel_encoder) 3274 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3275 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3276 } 3277 3278 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3279 3280 /* 3281 * Enable digital hotplug on the PCH, and configure the DP short pulse 3282 * duration to 2ms (which is the minimum in the Display Port spec) 3283 * 3284 * This register is the same on all known PCH chips. 3285 */ 3286 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3287 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3288 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3289 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3290 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3291 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3292 } 3293 3294 static void ibx_irq_postinstall(struct drm_device *dev) 3295 { 3296 struct drm_i915_private *dev_priv = dev->dev_private; 3297 u32 mask; 3298 3299 if (HAS_PCH_NOP(dev)) 3300 return; 3301 3302 if (HAS_PCH_IBX(dev)) 3303 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3304 else 3305 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3306 3307 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3308 I915_WRITE(SDEIMR, ~mask); 3309 } 3310 3311 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3312 { 3313 struct drm_i915_private *dev_priv = dev->dev_private; 3314 u32 pm_irqs, gt_irqs; 3315 3316 pm_irqs = gt_irqs = 0; 3317 3318 dev_priv->gt_irq_mask = ~0; 3319 if (HAS_L3_DPF(dev)) { 3320 /* L3 parity interrupt is always unmasked. */ 3321 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3322 gt_irqs |= GT_PARITY_ERROR(dev); 3323 } 3324 3325 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3326 if (IS_GEN5(dev)) { 3327 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3328 ILK_BSD_USER_INTERRUPT; 3329 } else { 3330 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3331 } 3332 3333 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3334 3335 if (INTEL_INFO(dev)->gen >= 6) { 3336 /* 3337 * RPS interrupts will get enabled/disabled on demand when RPS 3338 * itself is enabled/disabled. 3339 */ 3340 if (HAS_VEBOX(dev)) 3341 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3342 3343 dev_priv->pm_irq_mask = 0xffffffff; 3344 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3345 } 3346 } 3347 3348 static int ironlake_irq_postinstall(struct drm_device *dev) 3349 { 3350 struct drm_i915_private *dev_priv = dev->dev_private; 3351 u32 display_mask, extra_mask; 3352 3353 if (INTEL_INFO(dev)->gen >= 7) { 3354 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3355 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3356 DE_PLANEB_FLIP_DONE_IVB | 3357 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3358 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3359 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3360 } else { 3361 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3362 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3363 DE_AUX_CHANNEL_A | 3364 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3365 DE_POISON); 3366 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3367 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3368 } 3369 3370 dev_priv->irq_mask = ~display_mask; 3371 3372 I915_WRITE(HWSTAM, 0xeffe); 3373 3374 ibx_irq_pre_postinstall(dev); 3375 3376 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3377 3378 gen5_gt_irq_postinstall(dev); 3379 3380 ibx_irq_postinstall(dev); 3381 3382 if (IS_IRONLAKE_M(dev)) { 3383 /* Enable PCU event interrupts 3384 * 3385 * spinlocking not required here for correctness since interrupt 3386 * setup is guaranteed to run in single-threaded context. But we 3387 * need it to make the assert_spin_locked happy. */ 3388 spin_lock_irq(&dev_priv->irq_lock); 3389 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3390 spin_unlock_irq(&dev_priv->irq_lock); 3391 } 3392 3393 return 0; 3394 } 3395 3396 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3397 { 3398 u32 pipestat_mask; 3399 u32 iir_mask; 3400 enum pipe pipe; 3401 3402 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3403 PIPE_FIFO_UNDERRUN_STATUS; 3404 3405 for_each_pipe(dev_priv, pipe) 3406 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3407 POSTING_READ(PIPESTAT(PIPE_A)); 3408 3409 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3410 PIPE_CRC_DONE_INTERRUPT_STATUS; 3411 3412 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3413 for_each_pipe(dev_priv, pipe) 3414 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3415 3416 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3417 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3418 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3419 if (IS_CHERRYVIEW(dev_priv)) 3420 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3421 dev_priv->irq_mask &= ~iir_mask; 3422 3423 I915_WRITE(VLV_IIR, iir_mask); 3424 I915_WRITE(VLV_IIR, iir_mask); 3425 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3426 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3427 POSTING_READ(VLV_IMR); 3428 } 3429 3430 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3431 { 3432 u32 pipestat_mask; 3433 u32 iir_mask; 3434 enum pipe pipe; 3435 3436 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3437 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3438 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3439 if (IS_CHERRYVIEW(dev_priv)) 3440 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3441 3442 dev_priv->irq_mask |= iir_mask; 3443 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3444 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3445 I915_WRITE(VLV_IIR, iir_mask); 3446 I915_WRITE(VLV_IIR, iir_mask); 3447 POSTING_READ(VLV_IIR); 3448 3449 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3450 PIPE_CRC_DONE_INTERRUPT_STATUS; 3451 3452 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3453 for_each_pipe(dev_priv, pipe) 3454 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3455 3456 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3457 PIPE_FIFO_UNDERRUN_STATUS; 3458 3459 for_each_pipe(dev_priv, pipe) 3460 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3461 POSTING_READ(PIPESTAT(PIPE_A)); 3462 } 3463 3464 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3465 { 3466 assert_spin_locked(&dev_priv->irq_lock); 3467 3468 if (dev_priv->display_irqs_enabled) 3469 return; 3470 3471 dev_priv->display_irqs_enabled = true; 3472 3473 if (intel_irqs_enabled(dev_priv)) 3474 valleyview_display_irqs_install(dev_priv); 3475 } 3476 3477 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3478 { 3479 assert_spin_locked(&dev_priv->irq_lock); 3480 3481 if (!dev_priv->display_irqs_enabled) 3482 return; 3483 3484 dev_priv->display_irqs_enabled = false; 3485 3486 if (intel_irqs_enabled(dev_priv)) 3487 valleyview_display_irqs_uninstall(dev_priv); 3488 } 3489 3490 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3491 { 3492 dev_priv->irq_mask = ~0; 3493 3494 I915_WRITE(PORT_HOTPLUG_EN, 0); 3495 POSTING_READ(PORT_HOTPLUG_EN); 3496 3497 I915_WRITE(VLV_IIR, 0xffffffff); 3498 I915_WRITE(VLV_IIR, 0xffffffff); 3499 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3500 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3501 POSTING_READ(VLV_IMR); 3502 3503 /* Interrupt setup is already guaranteed to be single-threaded, this is 3504 * just to make the assert_spin_locked check happy. */ 3505 spin_lock_irq(&dev_priv->irq_lock); 3506 if (dev_priv->display_irqs_enabled) 3507 valleyview_display_irqs_install(dev_priv); 3508 spin_unlock_irq(&dev_priv->irq_lock); 3509 } 3510 3511 static int valleyview_irq_postinstall(struct drm_device *dev) 3512 { 3513 struct drm_i915_private *dev_priv = dev->dev_private; 3514 3515 vlv_display_irq_postinstall(dev_priv); 3516 3517 gen5_gt_irq_postinstall(dev); 3518 3519 /* ack & enable invalid PTE error interrupts */ 3520 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3521 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3522 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3523 #endif 3524 3525 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3526 3527 return 0; 3528 } 3529 3530 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3531 { 3532 /* These are interrupts we'll toggle with the ring mask register */ 3533 uint32_t gt_interrupts[] = { 3534 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3535 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3536 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3537 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3538 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3539 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3540 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3541 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3542 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3543 0, 3544 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3545 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3546 }; 3547 3548 dev_priv->pm_irq_mask = 0xffffffff; 3549 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3550 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3551 /* 3552 * RPS interrupts will get enabled/disabled on demand when RPS itself 3553 * is enabled/disabled. 3554 */ 3555 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3556 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3557 } 3558 3559 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3560 { 3561 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3562 uint32_t de_pipe_enables; 3563 int pipe; 3564 u32 aux_en = GEN8_AUX_CHANNEL_A; 3565 3566 if (IS_GEN9(dev_priv)) { 3567 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3568 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3569 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3570 GEN9_AUX_CHANNEL_D; 3571 } else 3572 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3573 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3574 3575 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3576 GEN8_PIPE_FIFO_UNDERRUN; 3577 3578 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3579 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3580 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3581 3582 for_each_pipe(dev_priv, pipe) 3583 if (intel_display_power_is_enabled(dev_priv, 3584 POWER_DOMAIN_PIPE(pipe))) 3585 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3586 dev_priv->de_irq_mask[pipe], 3587 de_pipe_enables); 3588 3589 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); 3590 } 3591 3592 static int gen8_irq_postinstall(struct drm_device *dev) 3593 { 3594 struct drm_i915_private *dev_priv = dev->dev_private; 3595 3596 ibx_irq_pre_postinstall(dev); 3597 3598 gen8_gt_irq_postinstall(dev_priv); 3599 gen8_de_irq_postinstall(dev_priv); 3600 3601 ibx_irq_postinstall(dev); 3602 3603 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3604 POSTING_READ(GEN8_MASTER_IRQ); 3605 3606 return 0; 3607 } 3608 3609 static int cherryview_irq_postinstall(struct drm_device *dev) 3610 { 3611 struct drm_i915_private *dev_priv = dev->dev_private; 3612 3613 vlv_display_irq_postinstall(dev_priv); 3614 3615 gen8_gt_irq_postinstall(dev_priv); 3616 3617 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3618 POSTING_READ(GEN8_MASTER_IRQ); 3619 3620 return 0; 3621 } 3622 3623 static void gen8_irq_uninstall(struct drm_device *dev) 3624 { 3625 struct drm_i915_private *dev_priv = dev->dev_private; 3626 3627 if (!dev_priv) 3628 return; 3629 3630 gen8_irq_reset(dev); 3631 } 3632 3633 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3634 { 3635 /* Interrupt setup is already guaranteed to be single-threaded, this is 3636 * just to make the assert_spin_locked check happy. */ 3637 spin_lock_irq(&dev_priv->irq_lock); 3638 if (dev_priv->display_irqs_enabled) 3639 valleyview_display_irqs_uninstall(dev_priv); 3640 spin_unlock_irq(&dev_priv->irq_lock); 3641 3642 vlv_display_irq_reset(dev_priv); 3643 3644 dev_priv->irq_mask = ~0; 3645 } 3646 3647 static void valleyview_irq_uninstall(struct drm_device *dev) 3648 { 3649 struct drm_i915_private *dev_priv = dev->dev_private; 3650 3651 if (!dev_priv) 3652 return; 3653 3654 I915_WRITE(VLV_MASTER_IER, 0); 3655 3656 gen5_gt_irq_reset(dev); 3657 3658 I915_WRITE(HWSTAM, 0xffffffff); 3659 3660 vlv_display_irq_uninstall(dev_priv); 3661 } 3662 3663 static void cherryview_irq_uninstall(struct drm_device *dev) 3664 { 3665 struct drm_i915_private *dev_priv = dev->dev_private; 3666 3667 if (!dev_priv) 3668 return; 3669 3670 I915_WRITE(GEN8_MASTER_IRQ, 0); 3671 POSTING_READ(GEN8_MASTER_IRQ); 3672 3673 gen8_gt_irq_reset(dev_priv); 3674 3675 GEN5_IRQ_RESET(GEN8_PCU_); 3676 3677 vlv_display_irq_uninstall(dev_priv); 3678 } 3679 3680 static void ironlake_irq_uninstall(struct drm_device *dev) 3681 { 3682 struct drm_i915_private *dev_priv = dev->dev_private; 3683 3684 if (!dev_priv) 3685 return; 3686 3687 ironlake_irq_reset(dev); 3688 } 3689 3690 static void i8xx_irq_preinstall(struct drm_device * dev) 3691 { 3692 struct drm_i915_private *dev_priv = dev->dev_private; 3693 int pipe; 3694 3695 for_each_pipe(dev_priv, pipe) 3696 I915_WRITE(PIPESTAT(pipe), 0); 3697 I915_WRITE16(IMR, 0xffff); 3698 I915_WRITE16(IER, 0x0); 3699 POSTING_READ16(IER); 3700 } 3701 3702 static int i8xx_irq_postinstall(struct drm_device *dev) 3703 { 3704 struct drm_i915_private *dev_priv = dev->dev_private; 3705 3706 I915_WRITE16(EMR, 3707 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3708 3709 /* Unmask the interrupts that we always want on. */ 3710 dev_priv->irq_mask = 3711 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3712 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3713 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3714 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3715 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3716 I915_WRITE16(IMR, dev_priv->irq_mask); 3717 3718 I915_WRITE16(IER, 3719 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3720 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3721 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3722 I915_USER_INTERRUPT); 3723 POSTING_READ16(IER); 3724 3725 /* Interrupt setup is already guaranteed to be single-threaded, this is 3726 * just to make the assert_spin_locked check happy. */ 3727 spin_lock_irq(&dev_priv->irq_lock); 3728 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3729 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3730 spin_unlock_irq(&dev_priv->irq_lock); 3731 3732 return 0; 3733 } 3734 3735 /* 3736 * Returns true when a page flip has completed. 3737 */ 3738 static bool i8xx_handle_vblank(struct drm_device *dev, 3739 int plane, int pipe, u32 iir) 3740 { 3741 struct drm_i915_private *dev_priv = dev->dev_private; 3742 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3743 3744 if (!intel_pipe_handle_vblank(dev, pipe)) 3745 return false; 3746 3747 if ((iir & flip_pending) == 0) 3748 goto check_page_flip; 3749 3750 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3751 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3752 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3753 * the flip is completed (no longer pending). Since this doesn't raise 3754 * an interrupt per se, we watch for the change at vblank. 3755 */ 3756 if (I915_READ16(ISR) & flip_pending) 3757 goto check_page_flip; 3758 3759 intel_prepare_page_flip(dev, plane); 3760 intel_finish_page_flip(dev, pipe); 3761 return true; 3762 3763 check_page_flip: 3764 intel_check_page_flip(dev, pipe); 3765 return false; 3766 } 3767 3768 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3769 { 3770 struct drm_device *dev = arg; 3771 struct drm_i915_private *dev_priv = dev->dev_private; 3772 u16 iir, new_iir; 3773 u32 pipe_stats[2]; 3774 int pipe; 3775 u16 flip_mask = 3776 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3777 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3778 3779 iir = I915_READ16(IIR); 3780 if (iir == 0) 3781 return IRQ_NONE; 3782 3783 while (iir & ~flip_mask) { 3784 /* Can't rely on pipestat interrupt bit in iir as it might 3785 * have been cleared after the pipestat interrupt was received. 3786 * It doesn't set the bit in iir again, but it still produces 3787 * interrupts (for non-MSI). 3788 */ 3789 spin_lock(&dev_priv->irq_lock); 3790 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3791 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3792 3793 for_each_pipe(dev_priv, pipe) { 3794 int reg = PIPESTAT(pipe); 3795 pipe_stats[pipe] = I915_READ(reg); 3796 3797 /* 3798 * Clear the PIPE*STAT regs before the IIR 3799 */ 3800 if (pipe_stats[pipe] & 0x8000ffff) 3801 I915_WRITE(reg, pipe_stats[pipe]); 3802 } 3803 spin_unlock(&dev_priv->irq_lock); 3804 3805 I915_WRITE16(IIR, iir & ~flip_mask); 3806 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3807 3808 if (iir & I915_USER_INTERRUPT) 3809 notify_ring(dev, &dev_priv->ring[RCS]); 3810 3811 for_each_pipe(dev_priv, pipe) { 3812 int plane = pipe; 3813 if (HAS_FBC(dev)) 3814 plane = !plane; 3815 3816 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3817 i8xx_handle_vblank(dev, plane, pipe, iir)) 3818 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3819 3820 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3821 i9xx_pipe_crc_irq_handler(dev, pipe); 3822 3823 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3824 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3825 pipe); 3826 } 3827 3828 iir = new_iir; 3829 } 3830 3831 return IRQ_HANDLED; 3832 } 3833 3834 static void i8xx_irq_uninstall(struct drm_device * dev) 3835 { 3836 struct drm_i915_private *dev_priv = dev->dev_private; 3837 int pipe; 3838 3839 for_each_pipe(dev_priv, pipe) { 3840 /* Clear enable bits; then clear status bits */ 3841 I915_WRITE(PIPESTAT(pipe), 0); 3842 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3843 } 3844 I915_WRITE16(IMR, 0xffff); 3845 I915_WRITE16(IER, 0x0); 3846 I915_WRITE16(IIR, I915_READ16(IIR)); 3847 } 3848 3849 static void i915_irq_preinstall(struct drm_device * dev) 3850 { 3851 struct drm_i915_private *dev_priv = dev->dev_private; 3852 int pipe; 3853 3854 if (I915_HAS_HOTPLUG(dev)) { 3855 I915_WRITE(PORT_HOTPLUG_EN, 0); 3856 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3857 } 3858 3859 I915_WRITE16(HWSTAM, 0xeffe); 3860 for_each_pipe(dev_priv, pipe) 3861 I915_WRITE(PIPESTAT(pipe), 0); 3862 I915_WRITE(IMR, 0xffffffff); 3863 I915_WRITE(IER, 0x0); 3864 POSTING_READ(IER); 3865 } 3866 3867 static int i915_irq_postinstall(struct drm_device *dev) 3868 { 3869 struct drm_i915_private *dev_priv = dev->dev_private; 3870 u32 enable_mask; 3871 3872 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3873 3874 /* Unmask the interrupts that we always want on. */ 3875 dev_priv->irq_mask = 3876 ~(I915_ASLE_INTERRUPT | 3877 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3878 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3879 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3880 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3881 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3882 3883 enable_mask = 3884 I915_ASLE_INTERRUPT | 3885 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3886 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3887 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3888 I915_USER_INTERRUPT; 3889 3890 if (I915_HAS_HOTPLUG(dev)) { 3891 I915_WRITE(PORT_HOTPLUG_EN, 0); 3892 POSTING_READ(PORT_HOTPLUG_EN); 3893 3894 /* Enable in IER... */ 3895 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3896 /* and unmask in IMR */ 3897 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3898 } 3899 3900 I915_WRITE(IMR, dev_priv->irq_mask); 3901 I915_WRITE(IER, enable_mask); 3902 POSTING_READ(IER); 3903 3904 i915_enable_asle_pipestat(dev); 3905 3906 /* Interrupt setup is already guaranteed to be single-threaded, this is 3907 * just to make the assert_spin_locked check happy. */ 3908 spin_lock_irq(&dev_priv->irq_lock); 3909 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3910 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3911 spin_unlock_irq(&dev_priv->irq_lock); 3912 3913 return 0; 3914 } 3915 3916 /* 3917 * Returns true when a page flip has completed. 3918 */ 3919 static bool i915_handle_vblank(struct drm_device *dev, 3920 int plane, int pipe, u32 iir) 3921 { 3922 struct drm_i915_private *dev_priv = dev->dev_private; 3923 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3924 3925 if (!intel_pipe_handle_vblank(dev, pipe)) 3926 return false; 3927 3928 if ((iir & flip_pending) == 0) 3929 goto check_page_flip; 3930 3931 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3932 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3933 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3934 * the flip is completed (no longer pending). Since this doesn't raise 3935 * an interrupt per se, we watch for the change at vblank. 3936 */ 3937 if (I915_READ(ISR) & flip_pending) 3938 goto check_page_flip; 3939 3940 intel_prepare_page_flip(dev, plane); 3941 intel_finish_page_flip(dev, pipe); 3942 return true; 3943 3944 check_page_flip: 3945 intel_check_page_flip(dev, pipe); 3946 return false; 3947 } 3948 3949 static irqreturn_t i915_irq_handler(int irq, void *arg) 3950 { 3951 struct drm_device *dev = arg; 3952 struct drm_i915_private *dev_priv = dev->dev_private; 3953 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3954 u32 flip_mask = 3955 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3956 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3957 int pipe, ret = IRQ_NONE; 3958 3959 iir = I915_READ(IIR); 3960 do { 3961 bool irq_received = (iir & ~flip_mask) != 0; 3962 bool blc_event = false; 3963 3964 /* Can't rely on pipestat interrupt bit in iir as it might 3965 * have been cleared after the pipestat interrupt was received. 3966 * It doesn't set the bit in iir again, but it still produces 3967 * interrupts (for non-MSI). 3968 */ 3969 spin_lock(&dev_priv->irq_lock); 3970 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3971 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3972 3973 for_each_pipe(dev_priv, pipe) { 3974 int reg = PIPESTAT(pipe); 3975 pipe_stats[pipe] = I915_READ(reg); 3976 3977 /* Clear the PIPE*STAT regs before the IIR */ 3978 if (pipe_stats[pipe] & 0x8000ffff) { 3979 I915_WRITE(reg, pipe_stats[pipe]); 3980 irq_received = true; 3981 } 3982 } 3983 spin_unlock(&dev_priv->irq_lock); 3984 3985 if (!irq_received) 3986 break; 3987 3988 /* Consume port. Then clear IIR or we'll miss events */ 3989 if (I915_HAS_HOTPLUG(dev) && 3990 iir & I915_DISPLAY_PORT_INTERRUPT) 3991 i9xx_hpd_irq_handler(dev); 3992 3993 I915_WRITE(IIR, iir & ~flip_mask); 3994 new_iir = I915_READ(IIR); /* Flush posted writes */ 3995 3996 if (iir & I915_USER_INTERRUPT) 3997 notify_ring(dev, &dev_priv->ring[RCS]); 3998 3999 for_each_pipe(dev_priv, pipe) { 4000 int plane = pipe; 4001 if (HAS_FBC(dev)) 4002 plane = !plane; 4003 4004 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4005 i915_handle_vblank(dev, plane, pipe, iir)) 4006 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4007 4008 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4009 blc_event = true; 4010 4011 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4012 i9xx_pipe_crc_irq_handler(dev, pipe); 4013 4014 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4015 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4016 pipe); 4017 } 4018 4019 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4020 intel_opregion_asle_intr(dev); 4021 4022 /* With MSI, interrupts are only generated when iir 4023 * transitions from zero to nonzero. If another bit got 4024 * set while we were handling the existing iir bits, then 4025 * we would never get another interrupt. 4026 * 4027 * This is fine on non-MSI as well, as if we hit this path 4028 * we avoid exiting the interrupt handler only to generate 4029 * another one. 4030 * 4031 * Note that for MSI this could cause a stray interrupt report 4032 * if an interrupt landed in the time between writing IIR and 4033 * the posting read. This should be rare enough to never 4034 * trigger the 99% of 100,000 interrupts test for disabling 4035 * stray interrupts. 4036 */ 4037 ret = IRQ_HANDLED; 4038 iir = new_iir; 4039 } while (iir & ~flip_mask); 4040 4041 return ret; 4042 } 4043 4044 static void i915_irq_uninstall(struct drm_device * dev) 4045 { 4046 struct drm_i915_private *dev_priv = dev->dev_private; 4047 int pipe; 4048 4049 if (I915_HAS_HOTPLUG(dev)) { 4050 I915_WRITE(PORT_HOTPLUG_EN, 0); 4051 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4052 } 4053 4054 I915_WRITE16(HWSTAM, 0xffff); 4055 for_each_pipe(dev_priv, pipe) { 4056 /* Clear enable bits; then clear status bits */ 4057 I915_WRITE(PIPESTAT(pipe), 0); 4058 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4059 } 4060 I915_WRITE(IMR, 0xffffffff); 4061 I915_WRITE(IER, 0x0); 4062 4063 I915_WRITE(IIR, I915_READ(IIR)); 4064 } 4065 4066 static void i965_irq_preinstall(struct drm_device * dev) 4067 { 4068 struct drm_i915_private *dev_priv = dev->dev_private; 4069 int pipe; 4070 4071 I915_WRITE(PORT_HOTPLUG_EN, 0); 4072 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4073 4074 I915_WRITE(HWSTAM, 0xeffe); 4075 for_each_pipe(dev_priv, pipe) 4076 I915_WRITE(PIPESTAT(pipe), 0); 4077 I915_WRITE(IMR, 0xffffffff); 4078 I915_WRITE(IER, 0x0); 4079 POSTING_READ(IER); 4080 } 4081 4082 static int i965_irq_postinstall(struct drm_device *dev) 4083 { 4084 struct drm_i915_private *dev_priv = dev->dev_private; 4085 u32 enable_mask; 4086 u32 error_mask; 4087 4088 /* Unmask the interrupts that we always want on. */ 4089 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4090 I915_DISPLAY_PORT_INTERRUPT | 4091 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4092 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4093 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4094 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4095 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4096 4097 enable_mask = ~dev_priv->irq_mask; 4098 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4099 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4100 enable_mask |= I915_USER_INTERRUPT; 4101 4102 if (IS_G4X(dev)) 4103 enable_mask |= I915_BSD_USER_INTERRUPT; 4104 4105 /* Interrupt setup is already guaranteed to be single-threaded, this is 4106 * just to make the assert_spin_locked check happy. */ 4107 spin_lock_irq(&dev_priv->irq_lock); 4108 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4109 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4110 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4111 spin_unlock_irq(&dev_priv->irq_lock); 4112 4113 /* 4114 * Enable some error detection, note the instruction error mask 4115 * bit is reserved, so we leave it masked. 4116 */ 4117 if (IS_G4X(dev)) { 4118 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4119 GM45_ERROR_MEM_PRIV | 4120 GM45_ERROR_CP_PRIV | 4121 I915_ERROR_MEMORY_REFRESH); 4122 } else { 4123 error_mask = ~(I915_ERROR_PAGE_TABLE | 4124 I915_ERROR_MEMORY_REFRESH); 4125 } 4126 I915_WRITE(EMR, error_mask); 4127 4128 I915_WRITE(IMR, dev_priv->irq_mask); 4129 I915_WRITE(IER, enable_mask); 4130 POSTING_READ(IER); 4131 4132 I915_WRITE(PORT_HOTPLUG_EN, 0); 4133 POSTING_READ(PORT_HOTPLUG_EN); 4134 4135 i915_enable_asle_pipestat(dev); 4136 4137 return 0; 4138 } 4139 4140 static void i915_hpd_irq_setup(struct drm_device *dev) 4141 { 4142 struct drm_i915_private *dev_priv = dev->dev_private; 4143 struct intel_encoder *intel_encoder; 4144 u32 hotplug_en; 4145 4146 assert_spin_locked(&dev_priv->irq_lock); 4147 4148 if (I915_HAS_HOTPLUG(dev)) { 4149 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4150 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4151 /* Note HDMI and DP share hotplug bits */ 4152 /* enable bits are the same for all generations */ 4153 for_each_intel_encoder(dev, intel_encoder) 4154 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4155 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4156 /* Programming the CRT detection parameters tends 4157 to generate a spurious hotplug event about three 4158 seconds later. So just do it once. 4159 */ 4160 if (IS_G4X(dev)) 4161 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4162 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4163 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4164 4165 /* Ignore TV since it's buggy */ 4166 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4167 } 4168 } 4169 4170 static irqreturn_t i965_irq_handler(int irq, void *arg) 4171 { 4172 struct drm_device *dev = arg; 4173 struct drm_i915_private *dev_priv = dev->dev_private; 4174 u32 iir, new_iir; 4175 u32 pipe_stats[I915_MAX_PIPES]; 4176 int ret = IRQ_NONE, pipe; 4177 u32 flip_mask = 4178 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4179 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4180 4181 iir = I915_READ(IIR); 4182 4183 for (;;) { 4184 bool irq_received = (iir & ~flip_mask) != 0; 4185 bool blc_event = false; 4186 4187 /* Can't rely on pipestat interrupt bit in iir as it might 4188 * have been cleared after the pipestat interrupt was received. 4189 * It doesn't set the bit in iir again, but it still produces 4190 * interrupts (for non-MSI). 4191 */ 4192 spin_lock(&dev_priv->irq_lock); 4193 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4194 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4195 4196 for_each_pipe(dev_priv, pipe) { 4197 int reg = PIPESTAT(pipe); 4198 pipe_stats[pipe] = I915_READ(reg); 4199 4200 /* 4201 * Clear the PIPE*STAT regs before the IIR 4202 */ 4203 if (pipe_stats[pipe] & 0x8000ffff) { 4204 I915_WRITE(reg, pipe_stats[pipe]); 4205 irq_received = true; 4206 } 4207 } 4208 spin_unlock(&dev_priv->irq_lock); 4209 4210 if (!irq_received) 4211 break; 4212 4213 ret = IRQ_HANDLED; 4214 4215 /* Consume port. Then clear IIR or we'll miss events */ 4216 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4217 i9xx_hpd_irq_handler(dev); 4218 4219 I915_WRITE(IIR, iir & ~flip_mask); 4220 new_iir = I915_READ(IIR); /* Flush posted writes */ 4221 4222 if (iir & I915_USER_INTERRUPT) 4223 notify_ring(dev, &dev_priv->ring[RCS]); 4224 if (iir & I915_BSD_USER_INTERRUPT) 4225 notify_ring(dev, &dev_priv->ring[VCS]); 4226 4227 for_each_pipe(dev_priv, pipe) { 4228 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4229 i915_handle_vblank(dev, pipe, pipe, iir)) 4230 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4231 4232 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4233 blc_event = true; 4234 4235 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4236 i9xx_pipe_crc_irq_handler(dev, pipe); 4237 4238 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4239 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4240 } 4241 4242 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4243 intel_opregion_asle_intr(dev); 4244 4245 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4246 gmbus_irq_handler(dev); 4247 4248 /* With MSI, interrupts are only generated when iir 4249 * transitions from zero to nonzero. If another bit got 4250 * set while we were handling the existing iir bits, then 4251 * we would never get another interrupt. 4252 * 4253 * This is fine on non-MSI as well, as if we hit this path 4254 * we avoid exiting the interrupt handler only to generate 4255 * another one. 4256 * 4257 * Note that for MSI this could cause a stray interrupt report 4258 * if an interrupt landed in the time between writing IIR and 4259 * the posting read. This should be rare enough to never 4260 * trigger the 99% of 100,000 interrupts test for disabling 4261 * stray interrupts. 4262 */ 4263 iir = new_iir; 4264 } 4265 4266 return ret; 4267 } 4268 4269 static void i965_irq_uninstall(struct drm_device * dev) 4270 { 4271 struct drm_i915_private *dev_priv = dev->dev_private; 4272 int pipe; 4273 4274 if (!dev_priv) 4275 return; 4276 4277 I915_WRITE(PORT_HOTPLUG_EN, 0); 4278 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4279 4280 I915_WRITE(HWSTAM, 0xffffffff); 4281 for_each_pipe(dev_priv, pipe) 4282 I915_WRITE(PIPESTAT(pipe), 0); 4283 I915_WRITE(IMR, 0xffffffff); 4284 I915_WRITE(IER, 0x0); 4285 4286 for_each_pipe(dev_priv, pipe) 4287 I915_WRITE(PIPESTAT(pipe), 4288 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4289 I915_WRITE(IIR, I915_READ(IIR)); 4290 } 4291 4292 static void intel_hpd_irq_reenable_work(struct work_struct *work) 4293 { 4294 struct drm_i915_private *dev_priv = 4295 container_of(work, typeof(*dev_priv), 4296 hotplug_reenable_work.work); 4297 struct drm_device *dev = dev_priv->dev; 4298 struct drm_mode_config *mode_config = &dev->mode_config; 4299 int i; 4300 4301 intel_runtime_pm_get(dev_priv); 4302 4303 spin_lock_irq(&dev_priv->irq_lock); 4304 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4305 struct drm_connector *connector; 4306 4307 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4308 continue; 4309 4310 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4311 4312 list_for_each_entry(connector, &mode_config->connector_list, head) { 4313 struct intel_connector *intel_connector = to_intel_connector(connector); 4314 4315 if (intel_connector->encoder->hpd_pin == i) { 4316 if (connector->polled != intel_connector->polled) 4317 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4318 connector->name); 4319 connector->polled = intel_connector->polled; 4320 if (!connector->polled) 4321 connector->polled = DRM_CONNECTOR_POLL_HPD; 4322 } 4323 } 4324 } 4325 if (dev_priv->display.hpd_irq_setup) 4326 dev_priv->display.hpd_irq_setup(dev); 4327 spin_unlock_irq(&dev_priv->irq_lock); 4328 4329 intel_runtime_pm_put(dev_priv); 4330 } 4331 4332 /** 4333 * intel_irq_init - initializes irq support 4334 * @dev_priv: i915 device instance 4335 * 4336 * This function initializes all the irq support including work items, timers 4337 * and all the vtables. It does not setup the interrupt itself though. 4338 */ 4339 void intel_irq_init(struct drm_i915_private *dev_priv) 4340 { 4341 struct drm_device *dev = dev_priv->dev; 4342 4343 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4344 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4345 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4346 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4347 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4348 4349 /* Let's track the enabled rps events */ 4350 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 4351 /* WaGsvRC0ResidencyMethod:vlv */ 4352 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4353 else 4354 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4355 4356 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4357 i915_hangcheck_elapsed, 4358 (unsigned long) dev); 4359 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, 4360 intel_hpd_irq_reenable_work); 4361 4362 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4363 4364 if (IS_GEN2(dev_priv)) { 4365 dev->max_vblank_count = 0; 4366 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4367 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4368 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4369 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4370 } else { 4371 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4372 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4373 } 4374 4375 /* 4376 * Opt out of the vblank disable timer on everything except gen2. 4377 * Gen2 doesn't have a hardware frame counter and so depends on 4378 * vblank interrupts to produce sane vblank seuquence numbers. 4379 */ 4380 if (!IS_GEN2(dev_priv)) 4381 dev->vblank_disable_immediate = true; 4382 4383 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4384 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4385 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4386 } 4387 4388 if (IS_CHERRYVIEW(dev_priv)) { 4389 dev->driver->irq_handler = cherryview_irq_handler; 4390 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4391 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4392 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4393 dev->driver->enable_vblank = valleyview_enable_vblank; 4394 dev->driver->disable_vblank = valleyview_disable_vblank; 4395 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4396 } else if (IS_VALLEYVIEW(dev_priv)) { 4397 dev->driver->irq_handler = valleyview_irq_handler; 4398 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4399 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4400 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4401 dev->driver->enable_vblank = valleyview_enable_vblank; 4402 dev->driver->disable_vblank = valleyview_disable_vblank; 4403 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4404 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4405 dev->driver->irq_handler = gen8_irq_handler; 4406 dev->driver->irq_preinstall = gen8_irq_reset; 4407 dev->driver->irq_postinstall = gen8_irq_postinstall; 4408 dev->driver->irq_uninstall = gen8_irq_uninstall; 4409 dev->driver->enable_vblank = gen8_enable_vblank; 4410 dev->driver->disable_vblank = gen8_disable_vblank; 4411 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4412 } else if (HAS_PCH_SPLIT(dev)) { 4413 dev->driver->irq_handler = ironlake_irq_handler; 4414 dev->driver->irq_preinstall = ironlake_irq_reset; 4415 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4416 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4417 dev->driver->enable_vblank = ironlake_enable_vblank; 4418 dev->driver->disable_vblank = ironlake_disable_vblank; 4419 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4420 } else { 4421 if (INTEL_INFO(dev_priv)->gen == 2) { 4422 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4423 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4424 dev->driver->irq_handler = i8xx_irq_handler; 4425 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4426 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4427 dev->driver->irq_preinstall = i915_irq_preinstall; 4428 dev->driver->irq_postinstall = i915_irq_postinstall; 4429 dev->driver->irq_uninstall = i915_irq_uninstall; 4430 dev->driver->irq_handler = i915_irq_handler; 4431 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4432 } else { 4433 dev->driver->irq_preinstall = i965_irq_preinstall; 4434 dev->driver->irq_postinstall = i965_irq_postinstall; 4435 dev->driver->irq_uninstall = i965_irq_uninstall; 4436 dev->driver->irq_handler = i965_irq_handler; 4437 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4438 } 4439 dev->driver->enable_vblank = i915_enable_vblank; 4440 dev->driver->disable_vblank = i915_disable_vblank; 4441 } 4442 } 4443 4444 /** 4445 * intel_hpd_init - initializes and enables hpd support 4446 * @dev_priv: i915 device instance 4447 * 4448 * This function enables the hotplug support. It requires that interrupts have 4449 * already been enabled with intel_irq_init_hw(). From this point on hotplug and 4450 * poll request can run concurrently to other code, so locking rules must be 4451 * obeyed. 4452 * 4453 * This is a separate step from interrupt enabling to simplify the locking rules 4454 * in the driver load and resume code. 4455 */ 4456 void intel_hpd_init(struct drm_i915_private *dev_priv) 4457 { 4458 struct drm_device *dev = dev_priv->dev; 4459 struct drm_mode_config *mode_config = &dev->mode_config; 4460 struct drm_connector *connector; 4461 int i; 4462 4463 for (i = 1; i < HPD_NUM_PINS; i++) { 4464 dev_priv->hpd_stats[i].hpd_cnt = 0; 4465 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4466 } 4467 list_for_each_entry(connector, &mode_config->connector_list, head) { 4468 struct intel_connector *intel_connector = to_intel_connector(connector); 4469 connector->polled = intel_connector->polled; 4470 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4471 connector->polled = DRM_CONNECTOR_POLL_HPD; 4472 if (intel_connector->mst_port) 4473 connector->polled = DRM_CONNECTOR_POLL_HPD; 4474 } 4475 4476 /* Interrupt setup is already guaranteed to be single-threaded, this is 4477 * just to make the assert_spin_locked checks happy. */ 4478 spin_lock_irq(&dev_priv->irq_lock); 4479 if (dev_priv->display.hpd_irq_setup) 4480 dev_priv->display.hpd_irq_setup(dev); 4481 spin_unlock_irq(&dev_priv->irq_lock); 4482 } 4483 4484 /** 4485 * intel_irq_install - enables the hardware interrupt 4486 * @dev_priv: i915 device instance 4487 * 4488 * This function enables the hardware interrupt handling, but leaves the hotplug 4489 * handling still disabled. It is called after intel_irq_init(). 4490 * 4491 * In the driver load and resume code we need working interrupts in a few places 4492 * but don't want to deal with the hassle of concurrent probe and hotplug 4493 * workers. Hence the split into this two-stage approach. 4494 */ 4495 int intel_irq_install(struct drm_i915_private *dev_priv) 4496 { 4497 /* 4498 * We enable some interrupt sources in our postinstall hooks, so mark 4499 * interrupts as enabled _before_ actually enabling them to avoid 4500 * special cases in our ordering checks. 4501 */ 4502 dev_priv->pm.irqs_enabled = true; 4503 4504 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4505 } 4506 4507 /** 4508 * intel_irq_uninstall - finilizes all irq handling 4509 * @dev_priv: i915 device instance 4510 * 4511 * This stops interrupt and hotplug handling and unregisters and frees all 4512 * resources acquired in the init functions. 4513 */ 4514 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4515 { 4516 drm_irq_uninstall(dev_priv->dev); 4517 intel_hpd_cancel_work(dev_priv); 4518 dev_priv->pm.irqs_enabled = false; 4519 } 4520 4521 /** 4522 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4523 * @dev_priv: i915 device instance 4524 * 4525 * This function is used to disable interrupts at runtime, both in the runtime 4526 * pm and the system suspend/resume code. 4527 */ 4528 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4529 { 4530 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4531 dev_priv->pm.irqs_enabled = false; 4532 } 4533 4534 /** 4535 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4536 * @dev_priv: i915 device instance 4537 * 4538 * This function is used to enable interrupts at runtime, both in the runtime 4539 * pm and the system suspend/resume code. 4540 */ 4541 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4542 { 4543 dev_priv->pm.irqs_enabled = true; 4544 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4545 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4546 } 4547