/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
406 */ 407 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 408 return 0; 409 410 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 411 SPRITE0_FLIP_DONE_INT_EN_VLV | 412 SPRITE1_FLIP_DONE_INT_EN_VLV); 413 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 414 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 415 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 416 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 417 418 return enable_mask; 419 } 420 421 void 422 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 423 u32 status_mask) 424 { 425 u32 enable_mask; 426 427 if (IS_VALLEYVIEW(dev_priv->dev)) 428 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 429 status_mask); 430 else 431 enable_mask = status_mask << 16; 432 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 433 } 434 435 void 436 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 437 u32 status_mask) 438 { 439 u32 enable_mask; 440 441 if (IS_VALLEYVIEW(dev_priv->dev)) 442 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 443 status_mask); 444 else 445 enable_mask = status_mask << 16; 446 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 447 } 448 449 /** 450 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 451 */ 452 static void i915_enable_asle_pipestat(struct drm_device *dev) 453 { 454 struct drm_i915_private *dev_priv = dev->dev_private; 455 456 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 457 return; 458 459 spin_lock_irq(&dev_priv->irq_lock); 460 461 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 462 if (INTEL_INFO(dev)->gen >= 4) 463 i915_enable_pipestat(dev_priv, PIPE_A, 464 PIPE_LEGACY_BLC_EVENT_STATUS); 465 466 spin_unlock_irq(&dev_priv->irq_lock); 467 } 468 469 /** 470 * i915_pipe_enabled - check if a pipe is enabled 471 * @dev: DRM device 472 * @pipe: pipe to check 473 * 474 * Reading certain registers when the pipe is disabled can hang the chip. 475 * Use this routine to make sure the PLL is running and the pipe is active 476 * before reading such registers if unsure. 477 */ 478 static int 479 i915_pipe_enabled(struct drm_device *dev, int pipe) 480 { 481 struct drm_i915_private *dev_priv = dev->dev_private; 482 483 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 484 /* Locking is horribly broken here, but whatever. */ 485 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 486 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 487 488 return intel_crtc->active; 489 } else { 490 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 491 } 492 } 493 494 /* 495 * This timing diagram depicts the video signal in and 496 * around the vertical blanking period. 497 * 498 * Assumptions about the fictitious mode used in this example: 499 * vblank_start >= 3 500 * vsync_start = vblank_start + 1 501 * vsync_end = vblank_start + 2 502 * vtotal = vblank_start + 3 503 * 504 * start of vblank: 505 * latch double buffered registers 506 * increment frame counter (ctg+) 507 * generate start of vblank interrupt (gen4+) 508 * | 509 * | frame start: 510 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 511 * | may be shifted forward 1-3 extra lines via PIPECONF 512 * | | 513 * | | start of vsync: 514 * | | generate vsync interrupt 515 * | | | 516 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 517 * . \hs/ . \hs/ \hs/ \hs/ . 
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *       pixel counter = vblank_start * htotal               pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
717 */ 718 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 719 720 /* convert to pixel counts */ 721 vbl_start *= htotal; 722 vbl_end *= htotal; 723 vtotal *= htotal; 724 725 /* 726 * In interlaced modes, the pixel counter counts all pixels, 727 * so one field will have htotal more pixels. In order to avoid 728 * the reported position from jumping backwards when the pixel 729 * counter is beyond the length of the shorter field, just 730 * clamp the position the length of the shorter field. This 731 * matches how the scanline counter based position works since 732 * the scanline counter doesn't count the two half lines. 733 */ 734 if (position >= vtotal) 735 position = vtotal - 1; 736 737 /* 738 * Start of vblank interrupt is triggered at start of hsync, 739 * just prior to the first active line of vblank. However we 740 * consider lines to start at the leading edge of horizontal 741 * active. So, should we get here before we've crossed into 742 * the horizontal active of the first line in vblank, we would 743 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 744 * always add htotal-hsync_start to the current pixel position. 745 */ 746 position = (position + htotal - hsync_start) % vtotal; 747 } 748 749 /* Get optional system timestamp after query. */ 750 if (etime) 751 *etime = ktime_get(); 752 753 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 754 755 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 756 757 in_vbl = position >= vbl_start && position < vbl_end; 758 759 /* 760 * While in vblank, position will be negative 761 * counting up towards 0 at vbl_end. And outside 762 * vblank, position will be positive counting 763 * up since vbl_end. 764 */ 765 if (position >= vbl_start) 766 position -= vbl_end; 767 else 768 position += vtotal - vbl_end; 769 770 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 771 *vpos = position; 772 *hpos = 0; 773 } else { 774 *vpos = position / htotal; 775 *hpos = position - (*vpos * htotal); 776 } 777 778 /* In vblank? 
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
898 */ 899 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 900 901 static void i915_hotplug_work_func(struct work_struct *work) 902 { 903 struct drm_i915_private *dev_priv = 904 container_of(work, struct drm_i915_private, hotplug_work); 905 struct drm_device *dev = dev_priv->dev; 906 struct drm_mode_config *mode_config = &dev->mode_config; 907 struct intel_connector *intel_connector; 908 struct intel_encoder *intel_encoder; 909 struct drm_connector *connector; 910 bool hpd_disabled = false; 911 bool changed = false; 912 u32 hpd_event_bits; 913 914 mutex_lock(&mode_config->mutex); 915 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 916 917 spin_lock_irq(&dev_priv->irq_lock); 918 919 hpd_event_bits = dev_priv->hpd_event_bits; 920 dev_priv->hpd_event_bits = 0; 921 list_for_each_entry(connector, &mode_config->connector_list, head) { 922 intel_connector = to_intel_connector(connector); 923 if (!intel_connector->encoder) 924 continue; 925 intel_encoder = intel_connector->encoder; 926 if (intel_encoder->hpd_pin > HPD_NONE && 927 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 928 connector->polled == DRM_CONNECTOR_POLL_HPD) { 929 DRM_INFO("HPD interrupt storm detected on connector %s: " 930 "switching from hotplug detection to polling\n", 931 connector->name); 932 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 933 connector->polled = DRM_CONNECTOR_POLL_CONNECT 934 | DRM_CONNECTOR_POLL_DISCONNECT; 935 hpd_disabled = true; 936 } 937 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 938 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 939 connector->name, intel_encoder->hpd_pin); 940 } 941 } 942 /* if there were no outputs to poll, poll was disabled, 943 * therefore make sure it's enabled when disabling HPD on 944 * some connectors */ 945 if (hpd_disabled) { 946 drm_kms_helper_poll_enable(dev); 947 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, 948 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 949 } 950 951 spin_unlock_irq(&dev_priv->irq_lock); 952 953 list_for_each_entry(connector, &mode_config->connector_list, head) { 954 intel_connector = to_intel_connector(connector); 955 if (!intel_connector->encoder) 956 continue; 957 intel_encoder = intel_connector->encoder; 958 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 959 if (intel_encoder->hot_plug) 960 intel_encoder->hot_plug(intel_encoder); 961 if (intel_hpd_irq_event(dev, connector)) 962 changed = true; 963 } 964 } 965 mutex_unlock(&mode_config->mutex); 966 967 if (changed) 968 drm_kms_helper_hotplug_event(dev); 969 } 970 971 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 972 { 973 struct drm_i915_private *dev_priv = dev->dev_private; 974 u32 busy_up, busy_down, max_avg, min_avg; 975 u8 new_delay; 976 977 spin_lock(&mchdev_lock); 978 979 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 980 981 new_delay = dev_priv->ips.cur_delay; 982 983 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 984 busy_up = I915_READ(RCPREVBSYTUPAVG); 985 busy_down = I915_READ(RCPREVBSYTDNAVG); 986 max_avg = I915_READ(RCBMAXAVG); 987 min_avg = I915_READ(RCBMINAVG); 988 989 /* Handle RCS change request from hw */ 990 if (busy_up > max_avg) { 991 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 992 new_delay = dev_priv->ips.cur_delay - 1; 993 if (new_delay < dev_priv->ips.max_delay) 994 new_delay = dev_priv->ips.max_delay; 995 } else if (busy_down < min_avg) { 996 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 997 new_delay = 
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));


	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}


	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
1601 */ 1602 if (queue_dig) 1603 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); 1604 if (queue_hp) 1605 schedule_work(&dev_priv->hotplug_work); 1606 } 1607 1608 static void gmbus_irq_handler(struct drm_device *dev) 1609 { 1610 struct drm_i915_private *dev_priv = dev->dev_private; 1611 1612 wake_up_all(&dev_priv->gmbus_wait_queue); 1613 } 1614 1615 static void dp_aux_irq_handler(struct drm_device *dev) 1616 { 1617 struct drm_i915_private *dev_priv = dev->dev_private; 1618 1619 wake_up_all(&dev_priv->gmbus_wait_queue); 1620 } 1621 1622 #if defined(CONFIG_DEBUG_FS) 1623 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1624 uint32_t crc0, uint32_t crc1, 1625 uint32_t crc2, uint32_t crc3, 1626 uint32_t crc4) 1627 { 1628 struct drm_i915_private *dev_priv = dev->dev_private; 1629 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1630 struct intel_pipe_crc_entry *entry; 1631 int head, tail; 1632 1633 spin_lock(&pipe_crc->lock); 1634 1635 if (!pipe_crc->entries) { 1636 spin_unlock(&pipe_crc->lock); 1637 DRM_DEBUG_KMS("spurious interrupt\n"); 1638 return; 1639 } 1640 1641 head = pipe_crc->head; 1642 tail = pipe_crc->tail; 1643 1644 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1645 spin_unlock(&pipe_crc->lock); 1646 DRM_ERROR("CRC buffer overflowing\n"); 1647 return; 1648 } 1649 1650 entry = &pipe_crc->entries[head]; 1651 1652 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1653 entry->crc[0] = crc0; 1654 entry->crc[1] = crc1; 1655 entry->crc[2] = crc2; 1656 entry->crc[3] = crc3; 1657 entry->crc[4] = crc4; 1658 1659 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1660 pipe_crc->head = head; 1661 1662 spin_unlock(&pipe_crc->lock); 1663 1664 wake_up_interruptible(&pipe_crc->wq); 1665 } 1666 #else 1667 static inline void 1668 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1669 uint32_t crc0, uint32_t crc1, 1670 uint32_t crc2, uint32_t crc3, 1671 uint32_t crc4) {} 1672 #endif 1673 1674 1675 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1676 { 1677 struct drm_i915_private *dev_priv = dev->dev_private; 1678 1679 display_pipe_crc_irq_handler(dev, pipe, 1680 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1681 0, 0, 0, 0); 1682 } 1683 1684 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1685 { 1686 struct drm_i915_private *dev_priv = dev->dev_private; 1687 1688 display_pipe_crc_irq_handler(dev, pipe, 1689 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1690 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1691 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1692 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1693 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1694 } 1695 1696 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1697 { 1698 struct drm_i915_private *dev_priv = dev->dev_private; 1699 uint32_t res1, res2; 1700 1701 if (INTEL_INFO(dev)->gen >= 3) 1702 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1703 else 1704 res1 = 0; 1705 1706 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1707 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1708 else 1709 res2 = 0; 1710 1711 display_pipe_crc_irq_handler(dev, pipe, 1712 I915_READ(PIPE_CRC_RES_RED(pipe)), 1713 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1714 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1715 res1, res2); 1716 } 1717 1718 /* The RPS events need forcewake, so we add them to a work queue and mask their 1719 * IMR bits until the work is done. Other interrupts can be processed without 1720 * the work queue. 
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
1841 */ 1842 POSTING_READ(PORT_HOTPLUG_STAT); 1843 1844 if (IS_G4X(dev)) { 1845 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1846 1847 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 1848 } else { 1849 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1850 1851 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 1852 } 1853 1854 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 1855 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1856 dp_aux_irq_handler(dev); 1857 } 1858 } 1859 1860 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1861 { 1862 struct drm_device *dev = arg; 1863 struct drm_i915_private *dev_priv = dev->dev_private; 1864 u32 iir, gt_iir, pm_iir; 1865 irqreturn_t ret = IRQ_NONE; 1866 1867 while (true) { 1868 /* Find, clear, then process each source of interrupt */ 1869 1870 gt_iir = I915_READ(GTIIR); 1871 if (gt_iir) 1872 I915_WRITE(GTIIR, gt_iir); 1873 1874 pm_iir = I915_READ(GEN6_PMIIR); 1875 if (pm_iir) 1876 I915_WRITE(GEN6_PMIIR, pm_iir); 1877 1878 iir = I915_READ(VLV_IIR); 1879 if (iir) { 1880 /* Consume port before clearing IIR or we'll miss events */ 1881 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1882 i9xx_hpd_irq_handler(dev); 1883 I915_WRITE(VLV_IIR, iir); 1884 } 1885 1886 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1887 goto out; 1888 1889 ret = IRQ_HANDLED; 1890 1891 if (gt_iir) 1892 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1893 if (pm_iir) 1894 gen6_rps_irq_handler(dev_priv, pm_iir); 1895 /* Call regardless, as some status bits might not be 1896 * signalled in iir */ 1897 valleyview_pipestat_irq_handler(dev, iir); 1898 } 1899 1900 out: 1901 return ret; 1902 } 1903 1904 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1905 { 1906 struct drm_device *dev = arg; 1907 struct drm_i915_private *dev_priv = dev->dev_private; 1908 u32 master_ctl, iir; 1909 irqreturn_t ret = IRQ_NONE; 1910 1911 for (;;) { 1912 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1913 iir = I915_READ(VLV_IIR); 1914 1915 if (master_ctl == 0 && iir == 0) 1916 break; 1917 1918 ret = IRQ_HANDLED; 1919 1920 I915_WRITE(GEN8_MASTER_IRQ, 0); 1921 1922 /* Find, clear, then process each source of interrupt */ 1923 1924 if (iir) { 1925 /* Consume port before clearing IIR or we'll miss events */ 1926 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1927 i9xx_hpd_irq_handler(dev); 1928 I915_WRITE(VLV_IIR, iir); 1929 } 1930 1931 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1932 1933 /* Call regardless, as some status bits might not be 1934 * signalled in iir */ 1935 valleyview_pipestat_irq_handler(dev, iir); 1936 1937 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1938 POSTING_READ(GEN8_MASTER_IRQ); 1939 } 1940 1941 return ret; 1942 } 1943 1944 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1945 { 1946 struct drm_i915_private *dev_priv = dev->dev_private; 1947 int pipe; 1948 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1949 u32 dig_hotplug_reg; 1950 1951 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1952 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1953 1954 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 1955 1956 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1957 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1958 SDE_AUDIO_POWER_SHIFT); 1959 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1960 port_name(port)); 1961 } 1962 1963 if (pch_iir & SDE_AUX_MASK) 1964 dp_aux_irq_handler(dev); 1965 1966 if (pch_iir & SDE_GMBUS) 1967 gmbus_irq_handler(dev); 1968 1969 
if (pch_iir & SDE_AUDIO_HDCP_MASK) 1970 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1971 1972 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1973 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1974 1975 if (pch_iir & SDE_POISON) 1976 DRM_ERROR("PCH poison interrupt\n"); 1977 1978 if (pch_iir & SDE_FDI_MASK) 1979 for_each_pipe(dev_priv, pipe) 1980 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1981 pipe_name(pipe), 1982 I915_READ(FDI_RX_IIR(pipe))); 1983 1984 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1985 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1986 1987 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1988 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1989 1990 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1991 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1992 1993 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1994 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1995 } 1996 1997 static void ivb_err_int_handler(struct drm_device *dev) 1998 { 1999 struct drm_i915_private *dev_priv = dev->dev_private; 2000 u32 err_int = I915_READ(GEN7_ERR_INT); 2001 enum pipe pipe; 2002 2003 if (err_int & ERR_INT_POISON) 2004 DRM_ERROR("Poison interrupt\n"); 2005 2006 for_each_pipe(dev_priv, pipe) { 2007 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2008 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2009 2010 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2011 if (IS_IVYBRIDGE(dev)) 2012 ivb_pipe_crc_irq_handler(dev, pipe); 2013 else 2014 hsw_pipe_crc_irq_handler(dev, pipe); 2015 } 2016 } 2017 2018 I915_WRITE(GEN7_ERR_INT, err_int); 2019 } 2020 2021 static void cpt_serr_int_handler(struct drm_device *dev) 2022 { 2023 struct drm_i915_private *dev_priv = dev->dev_private; 2024 u32 serr_int = I915_READ(SERR_INT); 2025 2026 if (serr_int & SERR_INT_POISON) 2027 DRM_ERROR("PCH poison interrupt\n"); 2028 2029 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2030 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2031 2032 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2033 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2034 2035 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2036 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2037 2038 I915_WRITE(SERR_INT, serr_int); 2039 } 2040 2041 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2042 { 2043 struct drm_i915_private *dev_priv = dev->dev_private; 2044 int pipe; 2045 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2046 u32 dig_hotplug_reg; 2047 2048 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2049 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2050 2051 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 2052 2053 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2054 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2055 SDE_AUDIO_POWER_SHIFT_CPT); 2056 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2057 port_name(port)); 2058 } 2059 2060 if (pch_iir & SDE_AUX_MASK_CPT) 2061 dp_aux_irq_handler(dev); 2062 2063 if (pch_iir & SDE_GMBUS_CPT) 2064 gmbus_irq_handler(dev); 2065 2066 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2067 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2068 2069 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2070 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2071 2072 if (pch_iir & SDE_FDI_MASK_CPT) 2073 for_each_pipe(dev_priv, pipe) 2074 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2075 pipe_name(pipe), 2076 I915_READ(FDI_RX_IIR(pipe))); 2077 2078 if (pch_iir & SDE_ERROR_CPT) 2079 cpt_serr_int_handler(dev); 
2080 } 2081 2082 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2083 { 2084 struct drm_i915_private *dev_priv = dev->dev_private; 2085 enum pipe pipe; 2086 2087 if (de_iir & DE_AUX_CHANNEL_A) 2088 dp_aux_irq_handler(dev); 2089 2090 if (de_iir & DE_GSE) 2091 intel_opregion_asle_intr(dev); 2092 2093 if (de_iir & DE_POISON) 2094 DRM_ERROR("Poison interrupt\n"); 2095 2096 for_each_pipe(dev_priv, pipe) { 2097 if (de_iir & DE_PIPE_VBLANK(pipe) && 2098 intel_pipe_handle_vblank(dev, pipe)) 2099 intel_check_page_flip(dev, pipe); 2100 2101 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2102 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2103 2104 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2105 i9xx_pipe_crc_irq_handler(dev, pipe); 2106 2107 /* plane/pipes map 1:1 on ilk+ */ 2108 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2109 intel_prepare_page_flip(dev, pipe); 2110 intel_finish_page_flip_plane(dev, pipe); 2111 } 2112 } 2113 2114 /* check event from PCH */ 2115 if (de_iir & DE_PCH_EVENT) { 2116 u32 pch_iir = I915_READ(SDEIIR); 2117 2118 if (HAS_PCH_CPT(dev)) 2119 cpt_irq_handler(dev, pch_iir); 2120 else 2121 ibx_irq_handler(dev, pch_iir); 2122 2123 /* should clear PCH hotplug event before clear CPU irq */ 2124 I915_WRITE(SDEIIR, pch_iir); 2125 } 2126 2127 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2128 ironlake_rps_change_irq_handler(dev); 2129 } 2130 2131 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2132 { 2133 struct drm_i915_private *dev_priv = dev->dev_private; 2134 enum pipe pipe; 2135 2136 if (de_iir & DE_ERR_INT_IVB) 2137 ivb_err_int_handler(dev); 2138 2139 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2140 dp_aux_irq_handler(dev); 2141 2142 if (de_iir & DE_GSE_IVB) 2143 intel_opregion_asle_intr(dev); 2144 2145 for_each_pipe(dev_priv, pipe) { 2146 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2147 intel_pipe_handle_vblank(dev, pipe)) 2148 intel_check_page_flip(dev, pipe); 2149 2150 /* plane/pipes map 1:1 on ilk+ */ 2151 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2152 intel_prepare_page_flip(dev, pipe); 2153 intel_finish_page_flip_plane(dev, pipe); 2154 } 2155 } 2156 2157 /* check event from PCH */ 2158 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2159 u32 pch_iir = I915_READ(SDEIIR); 2160 2161 cpt_irq_handler(dev, pch_iir); 2162 2163 /* clear PCH hotplug event before clear CPU irq */ 2164 I915_WRITE(SDEIIR, pch_iir); 2165 } 2166 } 2167 2168 /* 2169 * To handle irqs with the minimum potential races with fresh interrupts, we: 2170 * 1 - Disable Master Interrupt Control. 2171 * 2 - Find the source(s) of the interrupt. 2172 * 3 - Clear the Interrupt Identity bits (IIR). 2173 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2174 * 5 - Re-enable Master Interrupt Control. 2175 */ 2176 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2177 { 2178 struct drm_device *dev = arg; 2179 struct drm_i915_private *dev_priv = dev->dev_private; 2180 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2181 irqreturn_t ret = IRQ_NONE; 2182 2183 /* We get interrupts on unclaimed registers, so check for this before we 2184 * do any I915_{READ,WRITE}. */ 2185 intel_uncore_check_errors(dev); 2186 2187 /* disable master interrupt before clearing iir */ 2188 de_ier = I915_READ(DEIER); 2189 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2190 POSTING_READ(DEIER); 2191 2192 /* Disable south interrupts. 
We'll only write to SDEIIR once, so further 2193 * interrupts will will be stored on its back queue, and then we'll be 2194 * able to process them after we restore SDEIER (as soon as we restore 2195 * it, we'll get an interrupt if SDEIIR still has something to process 2196 * due to its back queue). */ 2197 if (!HAS_PCH_NOP(dev)) { 2198 sde_ier = I915_READ(SDEIER); 2199 I915_WRITE(SDEIER, 0); 2200 POSTING_READ(SDEIER); 2201 } 2202 2203 /* Find, clear, then process each source of interrupt */ 2204 2205 gt_iir = I915_READ(GTIIR); 2206 if (gt_iir) { 2207 I915_WRITE(GTIIR, gt_iir); 2208 ret = IRQ_HANDLED; 2209 if (INTEL_INFO(dev)->gen >= 6) 2210 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2211 else 2212 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2213 } 2214 2215 de_iir = I915_READ(DEIIR); 2216 if (de_iir) { 2217 I915_WRITE(DEIIR, de_iir); 2218 ret = IRQ_HANDLED; 2219 if (INTEL_INFO(dev)->gen >= 7) 2220 ivb_display_irq_handler(dev, de_iir); 2221 else 2222 ilk_display_irq_handler(dev, de_iir); 2223 } 2224 2225 if (INTEL_INFO(dev)->gen >= 6) { 2226 u32 pm_iir = I915_READ(GEN6_PMIIR); 2227 if (pm_iir) { 2228 I915_WRITE(GEN6_PMIIR, pm_iir); 2229 ret = IRQ_HANDLED; 2230 gen6_rps_irq_handler(dev_priv, pm_iir); 2231 } 2232 } 2233 2234 I915_WRITE(DEIER, de_ier); 2235 POSTING_READ(DEIER); 2236 if (!HAS_PCH_NOP(dev)) { 2237 I915_WRITE(SDEIER, sde_ier); 2238 POSTING_READ(SDEIER); 2239 } 2240 2241 return ret; 2242 } 2243 2244 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2245 { 2246 struct drm_device *dev = arg; 2247 struct drm_i915_private *dev_priv = dev->dev_private; 2248 u32 master_ctl; 2249 irqreturn_t ret = IRQ_NONE; 2250 uint32_t tmp = 0; 2251 enum pipe pipe; 2252 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2253 2254 if (IS_GEN9(dev)) 2255 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2256 GEN9_AUX_CHANNEL_D; 2257 2258 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2259 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2260 if (!master_ctl) 2261 return IRQ_NONE; 2262 2263 I915_WRITE(GEN8_MASTER_IRQ, 0); 2264 POSTING_READ(GEN8_MASTER_IRQ); 2265 2266 /* Find, clear, then process each source of interrupt */ 2267 2268 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2269 2270 if (master_ctl & GEN8_DE_MISC_IRQ) { 2271 tmp = I915_READ(GEN8_DE_MISC_IIR); 2272 if (tmp) { 2273 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2274 ret = IRQ_HANDLED; 2275 if (tmp & GEN8_DE_MISC_GSE) 2276 intel_opregion_asle_intr(dev); 2277 else 2278 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2279 } 2280 else 2281 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2282 } 2283 2284 if (master_ctl & GEN8_DE_PORT_IRQ) { 2285 tmp = I915_READ(GEN8_DE_PORT_IIR); 2286 if (tmp) { 2287 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2288 ret = IRQ_HANDLED; 2289 2290 if (tmp & aux_mask) 2291 dp_aux_irq_handler(dev); 2292 else 2293 DRM_ERROR("Unexpected DE Port interrupt\n"); 2294 } 2295 else 2296 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2297 } 2298 2299 for_each_pipe(dev_priv, pipe) { 2300 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2301 2302 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2303 continue; 2304 2305 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2306 if (pipe_iir) { 2307 ret = IRQ_HANDLED; 2308 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2309 2310 if (pipe_iir & GEN8_PIPE_VBLANK && 2311 intel_pipe_handle_vblank(dev, pipe)) 2312 intel_check_page_flip(dev, pipe); 2313 2314 if (IS_GEN9(dev)) 2315 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2316 else 2317 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2318 
2319 if (flip_done) { 2320 intel_prepare_page_flip(dev, pipe); 2321 intel_finish_page_flip_plane(dev, pipe); 2322 } 2323 2324 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2325 hsw_pipe_crc_irq_handler(dev, pipe); 2326 2327 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2328 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2329 pipe); 2330 2331 2332 if (IS_GEN9(dev)) 2333 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2334 else 2335 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2336 2337 if (fault_errors) 2338 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2339 pipe_name(pipe), 2340 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2341 } else 2342 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2343 } 2344 2345 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2346 /* 2347 * FIXME(BDW): Assume for now that the new interrupt handling 2348 * scheme also closed the SDE interrupt handling race we've seen 2349 * on older pch-split platforms. But this needs testing. 2350 */ 2351 u32 pch_iir = I915_READ(SDEIIR); 2352 if (pch_iir) { 2353 I915_WRITE(SDEIIR, pch_iir); 2354 ret = IRQ_HANDLED; 2355 cpt_irq_handler(dev, pch_iir); 2356 } else 2357 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2358 2359 } 2360 2361 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2362 POSTING_READ(GEN8_MASTER_IRQ); 2363 2364 return ret; 2365 } 2366 2367 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2368 bool reset_completed) 2369 { 2370 struct intel_engine_cs *ring; 2371 int i; 2372 2373 /* 2374 * Notify all waiters for GPU completion events that reset state has 2375 * been changed, and that they need to restart their wait after 2376 * checking for potential errors (and bail out to drop locks if there is 2377 * a gpu reset pending so that i915_error_work_func can acquire them). 2378 */ 2379 2380 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2381 for_each_ring(ring, dev_priv, i) 2382 wake_up_all(&ring->irq_queue); 2383 2384 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2385 wake_up_all(&dev_priv->pending_flip_queue); 2386 2387 /* 2388 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2389 * reset state is cleared. 2390 */ 2391 if (reset_completed) 2392 wake_up_all(&dev_priv->gpu_error.reset_queue); 2393 } 2394 2395 /** 2396 * i915_error_work_func - do process context error handling work 2397 * @work: work struct 2398 * 2399 * Fire an error uevent so userspace can see that a hang or error 2400 * was detected. 2401 */ 2402 static void i915_error_work_func(struct work_struct *work) 2403 { 2404 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2405 work); 2406 struct drm_i915_private *dev_priv = 2407 container_of(error, struct drm_i915_private, gpu_error); 2408 struct drm_device *dev = dev_priv->dev; 2409 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2410 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2411 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2412 int ret; 2413 2414 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2415 2416 /* 2417 * Note that there's only one work item which does gpu resets, so we 2418 * need not worry about concurrent gpu resets potentially incrementing 2419 * error->reset_counter twice. We only need to take care of another 2420 * racing irq/hangcheck declaring the gpu dead for a second time. 
A
2421 * quick check for that is good enough: schedule_work ensures the
2422 * correct ordering between hang detection and this work item, and since
2423 * the reset in-progress bit is only ever set by code outside of this
2424 * work we don't need to worry about any other races.
2425 */
2426 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2427 DRM_DEBUG_DRIVER("resetting chip\n");
2428 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2429 reset_event);
2430
2431 /*
2432 * In most cases it's guaranteed that we get here with an RPM
2433 * reference held, for example because there is a pending GPU
2434 * request that won't finish until the reset is done. This
2435 * isn't the case at least when we get here by doing a
2436 * simulated reset via debugfs, so get an RPM reference.
2437 */
2438 intel_runtime_pm_get(dev_priv);
2439
2440 intel_prepare_reset(dev);
2441
2442 /*
2443 * All state reset _must_ be completed before we update the
2444 * reset counter, for otherwise waiters might miss the reset
2445 * pending state and not properly drop locks, resulting in
2446 * deadlocks with the reset work.
2447 */
2448 ret = i915_reset(dev);
2449
2450 intel_finish_reset(dev);
2451
2452 intel_runtime_pm_put(dev_priv);
2453
2454 if (ret == 0) {
2455 /*
2456 * After all the gem state is reset, increment the reset
2457 * counter and wake up everyone waiting for the reset to
2458 * complete.
2459 *
2460 * Since unlock operations are a one-sided barrier only,
2461 * we need to insert a barrier here to order any seqno
2462 * updates before
2463 * the counter increment.
2464 */
2465 smp_mb__before_atomic();
2466 atomic_inc(&dev_priv->gpu_error.reset_counter);
2467
2468 kobject_uevent_env(&dev->primary->kdev->kobj,
2469 KOBJ_CHANGE, reset_done_event);
2470 } else {
2471 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2472 }
2473
2474 /*
2475 * Note: The wake_up also serves as a memory barrier so that
2476 * waiters see the updated value of the reset counter atomic_t.
2477 */ 2478 i915_error_wake_up(dev_priv, true); 2479 } 2480 } 2481 2482 static void i915_report_and_clear_eir(struct drm_device *dev) 2483 { 2484 struct drm_i915_private *dev_priv = dev->dev_private; 2485 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2486 u32 eir = I915_READ(EIR); 2487 int pipe, i; 2488 2489 if (!eir) 2490 return; 2491 2492 pr_err("render error detected, EIR: 0x%08x\n", eir); 2493 2494 i915_get_extra_instdone(dev, instdone); 2495 2496 if (IS_G4X(dev)) { 2497 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2498 u32 ipeir = I915_READ(IPEIR_I965); 2499 2500 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2501 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2502 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2503 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2504 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2505 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2506 I915_WRITE(IPEIR_I965, ipeir); 2507 POSTING_READ(IPEIR_I965); 2508 } 2509 if (eir & GM45_ERROR_PAGE_TABLE) { 2510 u32 pgtbl_err = I915_READ(PGTBL_ER); 2511 pr_err("page table error\n"); 2512 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2513 I915_WRITE(PGTBL_ER, pgtbl_err); 2514 POSTING_READ(PGTBL_ER); 2515 } 2516 } 2517 2518 if (!IS_GEN2(dev)) { 2519 if (eir & I915_ERROR_PAGE_TABLE) { 2520 u32 pgtbl_err = I915_READ(PGTBL_ER); 2521 pr_err("page table error\n"); 2522 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2523 I915_WRITE(PGTBL_ER, pgtbl_err); 2524 POSTING_READ(PGTBL_ER); 2525 } 2526 } 2527 2528 if (eir & I915_ERROR_MEMORY_REFRESH) { 2529 pr_err("memory refresh error:\n"); 2530 for_each_pipe(dev_priv, pipe) 2531 pr_err("pipe %c stat: 0x%08x\n", 2532 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2533 /* pipestat has already been acked */ 2534 } 2535 if (eir & I915_ERROR_INSTRUCTION) { 2536 pr_err("instruction error\n"); 2537 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2538 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2539 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2540 if (INTEL_INFO(dev)->gen < 4) { 2541 u32 ipeir = I915_READ(IPEIR); 2542 2543 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2544 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2545 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2546 I915_WRITE(IPEIR, ipeir); 2547 POSTING_READ(IPEIR); 2548 } else { 2549 u32 ipeir = I915_READ(IPEIR_I965); 2550 2551 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2552 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2553 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2554 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2555 I915_WRITE(IPEIR_I965, ipeir); 2556 POSTING_READ(IPEIR_I965); 2557 } 2558 } 2559 2560 I915_WRITE(EIR, eir); 2561 POSTING_READ(EIR); 2562 eir = I915_READ(EIR); 2563 if (eir) { 2564 /* 2565 * some errors might have become stuck, 2566 * mask them. 2567 */ 2568 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2569 I915_WRITE(EMR, I915_READ(EMR) | eir); 2570 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2571 } 2572 } 2573 2574 /** 2575 * i915_handle_error - handle an error interrupt 2576 * @dev: drm device 2577 * 2578 * Do some basic checking of regsiter state at error interrupt time and 2579 * dump it to the syslog. Also call i915_capture_error_state() to make 2580 * sure we get a record and make it available in debugfs. Fire a uevent 2581 * so userspace knows something bad happened (should trigger collection 2582 * of a ring dump etc.). 2583 */ 2584 void i915_handle_error(struct drm_device *dev, bool wedged, 2585 const char *fmt, ...) 
2586 { 2587 struct drm_i915_private *dev_priv = dev->dev_private; 2588 va_list args; 2589 char error_msg[80]; 2590 2591 va_start(args, fmt); 2592 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2593 va_end(args); 2594 2595 i915_capture_error_state(dev, wedged, error_msg); 2596 i915_report_and_clear_eir(dev); 2597 2598 if (wedged) { 2599 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2600 &dev_priv->gpu_error.reset_counter); 2601 2602 /* 2603 * Wakeup waiting processes so that the reset work function 2604 * i915_error_work_func doesn't deadlock trying to grab various 2605 * locks. By bumping the reset counter first, the woken 2606 * processes will see a reset in progress and back off, 2607 * releasing their locks and then wait for the reset completion. 2608 * We must do this for _all_ gpu waiters that might hold locks 2609 * that the reset work needs to acquire. 2610 * 2611 * Note: The wake_up serves as the required memory barrier to 2612 * ensure that the waiters see the updated value of the reset 2613 * counter atomic_t. 2614 */ 2615 i915_error_wake_up(dev_priv, false); 2616 } 2617 2618 /* 2619 * Our reset work can grab modeset locks (since it needs to reset the 2620 * state of outstanding pagelips). Hence it must not be run on our own 2621 * dev-priv->wq work queue for otherwise the flush_work in the pageflip 2622 * code will deadlock. 2623 */ 2624 schedule_work(&dev_priv->gpu_error.work); 2625 } 2626 2627 /* Called from drm generic code, passed 'crtc' which 2628 * we use as a pipe index 2629 */ 2630 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2631 { 2632 struct drm_i915_private *dev_priv = dev->dev_private; 2633 unsigned long irqflags; 2634 2635 if (!i915_pipe_enabled(dev, pipe)) 2636 return -EINVAL; 2637 2638 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2639 if (INTEL_INFO(dev)->gen >= 4) 2640 i915_enable_pipestat(dev_priv, pipe, 2641 PIPE_START_VBLANK_INTERRUPT_STATUS); 2642 else 2643 i915_enable_pipestat(dev_priv, pipe, 2644 PIPE_VBLANK_INTERRUPT_STATUS); 2645 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2646 2647 return 0; 2648 } 2649 2650 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2651 { 2652 struct drm_i915_private *dev_priv = dev->dev_private; 2653 unsigned long irqflags; 2654 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2655 DE_PIPE_VBLANK(pipe); 2656 2657 if (!i915_pipe_enabled(dev, pipe)) 2658 return -EINVAL; 2659 2660 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2661 ironlake_enable_display_irq(dev_priv, bit); 2662 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2663 2664 return 0; 2665 } 2666 2667 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2668 { 2669 struct drm_i915_private *dev_priv = dev->dev_private; 2670 unsigned long irqflags; 2671 2672 if (!i915_pipe_enabled(dev, pipe)) 2673 return -EINVAL; 2674 2675 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2676 i915_enable_pipestat(dev_priv, pipe, 2677 PIPE_START_VBLANK_INTERRUPT_STATUS); 2678 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2679 2680 return 0; 2681 } 2682 2683 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2684 { 2685 struct drm_i915_private *dev_priv = dev->dev_private; 2686 unsigned long irqflags; 2687 2688 if (!i915_pipe_enabled(dev, pipe)) 2689 return -EINVAL; 2690 2691 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2692 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2693 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2694 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2695 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2696 return 0; 2697 } 2698 2699 /* Called from drm generic code, passed 'crtc' which 2700 * we use as a pipe index 2701 */ 2702 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2703 { 2704 struct drm_i915_private *dev_priv = dev->dev_private; 2705 unsigned long irqflags; 2706 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2708 i915_disable_pipestat(dev_priv, pipe, 2709 PIPE_VBLANK_INTERRUPT_STATUS | 2710 PIPE_START_VBLANK_INTERRUPT_STATUS); 2711 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2712 } 2713 2714 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2715 { 2716 struct drm_i915_private *dev_priv = dev->dev_private; 2717 unsigned long irqflags; 2718 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2719 DE_PIPE_VBLANK(pipe); 2720 2721 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2722 ironlake_disable_display_irq(dev_priv, bit); 2723 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2724 } 2725 2726 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2727 { 2728 struct drm_i915_private *dev_priv = dev->dev_private; 2729 unsigned long irqflags; 2730 2731 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2732 i915_disable_pipestat(dev_priv, pipe, 2733 PIPE_START_VBLANK_INTERRUPT_STATUS); 2734 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2735 } 2736 2737 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2738 { 2739 struct drm_i915_private *dev_priv = dev->dev_private; 2740 unsigned long irqflags; 2741 2742 if (!i915_pipe_enabled(dev, pipe)) 2743 return; 2744 2745 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2746 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2747 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2748 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2749 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2750 } 2751 2752 static u32 2753 ring_last_seqno(struct intel_engine_cs *ring) 2754 { 2755 return list_entry(ring->request_list.prev, 2756 struct drm_i915_gem_request, list)->seqno; 2757 } 2758 2759 static bool 2760 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2761 { 2762 return (list_empty(&ring->request_list) || 2763 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2764 } 2765 2766 static bool 2767 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2768 { 2769 if (INTEL_INFO(dev)->gen >= 8) { 2770 return (ipehr >> 23) == 0x1c; 2771 } else { 2772 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2773 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2774 MI_SEMAPHORE_REGISTER); 2775 } 2776 } 2777 2778 static struct intel_engine_cs * 2779 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2780 { 2781 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2782 struct intel_engine_cs *signaller; 2783 int i; 2784 2785 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2786 for_each_ring(signaller, dev_priv, i) { 2787 if (ring == signaller) 2788 continue; 2789 2790 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2791 return signaller; 2792 } 2793 } else { 2794 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2795 2796 for_each_ring(signaller, dev_priv, i) { 2797 if(ring == signaller) 2798 continue; 2799 2800 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2801 return signaller; 2802 } 2803 } 2804 2805 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2806 ring->id, ipehr, offset); 2807 2808 return NULL; 2809 } 2810 2811 static struct intel_engine_cs * 2812 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2813 { 2814 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2815 u32 cmd, ipehr, head; 2816 u64 offset = 0; 2817 int i, backwards; 2818 2819 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2820 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2821 return NULL; 2822 2823 /* 2824 * HEAD is likely pointing to the dword after the actual command, 2825 * so scan backwards until we find the MBOX. But limit it to just 3 2826 * or 4 dwords depending on the semaphore wait command size. 2827 * Note that we don't care about ACTHD here since that might 2828 * point at at batch, and semaphores are always emitted into the 2829 * ringbuffer itself. 
2830 */
2831 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2832 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2833
2834 for (i = backwards; i; --i) {
2835 /*
2836 * Be paranoid and presume the hw has gone off into the wild -
2837 * our ring is smaller than what the hardware (and hence
2838 * HEAD_ADDR) allows. Also handles wrap-around.
2839 */
2840 head &= ring->buffer->size - 1;
2841
2842 /* This here seems to blow up */
2843 cmd = ioread32(ring->buffer->virtual_start + head);
2844 if (cmd == ipehr)
2845 break;
2846
2847 head -= 4;
2848 }
2849
2850 if (!i)
2851 return NULL;
2852
2853 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2854 if (INTEL_INFO(ring->dev)->gen >= 8) {
2855 offset = ioread32(ring->buffer->virtual_start + head + 12);
2856 offset <<= 32;
2857 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2858 }
2859 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2860 }
2861
2862 static int semaphore_passed(struct intel_engine_cs *ring)
2863 {
2864 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2865 struct intel_engine_cs *signaller;
2866 u32 seqno;
2867
2868 ring->hangcheck.deadlock++;
2869
2870 signaller = semaphore_waits_for(ring, &seqno);
2871 if (signaller == NULL)
2872 return -1;
2873
2874 /* Prevent pathological recursion due to driver bugs */
2875 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2876 return -1;
2877
2878 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2879 return 1;
2880
2881 /* cursory check for an unkickable deadlock */
2882 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2883 semaphore_passed(signaller) < 0)
2884 return -1;
2885
2886 return 0;
2887 }
2888
2889 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2890 {
2891 struct intel_engine_cs *ring;
2892 int i;
2893
2894 for_each_ring(ring, dev_priv, i)
2895 ring->hangcheck.deadlock = 0;
2896 }
2897
2898 static enum intel_ring_hangcheck_action
2899 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2900 {
2901 struct drm_device *dev = ring->dev;
2902 struct drm_i915_private *dev_priv = dev->dev_private;
2903 u32 tmp;
2904
2905 if (acthd != ring->hangcheck.acthd) {
2906 if (acthd > ring->hangcheck.max_acthd) {
2907 ring->hangcheck.max_acthd = acthd;
2908 return HANGCHECK_ACTIVE;
2909 }
2910
2911 return HANGCHECK_ACTIVE_LOOP;
2912 }
2913
2914 if (IS_GEN2(dev))
2915 return HANGCHECK_HUNG;
2916
2917 /* Is the chip hanging on a WAIT_FOR_EVENT?
2918 * If so we can simply poke the RB_WAIT bit
2919 * and break the hang. This should work on
2920 * all but the second generation chipsets.
2921 */
2922 tmp = I915_READ_CTL(ring);
2923 if (tmp & RING_WAIT) {
2924 i915_handle_error(dev, false,
2925 "Kicking stuck wait on %s",
2926 ring->name);
2927 I915_WRITE_CTL(ring, tmp);
2928 return HANGCHECK_KICK;
2929 }
2930
2931 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2932 switch (semaphore_passed(ring)) {
2933 default:
2934 return HANGCHECK_HUNG;
2935 case 1:
2936 i915_handle_error(dev, false,
2937 "Kicking stuck semaphore on %s",
2938 ring->name);
2939 I915_WRITE_CTL(ring, tmp);
2940 return HANGCHECK_KICK;
2941 case 0:
2942 return HANGCHECK_WAIT;
2943 }
2944 }
2945
2946 return HANGCHECK_HUNG;
2947 }
2948
2949 /**
2950 * This is called when the chip hasn't reported back with completed
2951 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2952 * if there is no progress, the hangcheck score for that ring is increased.
2953 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2954 * we kick the ring. If we see no progress on three subsequent calls 2955 * we assume chip is wedged and try to fix it by resetting the chip. 2956 */ 2957 static void i915_hangcheck_elapsed(unsigned long data) 2958 { 2959 struct drm_device *dev = (struct drm_device *)data; 2960 struct drm_i915_private *dev_priv = dev->dev_private; 2961 struct intel_engine_cs *ring; 2962 int i; 2963 int busy_count = 0, rings_hung = 0; 2964 bool stuck[I915_NUM_RINGS] = { 0 }; 2965 #define BUSY 1 2966 #define KICK 5 2967 #define HUNG 20 2968 2969 if (!i915.enable_hangcheck) 2970 return; 2971 2972 for_each_ring(ring, dev_priv, i) { 2973 u64 acthd; 2974 u32 seqno; 2975 bool busy = true; 2976 2977 semaphore_clear_deadlocks(dev_priv); 2978 2979 seqno = ring->get_seqno(ring, false); 2980 acthd = intel_ring_get_active_head(ring); 2981 2982 if (ring->hangcheck.seqno == seqno) { 2983 if (ring_idle(ring, seqno)) { 2984 ring->hangcheck.action = HANGCHECK_IDLE; 2985 2986 if (waitqueue_active(&ring->irq_queue)) { 2987 /* Issue a wake-up to catch stuck h/w. */ 2988 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2989 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2990 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2991 ring->name); 2992 else 2993 DRM_INFO("Fake missed irq on %s\n", 2994 ring->name); 2995 wake_up_all(&ring->irq_queue); 2996 } 2997 /* Safeguard against driver failure */ 2998 ring->hangcheck.score += BUSY; 2999 } else 3000 busy = false; 3001 } else { 3002 /* We always increment the hangcheck score 3003 * if the ring is busy and still processing 3004 * the same request, so that no single request 3005 * can run indefinitely (such as a chain of 3006 * batches). The only time we do not increment 3007 * the hangcheck score on this ring, if this 3008 * ring is in a legitimate wait for another 3009 * ring. In that case the waiting ring is a 3010 * victim and we want to be sure we catch the 3011 * right culprit. Then every time we do kick 3012 * the ring, add a small increment to the 3013 * score so that we can catch a batch that is 3014 * being repeatedly kicked and so responsible 3015 * for stalling the machine. 3016 */ 3017 ring->hangcheck.action = ring_stuck(ring, 3018 acthd); 3019 3020 switch (ring->hangcheck.action) { 3021 case HANGCHECK_IDLE: 3022 case HANGCHECK_WAIT: 3023 case HANGCHECK_ACTIVE: 3024 break; 3025 case HANGCHECK_ACTIVE_LOOP: 3026 ring->hangcheck.score += BUSY; 3027 break; 3028 case HANGCHECK_KICK: 3029 ring->hangcheck.score += KICK; 3030 break; 3031 case HANGCHECK_HUNG: 3032 ring->hangcheck.score += HUNG; 3033 stuck[i] = true; 3034 break; 3035 } 3036 } 3037 } else { 3038 ring->hangcheck.action = HANGCHECK_ACTIVE; 3039 3040 /* Gradually reduce the count so that we catch DoS 3041 * attempts across multiple batches. 3042 */ 3043 if (ring->hangcheck.score > 0) 3044 ring->hangcheck.score--; 3045 3046 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3047 } 3048 3049 ring->hangcheck.seqno = seqno; 3050 ring->hangcheck.acthd = acthd; 3051 busy_count += busy; 3052 } 3053 3054 for_each_ring(ring, dev_priv, i) { 3055 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3056 DRM_INFO("%s on %s\n", 3057 stuck[i] ? 
"stuck" : "no progress", 3058 ring->name); 3059 rings_hung++; 3060 } 3061 } 3062 3063 if (rings_hung) 3064 return i915_handle_error(dev, true, "Ring hung"); 3065 3066 if (busy_count) 3067 /* Reset timer case chip hangs without another request 3068 * being added */ 3069 i915_queue_hangcheck(dev); 3070 } 3071 3072 void i915_queue_hangcheck(struct drm_device *dev) 3073 { 3074 struct drm_i915_private *dev_priv = dev->dev_private; 3075 struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer; 3076 3077 if (!i915.enable_hangcheck) 3078 return; 3079 3080 /* Don't continually defer the hangcheck, but make sure it is active */ 3081 if (timer_pending(timer)) 3082 return; 3083 mod_timer(timer, 3084 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3085 } 3086 3087 static void ibx_irq_reset(struct drm_device *dev) 3088 { 3089 struct drm_i915_private *dev_priv = dev->dev_private; 3090 3091 if (HAS_PCH_NOP(dev)) 3092 return; 3093 3094 GEN5_IRQ_RESET(SDE); 3095 3096 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3097 I915_WRITE(SERR_INT, 0xffffffff); 3098 } 3099 3100 /* 3101 * SDEIER is also touched by the interrupt handler to work around missed PCH 3102 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3103 * instead we unconditionally enable all PCH interrupt sources here, but then 3104 * only unmask them as needed with SDEIMR. 3105 * 3106 * This function needs to be called before interrupts are enabled. 3107 */ 3108 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3109 { 3110 struct drm_i915_private *dev_priv = dev->dev_private; 3111 3112 if (HAS_PCH_NOP(dev)) 3113 return; 3114 3115 WARN_ON(I915_READ(SDEIER) != 0); 3116 I915_WRITE(SDEIER, 0xffffffff); 3117 POSTING_READ(SDEIER); 3118 } 3119 3120 static void gen5_gt_irq_reset(struct drm_device *dev) 3121 { 3122 struct drm_i915_private *dev_priv = dev->dev_private; 3123 3124 GEN5_IRQ_RESET(GT); 3125 if (INTEL_INFO(dev)->gen >= 6) 3126 GEN5_IRQ_RESET(GEN6_PM); 3127 } 3128 3129 /* drm_dma.h hooks 3130 */ 3131 static void ironlake_irq_reset(struct drm_device *dev) 3132 { 3133 struct drm_i915_private *dev_priv = dev->dev_private; 3134 3135 I915_WRITE(HWSTAM, 0xffffffff); 3136 3137 GEN5_IRQ_RESET(DE); 3138 if (IS_GEN7(dev)) 3139 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3140 3141 gen5_gt_irq_reset(dev); 3142 3143 ibx_irq_reset(dev); 3144 } 3145 3146 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3147 { 3148 enum pipe pipe; 3149 3150 I915_WRITE(PORT_HOTPLUG_EN, 0); 3151 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3152 3153 for_each_pipe(dev_priv, pipe) 3154 I915_WRITE(PIPESTAT(pipe), 0xffff); 3155 3156 GEN5_IRQ_RESET(VLV_); 3157 } 3158 3159 static void valleyview_irq_preinstall(struct drm_device *dev) 3160 { 3161 struct drm_i915_private *dev_priv = dev->dev_private; 3162 3163 /* VLV magic */ 3164 I915_WRITE(VLV_IMR, 0); 3165 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3166 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3167 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3168 3169 gen5_gt_irq_reset(dev); 3170 3171 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3172 3173 vlv_display_irq_reset(dev_priv); 3174 } 3175 3176 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3177 { 3178 GEN8_IRQ_RESET_NDX(GT, 0); 3179 GEN8_IRQ_RESET_NDX(GT, 1); 3180 GEN8_IRQ_RESET_NDX(GT, 2); 3181 GEN8_IRQ_RESET_NDX(GT, 3); 3182 } 3183 3184 static void gen8_irq_reset(struct drm_device *dev) 3185 { 3186 struct drm_i915_private *dev_priv = dev->dev_private; 3187 int pipe; 3188 3189 
I915_WRITE(GEN8_MASTER_IRQ, 0); 3190 POSTING_READ(GEN8_MASTER_IRQ); 3191 3192 gen8_gt_irq_reset(dev_priv); 3193 3194 for_each_pipe(dev_priv, pipe) 3195 if (intel_display_power_is_enabled(dev_priv, 3196 POWER_DOMAIN_PIPE(pipe))) 3197 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3198 3199 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3200 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3201 GEN5_IRQ_RESET(GEN8_PCU_); 3202 3203 ibx_irq_reset(dev); 3204 } 3205 3206 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3207 { 3208 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3209 3210 spin_lock_irq(&dev_priv->irq_lock); 3211 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3212 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3213 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3214 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3215 spin_unlock_irq(&dev_priv->irq_lock); 3216 } 3217 3218 static void cherryview_irq_preinstall(struct drm_device *dev) 3219 { 3220 struct drm_i915_private *dev_priv = dev->dev_private; 3221 3222 I915_WRITE(GEN8_MASTER_IRQ, 0); 3223 POSTING_READ(GEN8_MASTER_IRQ); 3224 3225 gen8_gt_irq_reset(dev_priv); 3226 3227 GEN5_IRQ_RESET(GEN8_PCU_); 3228 3229 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3230 3231 vlv_display_irq_reset(dev_priv); 3232 } 3233 3234 static void ibx_hpd_irq_setup(struct drm_device *dev) 3235 { 3236 struct drm_i915_private *dev_priv = dev->dev_private; 3237 struct intel_encoder *intel_encoder; 3238 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3239 3240 if (HAS_PCH_IBX(dev)) { 3241 hotplug_irqs = SDE_HOTPLUG_MASK; 3242 for_each_intel_encoder(dev, intel_encoder) 3243 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3244 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3245 } else { 3246 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3247 for_each_intel_encoder(dev, intel_encoder) 3248 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3249 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3250 } 3251 3252 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3253 3254 /* 3255 * Enable digital hotplug on the PCH, and configure the DP short pulse 3256 * duration to 2ms (which is the minimum in the Display Port spec) 3257 * 3258 * This register is the same on all known PCH chips. 3259 */ 3260 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3261 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3262 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3263 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3264 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3265 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3266 } 3267 3268 static void ibx_irq_postinstall(struct drm_device *dev) 3269 { 3270 struct drm_i915_private *dev_priv = dev->dev_private; 3271 u32 mask; 3272 3273 if (HAS_PCH_NOP(dev)) 3274 return; 3275 3276 if (HAS_PCH_IBX(dev)) 3277 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3278 else 3279 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3280 3281 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3282 I915_WRITE(SDEIMR, ~mask); 3283 } 3284 3285 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3286 { 3287 struct drm_i915_private *dev_priv = dev->dev_private; 3288 u32 pm_irqs, gt_irqs; 3289 3290 pm_irqs = gt_irqs = 0; 3291 3292 dev_priv->gt_irq_mask = ~0; 3293 if (HAS_L3_DPF(dev)) { 3294 /* L3 parity interrupt is always unmasked. 
*/ 3295 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3296 gt_irqs |= GT_PARITY_ERROR(dev); 3297 } 3298 3299 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3300 if (IS_GEN5(dev)) { 3301 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3302 ILK_BSD_USER_INTERRUPT; 3303 } else { 3304 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3305 } 3306 3307 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3308 3309 if (INTEL_INFO(dev)->gen >= 6) { 3310 pm_irqs |= dev_priv->pm_rps_events; 3311 3312 if (HAS_VEBOX(dev)) 3313 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3314 3315 dev_priv->pm_irq_mask = 0xffffffff; 3316 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3317 } 3318 } 3319 3320 static int ironlake_irq_postinstall(struct drm_device *dev) 3321 { 3322 struct drm_i915_private *dev_priv = dev->dev_private; 3323 u32 display_mask, extra_mask; 3324 3325 if (INTEL_INFO(dev)->gen >= 7) { 3326 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3327 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3328 DE_PLANEB_FLIP_DONE_IVB | 3329 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3330 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3331 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3332 } else { 3333 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3334 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3335 DE_AUX_CHANNEL_A | 3336 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3337 DE_POISON); 3338 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3339 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3340 } 3341 3342 dev_priv->irq_mask = ~display_mask; 3343 3344 I915_WRITE(HWSTAM, 0xeffe); 3345 3346 ibx_irq_pre_postinstall(dev); 3347 3348 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3349 3350 gen5_gt_irq_postinstall(dev); 3351 3352 ibx_irq_postinstall(dev); 3353 3354 if (IS_IRONLAKE_M(dev)) { 3355 /* Enable PCU event interrupts 3356 * 3357 * spinlocking not required here for correctness since interrupt 3358 * setup is guaranteed to run in single-threaded context. But we 3359 * need it to make the assert_spin_locked happy. 
*/ 3360 spin_lock_irq(&dev_priv->irq_lock); 3361 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3362 spin_unlock_irq(&dev_priv->irq_lock); 3363 } 3364 3365 return 0; 3366 } 3367 3368 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3369 { 3370 u32 pipestat_mask; 3371 u32 iir_mask; 3372 enum pipe pipe; 3373 3374 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3375 PIPE_FIFO_UNDERRUN_STATUS; 3376 3377 for_each_pipe(dev_priv, pipe) 3378 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3379 POSTING_READ(PIPESTAT(PIPE_A)); 3380 3381 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3382 PIPE_CRC_DONE_INTERRUPT_STATUS; 3383 3384 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3385 for_each_pipe(dev_priv, pipe) 3386 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3387 3388 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3389 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3390 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3391 if (IS_CHERRYVIEW(dev_priv)) 3392 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3393 dev_priv->irq_mask &= ~iir_mask; 3394 3395 I915_WRITE(VLV_IIR, iir_mask); 3396 I915_WRITE(VLV_IIR, iir_mask); 3397 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3398 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3399 POSTING_READ(VLV_IMR); 3400 } 3401 3402 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3403 { 3404 u32 pipestat_mask; 3405 u32 iir_mask; 3406 enum pipe pipe; 3407 3408 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3409 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3410 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3411 if (IS_CHERRYVIEW(dev_priv)) 3412 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3413 3414 dev_priv->irq_mask |= iir_mask; 3415 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3416 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3417 I915_WRITE(VLV_IIR, iir_mask); 3418 I915_WRITE(VLV_IIR, iir_mask); 3419 POSTING_READ(VLV_IIR); 3420 3421 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3422 PIPE_CRC_DONE_INTERRUPT_STATUS; 3423 3424 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3425 for_each_pipe(dev_priv, pipe) 3426 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3427 3428 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3429 PIPE_FIFO_UNDERRUN_STATUS; 3430 3431 for_each_pipe(dev_priv, pipe) 3432 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3433 POSTING_READ(PIPESTAT(PIPE_A)); 3434 } 3435 3436 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3437 { 3438 assert_spin_locked(&dev_priv->irq_lock); 3439 3440 if (dev_priv->display_irqs_enabled) 3441 return; 3442 3443 dev_priv->display_irqs_enabled = true; 3444 3445 if (intel_irqs_enabled(dev_priv)) 3446 valleyview_display_irqs_install(dev_priv); 3447 } 3448 3449 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3450 { 3451 assert_spin_locked(&dev_priv->irq_lock); 3452 3453 if (!dev_priv->display_irqs_enabled) 3454 return; 3455 3456 dev_priv->display_irqs_enabled = false; 3457 3458 if (intel_irqs_enabled(dev_priv)) 3459 valleyview_display_irqs_uninstall(dev_priv); 3460 } 3461 3462 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3463 { 3464 dev_priv->irq_mask = ~0; 3465 3466 I915_WRITE(PORT_HOTPLUG_EN, 0); 3467 POSTING_READ(PORT_HOTPLUG_EN); 3468 3469 I915_WRITE(VLV_IIR, 0xffffffff); 3470 I915_WRITE(VLV_IIR, 0xffffffff); 3471 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3472 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3473 POSTING_READ(VLV_IMR); 3474 3475 /* Interrupt setup is already guaranteed to be single-threaded, 
this is 3476 * just to make the assert_spin_locked check happy. */ 3477 spin_lock_irq(&dev_priv->irq_lock); 3478 if (dev_priv->display_irqs_enabled) 3479 valleyview_display_irqs_install(dev_priv); 3480 spin_unlock_irq(&dev_priv->irq_lock); 3481 } 3482 3483 static int valleyview_irq_postinstall(struct drm_device *dev) 3484 { 3485 struct drm_i915_private *dev_priv = dev->dev_private; 3486 3487 vlv_display_irq_postinstall(dev_priv); 3488 3489 gen5_gt_irq_postinstall(dev); 3490 3491 /* ack & enable invalid PTE error interrupts */ 3492 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3493 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3494 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3495 #endif 3496 3497 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3498 3499 return 0; 3500 } 3501 3502 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3503 { 3504 /* These are interrupts we'll toggle with the ring mask register */ 3505 uint32_t gt_interrupts[] = { 3506 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3507 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3508 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3509 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3510 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3511 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3512 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3513 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3514 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3515 0, 3516 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3517 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3518 }; 3519 3520 dev_priv->pm_irq_mask = 0xffffffff; 3521 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3522 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3523 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events); 3524 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3525 } 3526 3527 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3528 { 3529 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3530 uint32_t de_pipe_enables; 3531 int pipe; 3532 u32 aux_en = GEN8_AUX_CHANNEL_A; 3533 3534 if (IS_GEN9(dev_priv)) { 3535 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3536 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3537 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3538 GEN9_AUX_CHANNEL_D; 3539 } else 3540 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3541 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3542 3543 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3544 GEN8_PIPE_FIFO_UNDERRUN; 3545 3546 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3547 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3548 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3549 3550 for_each_pipe(dev_priv, pipe) 3551 if (intel_display_power_is_enabled(dev_priv, 3552 POWER_DOMAIN_PIPE(pipe))) 3553 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3554 dev_priv->de_irq_mask[pipe], 3555 de_pipe_enables); 3556 3557 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); 3558 } 3559 3560 static int gen8_irq_postinstall(struct drm_device *dev) 3561 { 3562 struct drm_i915_private *dev_priv = dev->dev_private; 3563 3564 ibx_irq_pre_postinstall(dev); 3565 3566 gen8_gt_irq_postinstall(dev_priv); 3567 gen8_de_irq_postinstall(dev_priv); 3568 3569 ibx_irq_postinstall(dev); 3570 3571 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3572 POSTING_READ(GEN8_MASTER_IRQ); 3573 3574 return 0; 3575 } 3576 3577 static int cherryview_irq_postinstall(struct drm_device *dev) 3578 { 3579 struct drm_i915_private 
*dev_priv = dev->dev_private; 3580 3581 vlv_display_irq_postinstall(dev_priv); 3582 3583 gen8_gt_irq_postinstall(dev_priv); 3584 3585 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3586 POSTING_READ(GEN8_MASTER_IRQ); 3587 3588 return 0; 3589 } 3590 3591 static void gen8_irq_uninstall(struct drm_device *dev) 3592 { 3593 struct drm_i915_private *dev_priv = dev->dev_private; 3594 3595 if (!dev_priv) 3596 return; 3597 3598 gen8_irq_reset(dev); 3599 } 3600 3601 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3602 { 3603 /* Interrupt setup is already guaranteed to be single-threaded, this is 3604 * just to make the assert_spin_locked check happy. */ 3605 spin_lock_irq(&dev_priv->irq_lock); 3606 if (dev_priv->display_irqs_enabled) 3607 valleyview_display_irqs_uninstall(dev_priv); 3608 spin_unlock_irq(&dev_priv->irq_lock); 3609 3610 vlv_display_irq_reset(dev_priv); 3611 3612 dev_priv->irq_mask = 0; 3613 } 3614 3615 static void valleyview_irq_uninstall(struct drm_device *dev) 3616 { 3617 struct drm_i915_private *dev_priv = dev->dev_private; 3618 3619 if (!dev_priv) 3620 return; 3621 3622 I915_WRITE(VLV_MASTER_IER, 0); 3623 3624 gen5_gt_irq_reset(dev); 3625 3626 I915_WRITE(HWSTAM, 0xffffffff); 3627 3628 vlv_display_irq_uninstall(dev_priv); 3629 } 3630 3631 static void cherryview_irq_uninstall(struct drm_device *dev) 3632 { 3633 struct drm_i915_private *dev_priv = dev->dev_private; 3634 3635 if (!dev_priv) 3636 return; 3637 3638 I915_WRITE(GEN8_MASTER_IRQ, 0); 3639 POSTING_READ(GEN8_MASTER_IRQ); 3640 3641 gen8_gt_irq_reset(dev_priv); 3642 3643 GEN5_IRQ_RESET(GEN8_PCU_); 3644 3645 vlv_display_irq_uninstall(dev_priv); 3646 } 3647 3648 static void ironlake_irq_uninstall(struct drm_device *dev) 3649 { 3650 struct drm_i915_private *dev_priv = dev->dev_private; 3651 3652 if (!dev_priv) 3653 return; 3654 3655 ironlake_irq_reset(dev); 3656 } 3657 3658 static void i8xx_irq_preinstall(struct drm_device * dev) 3659 { 3660 struct drm_i915_private *dev_priv = dev->dev_private; 3661 int pipe; 3662 3663 for_each_pipe(dev_priv, pipe) 3664 I915_WRITE(PIPESTAT(pipe), 0); 3665 I915_WRITE16(IMR, 0xffff); 3666 I915_WRITE16(IER, 0x0); 3667 POSTING_READ16(IER); 3668 } 3669 3670 static int i8xx_irq_postinstall(struct drm_device *dev) 3671 { 3672 struct drm_i915_private *dev_priv = dev->dev_private; 3673 3674 I915_WRITE16(EMR, 3675 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3676 3677 /* Unmask the interrupts that we always want on. */ 3678 dev_priv->irq_mask = 3679 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3680 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3681 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3682 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3683 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3684 I915_WRITE16(IMR, dev_priv->irq_mask); 3685 3686 I915_WRITE16(IER, 3687 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3688 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3689 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3690 I915_USER_INTERRUPT); 3691 POSTING_READ16(IER); 3692 3693 /* Interrupt setup is already guaranteed to be single-threaded, this is 3694 * just to make the assert_spin_locked check happy. */ 3695 spin_lock_irq(&dev_priv->irq_lock); 3696 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3697 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3698 spin_unlock_irq(&dev_priv->irq_lock); 3699 3700 return 0; 3701 } 3702 3703 /* 3704 * Returns true when a page flip has completed. 
3705 */ 3706 static bool i8xx_handle_vblank(struct drm_device *dev, 3707 int plane, int pipe, u32 iir) 3708 { 3709 struct drm_i915_private *dev_priv = dev->dev_private; 3710 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3711 3712 if (!intel_pipe_handle_vblank(dev, pipe)) 3713 return false; 3714 3715 if ((iir & flip_pending) == 0) 3716 goto check_page_flip; 3717 3718 intel_prepare_page_flip(dev, plane); 3719 3720 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3721 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3722 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3723 * the flip is completed (no longer pending). Since this doesn't raise 3724 * an interrupt per se, we watch for the change at vblank. 3725 */ 3726 if (I915_READ16(ISR) & flip_pending) 3727 goto check_page_flip; 3728 3729 intel_finish_page_flip(dev, pipe); 3730 return true; 3731 3732 check_page_flip: 3733 intel_check_page_flip(dev, pipe); 3734 return false; 3735 } 3736 3737 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3738 { 3739 struct drm_device *dev = arg; 3740 struct drm_i915_private *dev_priv = dev->dev_private; 3741 u16 iir, new_iir; 3742 u32 pipe_stats[2]; 3743 int pipe; 3744 u16 flip_mask = 3745 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3746 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3747 3748 iir = I915_READ16(IIR); 3749 if (iir == 0) 3750 return IRQ_NONE; 3751 3752 while (iir & ~flip_mask) { 3753 /* Can't rely on pipestat interrupt bit in iir as it might 3754 * have been cleared after the pipestat interrupt was received. 3755 * It doesn't set the bit in iir again, but it still produces 3756 * interrupts (for non-MSI). 3757 */ 3758 spin_lock(&dev_priv->irq_lock); 3759 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3760 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3761 3762 for_each_pipe(dev_priv, pipe) { 3763 int reg = PIPESTAT(pipe); 3764 pipe_stats[pipe] = I915_READ(reg); 3765 3766 /* 3767 * Clear the PIPE*STAT regs before the IIR 3768 */ 3769 if (pipe_stats[pipe] & 0x8000ffff) 3770 I915_WRITE(reg, pipe_stats[pipe]); 3771 } 3772 spin_unlock(&dev_priv->irq_lock); 3773 3774 I915_WRITE16(IIR, iir & ~flip_mask); 3775 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3776 3777 if (iir & I915_USER_INTERRUPT) 3778 notify_ring(dev, &dev_priv->ring[RCS]); 3779 3780 for_each_pipe(dev_priv, pipe) { 3781 int plane = pipe; 3782 if (HAS_FBC(dev)) 3783 plane = !plane; 3784 3785 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3786 i8xx_handle_vblank(dev, plane, pipe, iir)) 3787 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3788 3789 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3790 i9xx_pipe_crc_irq_handler(dev, pipe); 3791 3792 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3793 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3794 pipe); 3795 } 3796 3797 iir = new_iir; 3798 } 3799 3800 return IRQ_HANDLED; 3801 } 3802 3803 static void i8xx_irq_uninstall(struct drm_device * dev) 3804 { 3805 struct drm_i915_private *dev_priv = dev->dev_private; 3806 int pipe; 3807 3808 for_each_pipe(dev_priv, pipe) { 3809 /* Clear enable bits; then clear status bits */ 3810 I915_WRITE(PIPESTAT(pipe), 0); 3811 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3812 } 3813 I915_WRITE16(IMR, 0xffff); 3814 I915_WRITE16(IER, 0x0); 3815 I915_WRITE16(IIR, I915_READ16(IIR)); 3816 } 3817 3818 static void i915_irq_preinstall(struct drm_device * dev) 3819 { 3820 struct drm_i915_private *dev_priv = dev->dev_private; 3821 int pipe; 
3822 3823 if (I915_HAS_HOTPLUG(dev)) { 3824 I915_WRITE(PORT_HOTPLUG_EN, 0); 3825 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3826 } 3827 3828 I915_WRITE16(HWSTAM, 0xeffe); 3829 for_each_pipe(dev_priv, pipe) 3830 I915_WRITE(PIPESTAT(pipe), 0); 3831 I915_WRITE(IMR, 0xffffffff); 3832 I915_WRITE(IER, 0x0); 3833 POSTING_READ(IER); 3834 } 3835 3836 static int i915_irq_postinstall(struct drm_device *dev) 3837 { 3838 struct drm_i915_private *dev_priv = dev->dev_private; 3839 u32 enable_mask; 3840 3841 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3842 3843 /* Unmask the interrupts that we always want on. */ 3844 dev_priv->irq_mask = 3845 ~(I915_ASLE_INTERRUPT | 3846 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3847 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3848 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3849 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3850 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3851 3852 enable_mask = 3853 I915_ASLE_INTERRUPT | 3854 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3855 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3856 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3857 I915_USER_INTERRUPT; 3858 3859 if (I915_HAS_HOTPLUG(dev)) { 3860 I915_WRITE(PORT_HOTPLUG_EN, 0); 3861 POSTING_READ(PORT_HOTPLUG_EN); 3862 3863 /* Enable in IER... */ 3864 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3865 /* and unmask in IMR */ 3866 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3867 } 3868 3869 I915_WRITE(IMR, dev_priv->irq_mask); 3870 I915_WRITE(IER, enable_mask); 3871 POSTING_READ(IER); 3872 3873 i915_enable_asle_pipestat(dev); 3874 3875 /* Interrupt setup is already guaranteed to be single-threaded, this is 3876 * just to make the assert_spin_locked check happy. */ 3877 spin_lock_irq(&dev_priv->irq_lock); 3878 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3879 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3880 spin_unlock_irq(&dev_priv->irq_lock); 3881 3882 return 0; 3883 } 3884 3885 /* 3886 * Returns true when a page flip has completed. 3887 */ 3888 static bool i915_handle_vblank(struct drm_device *dev, 3889 int plane, int pipe, u32 iir) 3890 { 3891 struct drm_i915_private *dev_priv = dev->dev_private; 3892 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3893 3894 if (!intel_pipe_handle_vblank(dev, pipe)) 3895 return false; 3896 3897 if ((iir & flip_pending) == 0) 3898 goto check_page_flip; 3899 3900 intel_prepare_page_flip(dev, plane); 3901 3902 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3903 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3904 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3905 * the flip is completed (no longer pending). Since this doesn't raise 3906 * an interrupt per se, we watch for the change at vblank. 
3907 */ 3908 if (I915_READ(ISR) & flip_pending) 3909 goto check_page_flip; 3910 3911 intel_finish_page_flip(dev, pipe); 3912 return true; 3913 3914 check_page_flip: 3915 intel_check_page_flip(dev, pipe); 3916 return false; 3917 } 3918 3919 static irqreturn_t i915_irq_handler(int irq, void *arg) 3920 { 3921 struct drm_device *dev = arg; 3922 struct drm_i915_private *dev_priv = dev->dev_private; 3923 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3924 u32 flip_mask = 3925 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3926 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3927 int pipe, ret = IRQ_NONE; 3928 3929 iir = I915_READ(IIR); 3930 do { 3931 bool irq_received = (iir & ~flip_mask) != 0; 3932 bool blc_event = false; 3933 3934 /* Can't rely on pipestat interrupt bit in iir as it might 3935 * have been cleared after the pipestat interrupt was received. 3936 * It doesn't set the bit in iir again, but it still produces 3937 * interrupts (for non-MSI). 3938 */ 3939 spin_lock(&dev_priv->irq_lock); 3940 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3941 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3942 3943 for_each_pipe(dev_priv, pipe) { 3944 int reg = PIPESTAT(pipe); 3945 pipe_stats[pipe] = I915_READ(reg); 3946 3947 /* Clear the PIPE*STAT regs before the IIR */ 3948 if (pipe_stats[pipe] & 0x8000ffff) { 3949 I915_WRITE(reg, pipe_stats[pipe]); 3950 irq_received = true; 3951 } 3952 } 3953 spin_unlock(&dev_priv->irq_lock); 3954 3955 if (!irq_received) 3956 break; 3957 3958 /* Consume port. Then clear IIR or we'll miss events */ 3959 if (I915_HAS_HOTPLUG(dev) && 3960 iir & I915_DISPLAY_PORT_INTERRUPT) 3961 i9xx_hpd_irq_handler(dev); 3962 3963 I915_WRITE(IIR, iir & ~flip_mask); 3964 new_iir = I915_READ(IIR); /* Flush posted writes */ 3965 3966 if (iir & I915_USER_INTERRUPT) 3967 notify_ring(dev, &dev_priv->ring[RCS]); 3968 3969 for_each_pipe(dev_priv, pipe) { 3970 int plane = pipe; 3971 if (HAS_FBC(dev)) 3972 plane = !plane; 3973 3974 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3975 i915_handle_vblank(dev, plane, pipe, iir)) 3976 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3977 3978 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3979 blc_event = true; 3980 3981 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3982 i9xx_pipe_crc_irq_handler(dev, pipe); 3983 3984 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3985 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3986 pipe); 3987 } 3988 3989 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3990 intel_opregion_asle_intr(dev); 3991 3992 /* With MSI, interrupts are only generated when iir 3993 * transitions from zero to nonzero. If another bit got 3994 * set while we were handling the existing iir bits, then 3995 * we would never get another interrupt. 3996 * 3997 * This is fine on non-MSI as well, as if we hit this path 3998 * we avoid exiting the interrupt handler only to generate 3999 * another one. 4000 * 4001 * Note that for MSI this could cause a stray interrupt report 4002 * if an interrupt landed in the time between writing IIR and 4003 * the posting read. This should be rare enough to never 4004 * trigger the 99% of 100,000 interrupts test for disabling 4005 * stray interrupts. 
4006 */ 4007 ret = IRQ_HANDLED; 4008 iir = new_iir; 4009 } while (iir & ~flip_mask); 4010 4011 return ret; 4012 } 4013 4014 static void i915_irq_uninstall(struct drm_device * dev) 4015 { 4016 struct drm_i915_private *dev_priv = dev->dev_private; 4017 int pipe; 4018 4019 if (I915_HAS_HOTPLUG(dev)) { 4020 I915_WRITE(PORT_HOTPLUG_EN, 0); 4021 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4022 } 4023 4024 I915_WRITE16(HWSTAM, 0xffff); 4025 for_each_pipe(dev_priv, pipe) { 4026 /* Clear enable bits; then clear status bits */ 4027 I915_WRITE(PIPESTAT(pipe), 0); 4028 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4029 } 4030 I915_WRITE(IMR, 0xffffffff); 4031 I915_WRITE(IER, 0x0); 4032 4033 I915_WRITE(IIR, I915_READ(IIR)); 4034 } 4035 4036 static void i965_irq_preinstall(struct drm_device * dev) 4037 { 4038 struct drm_i915_private *dev_priv = dev->dev_private; 4039 int pipe; 4040 4041 I915_WRITE(PORT_HOTPLUG_EN, 0); 4042 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4043 4044 I915_WRITE(HWSTAM, 0xeffe); 4045 for_each_pipe(dev_priv, pipe) 4046 I915_WRITE(PIPESTAT(pipe), 0); 4047 I915_WRITE(IMR, 0xffffffff); 4048 I915_WRITE(IER, 0x0); 4049 POSTING_READ(IER); 4050 } 4051 4052 static int i965_irq_postinstall(struct drm_device *dev) 4053 { 4054 struct drm_i915_private *dev_priv = dev->dev_private; 4055 u32 enable_mask; 4056 u32 error_mask; 4057 4058 /* Unmask the interrupts that we always want on. */ 4059 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4060 I915_DISPLAY_PORT_INTERRUPT | 4061 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4062 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4063 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4064 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4065 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4066 4067 enable_mask = ~dev_priv->irq_mask; 4068 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4069 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4070 enable_mask |= I915_USER_INTERRUPT; 4071 4072 if (IS_G4X(dev)) 4073 enable_mask |= I915_BSD_USER_INTERRUPT; 4074 4075 /* Interrupt setup is already guaranteed to be single-threaded, this is 4076 * just to make the assert_spin_locked check happy. */ 4077 spin_lock_irq(&dev_priv->irq_lock); 4078 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4079 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4080 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4081 spin_unlock_irq(&dev_priv->irq_lock); 4082 4083 /* 4084 * Enable some error detection, note the instruction error mask 4085 * bit is reserved, so we leave it masked. 
4086 */ 4087 if (IS_G4X(dev)) { 4088 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4089 GM45_ERROR_MEM_PRIV | 4090 GM45_ERROR_CP_PRIV | 4091 I915_ERROR_MEMORY_REFRESH); 4092 } else { 4093 error_mask = ~(I915_ERROR_PAGE_TABLE | 4094 I915_ERROR_MEMORY_REFRESH); 4095 } 4096 I915_WRITE(EMR, error_mask); 4097 4098 I915_WRITE(IMR, dev_priv->irq_mask); 4099 I915_WRITE(IER, enable_mask); 4100 POSTING_READ(IER); 4101 4102 I915_WRITE(PORT_HOTPLUG_EN, 0); 4103 POSTING_READ(PORT_HOTPLUG_EN); 4104 4105 i915_enable_asle_pipestat(dev); 4106 4107 return 0; 4108 } 4109 4110 static void i915_hpd_irq_setup(struct drm_device *dev) 4111 { 4112 struct drm_i915_private *dev_priv = dev->dev_private; 4113 struct intel_encoder *intel_encoder; 4114 u32 hotplug_en; 4115 4116 assert_spin_locked(&dev_priv->irq_lock); 4117 4118 if (I915_HAS_HOTPLUG(dev)) { 4119 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4120 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4121 /* Note HDMI and DP share hotplug bits */ 4122 /* enable bits are the same for all generations */ 4123 for_each_intel_encoder(dev, intel_encoder) 4124 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4125 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4126 /* Programming the CRT detection parameters tends 4127 to generate a spurious hotplug event about three 4128 seconds later. So just do it once. 4129 */ 4130 if (IS_G4X(dev)) 4131 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4132 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4133 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4134 4135 /* Ignore TV since it's buggy */ 4136 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4137 } 4138 } 4139 4140 static irqreturn_t i965_irq_handler(int irq, void *arg) 4141 { 4142 struct drm_device *dev = arg; 4143 struct drm_i915_private *dev_priv = dev->dev_private; 4144 u32 iir, new_iir; 4145 u32 pipe_stats[I915_MAX_PIPES]; 4146 int ret = IRQ_NONE, pipe; 4147 u32 flip_mask = 4148 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4149 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4150 4151 iir = I915_READ(IIR); 4152 4153 for (;;) { 4154 bool irq_received = (iir & ~flip_mask) != 0; 4155 bool blc_event = false; 4156 4157 /* Can't rely on pipestat interrupt bit in iir as it might 4158 * have been cleared after the pipestat interrupt was received. 4159 * It doesn't set the bit in iir again, but it still produces 4160 * interrupts (for non-MSI). 4161 */ 4162 spin_lock(&dev_priv->irq_lock); 4163 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4164 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4165 4166 for_each_pipe(dev_priv, pipe) { 4167 int reg = PIPESTAT(pipe); 4168 pipe_stats[pipe] = I915_READ(reg); 4169 4170 /* 4171 * Clear the PIPE*STAT regs before the IIR 4172 */ 4173 if (pipe_stats[pipe] & 0x8000ffff) { 4174 I915_WRITE(reg, pipe_stats[pipe]); 4175 irq_received = true; 4176 } 4177 } 4178 spin_unlock(&dev_priv->irq_lock); 4179 4180 if (!irq_received) 4181 break; 4182 4183 ret = IRQ_HANDLED; 4184 4185 /* Consume port. 
Then clear IIR or we'll miss events */ 4186 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4187 i9xx_hpd_irq_handler(dev); 4188 4189 I915_WRITE(IIR, iir & ~flip_mask); 4190 new_iir = I915_READ(IIR); /* Flush posted writes */ 4191 4192 if (iir & I915_USER_INTERRUPT) 4193 notify_ring(dev, &dev_priv->ring[RCS]); 4194 if (iir & I915_BSD_USER_INTERRUPT) 4195 notify_ring(dev, &dev_priv->ring[VCS]); 4196 4197 for_each_pipe(dev_priv, pipe) { 4198 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4199 i915_handle_vblank(dev, pipe, pipe, iir)) 4200 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4201 4202 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4203 blc_event = true; 4204 4205 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4206 i9xx_pipe_crc_irq_handler(dev, pipe); 4207 4208 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4209 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4210 } 4211 4212 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4213 intel_opregion_asle_intr(dev); 4214 4215 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4216 gmbus_irq_handler(dev); 4217 4218 /* With MSI, interrupts are only generated when iir 4219 * transitions from zero to nonzero. If another bit got 4220 * set while we were handling the existing iir bits, then 4221 * we would never get another interrupt. 4222 * 4223 * This is fine on non-MSI as well, as if we hit this path 4224 * we avoid exiting the interrupt handler only to generate 4225 * another one. 4226 * 4227 * Note that for MSI this could cause a stray interrupt report 4228 * if an interrupt landed in the time between writing IIR and 4229 * the posting read. This should be rare enough to never 4230 * trigger the 99% of 100,000 interrupts test for disabling 4231 * stray interrupts. 4232 */ 4233 iir = new_iir; 4234 } 4235 4236 return ret; 4237 } 4238 4239 static void i965_irq_uninstall(struct drm_device * dev) 4240 { 4241 struct drm_i915_private *dev_priv = dev->dev_private; 4242 int pipe; 4243 4244 if (!dev_priv) 4245 return; 4246 4247 I915_WRITE(PORT_HOTPLUG_EN, 0); 4248 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4249 4250 I915_WRITE(HWSTAM, 0xffffffff); 4251 for_each_pipe(dev_priv, pipe) 4252 I915_WRITE(PIPESTAT(pipe), 0); 4253 I915_WRITE(IMR, 0xffffffff); 4254 I915_WRITE(IER, 0x0); 4255 4256 for_each_pipe(dev_priv, pipe) 4257 I915_WRITE(PIPESTAT(pipe), 4258 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4259 I915_WRITE(IIR, I915_READ(IIR)); 4260 } 4261 4262 static void intel_hpd_irq_reenable_work(struct work_struct *work) 4263 { 4264 struct drm_i915_private *dev_priv = 4265 container_of(work, typeof(*dev_priv), 4266 hotplug_reenable_work.work); 4267 struct drm_device *dev = dev_priv->dev; 4268 struct drm_mode_config *mode_config = &dev->mode_config; 4269 int i; 4270 4271 intel_runtime_pm_get(dev_priv); 4272 4273 spin_lock_irq(&dev_priv->irq_lock); 4274 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4275 struct drm_connector *connector; 4276 4277 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4278 continue; 4279 4280 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4281 4282 list_for_each_entry(connector, &mode_config->connector_list, head) { 4283 struct intel_connector *intel_connector = to_intel_connector(connector); 4284 4285 if (intel_connector->encoder->hpd_pin == i) { 4286 if (connector->polled != intel_connector->polled) 4287 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4288 connector->name); 4289 connector->polled = intel_connector->polled; 4290 if (!connector->polled) 4291 connector->polled = 
DRM_CONNECTOR_POLL_HPD; 4292 } 4293 } 4294 } 4295 if (dev_priv->display.hpd_irq_setup) 4296 dev_priv->display.hpd_irq_setup(dev); 4297 spin_unlock_irq(&dev_priv->irq_lock); 4298 4299 intel_runtime_pm_put(dev_priv); 4300 } 4301 4302 /** 4303 * intel_irq_init - initializes irq support 4304 * @dev_priv: i915 device instance 4305 * 4306 * This function initializes all the irq support including work items, timers 4307 * and all the vtables. It does not setup the interrupt itself though. 4308 */ 4309 void intel_irq_init(struct drm_i915_private *dev_priv) 4310 { 4311 struct drm_device *dev = dev_priv->dev; 4312 4313 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4314 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4315 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4316 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4317 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4318 4319 /* Let's track the enabled rps events */ 4320 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 4321 /* WaGsvRC0ResidencyMethod:vlv */ 4322 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4323 else 4324 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4325 4326 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4327 i915_hangcheck_elapsed, 4328 (unsigned long) dev); 4329 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, 4330 intel_hpd_irq_reenable_work); 4331 4332 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4333 4334 if (IS_GEN2(dev_priv)) { 4335 dev->max_vblank_count = 0; 4336 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4337 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4338 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4339 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4340 } else { 4341 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4342 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4343 } 4344 4345 /* 4346 * Opt out of the vblank disable timer on everything except gen2. 4347 * Gen2 doesn't have a hardware frame counter and so depends on 4348 * vblank interrupts to produce sane vblank seuquence numbers. 
4349 */ 4350 if (!IS_GEN2(dev_priv)) 4351 dev->vblank_disable_immediate = true; 4352 4353 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4354 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4355 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4356 } 4357 4358 if (IS_CHERRYVIEW(dev_priv)) { 4359 dev->driver->irq_handler = cherryview_irq_handler; 4360 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4361 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4362 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4363 dev->driver->enable_vblank = valleyview_enable_vblank; 4364 dev->driver->disable_vblank = valleyview_disable_vblank; 4365 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4366 } else if (IS_VALLEYVIEW(dev_priv)) { 4367 dev->driver->irq_handler = valleyview_irq_handler; 4368 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4369 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4370 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4371 dev->driver->enable_vblank = valleyview_enable_vblank; 4372 dev->driver->disable_vblank = valleyview_disable_vblank; 4373 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4374 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4375 dev->driver->irq_handler = gen8_irq_handler; 4376 dev->driver->irq_preinstall = gen8_irq_reset; 4377 dev->driver->irq_postinstall = gen8_irq_postinstall; 4378 dev->driver->irq_uninstall = gen8_irq_uninstall; 4379 dev->driver->enable_vblank = gen8_enable_vblank; 4380 dev->driver->disable_vblank = gen8_disable_vblank; 4381 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4382 } else if (HAS_PCH_SPLIT(dev)) { 4383 dev->driver->irq_handler = ironlake_irq_handler; 4384 dev->driver->irq_preinstall = ironlake_irq_reset; 4385 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4386 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4387 dev->driver->enable_vblank = ironlake_enable_vblank; 4388 dev->driver->disable_vblank = ironlake_disable_vblank; 4389 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4390 } else { 4391 if (INTEL_INFO(dev_priv)->gen == 2) { 4392 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4393 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4394 dev->driver->irq_handler = i8xx_irq_handler; 4395 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4396 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4397 dev->driver->irq_preinstall = i915_irq_preinstall; 4398 dev->driver->irq_postinstall = i915_irq_postinstall; 4399 dev->driver->irq_uninstall = i915_irq_uninstall; 4400 dev->driver->irq_handler = i915_irq_handler; 4401 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4402 } else { 4403 dev->driver->irq_preinstall = i965_irq_preinstall; 4404 dev->driver->irq_postinstall = i965_irq_postinstall; 4405 dev->driver->irq_uninstall = i965_irq_uninstall; 4406 dev->driver->irq_handler = i965_irq_handler; 4407 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4408 } 4409 dev->driver->enable_vblank = i915_enable_vblank; 4410 dev->driver->disable_vblank = i915_disable_vblank; 4411 } 4412 } 4413 4414 /** 4415 * intel_hpd_init - initializes and enables hpd support 4416 * @dev_priv: i915 device instance 4417 * 4418 * This function enables the hotplug support. It requires that interrupts have 4419 * already been enabled with intel_irq_init_hw(). From this point on hotplug and 4420 * poll request can run concurrently to other code, so locking rules must be 4421 * obeyed. 
4422 * 4423 * This is a separate step from interrupt enabling to simplify the locking rules 4424 * in the driver load and resume code. 4425 */ 4426 void intel_hpd_init(struct drm_i915_private *dev_priv) 4427 { 4428 struct drm_device *dev = dev_priv->dev; 4429 struct drm_mode_config *mode_config = &dev->mode_config; 4430 struct drm_connector *connector; 4431 int i; 4432 4433 for (i = 1; i < HPD_NUM_PINS; i++) { 4434 dev_priv->hpd_stats[i].hpd_cnt = 0; 4435 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4436 } 4437 list_for_each_entry(connector, &mode_config->connector_list, head) { 4438 struct intel_connector *intel_connector = to_intel_connector(connector); 4439 connector->polled = intel_connector->polled; 4440 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4441 connector->polled = DRM_CONNECTOR_POLL_HPD; 4442 if (intel_connector->mst_port) 4443 connector->polled = DRM_CONNECTOR_POLL_HPD; 4444 } 4445 4446 /* Interrupt setup is already guaranteed to be single-threaded, this is 4447 * just to make the assert_spin_locked checks happy. */ 4448 spin_lock_irq(&dev_priv->irq_lock); 4449 if (dev_priv->display.hpd_irq_setup) 4450 dev_priv->display.hpd_irq_setup(dev); 4451 spin_unlock_irq(&dev_priv->irq_lock); 4452 } 4453 4454 /** 4455 * intel_irq_install - enables the hardware interrupt 4456 * @dev_priv: i915 device instance 4457 * 4458 * This function enables the hardware interrupt handling, but leaves the hotplug 4459 * handling still disabled. It is called after intel_irq_init(). 4460 * 4461 * In the driver load and resume code we need working interrupts in a few places 4462 * but don't want to deal with the hassle of concurrent probe and hotplug 4463 * workers. Hence the split into this two-stage approach. 4464 */ 4465 int intel_irq_install(struct drm_i915_private *dev_priv) 4466 { 4467 /* 4468 * We enable some interrupt sources in our postinstall hooks, so mark 4469 * interrupts as enabled _before_ actually enabling them to avoid 4470 * special cases in our ordering checks. 4471 */ 4472 dev_priv->pm.irqs_enabled = true; 4473 4474 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4475 } 4476 4477 /** 4478 * intel_irq_uninstall - finilizes all irq handling 4479 * @dev_priv: i915 device instance 4480 * 4481 * This stops interrupt and hotplug handling and unregisters and frees all 4482 * resources acquired in the init functions. 4483 */ 4484 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4485 { 4486 drm_irq_uninstall(dev_priv->dev); 4487 intel_hpd_cancel_work(dev_priv); 4488 dev_priv->pm.irqs_enabled = false; 4489 } 4490 4491 /** 4492 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4493 * @dev_priv: i915 device instance 4494 * 4495 * This function is used to disable interrupts at runtime, both in the runtime 4496 * pm and the system suspend/resume code. 4497 */ 4498 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4499 { 4500 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4501 dev_priv->pm.irqs_enabled = false; 4502 } 4503 4504 /** 4505 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4506 * @dev_priv: i915 device instance 4507 * 4508 * This function is used to enable interrupts at runtime, both in the runtime 4509 * pm and the system suspend/resume code. 
4510 */ 4511 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4512 { 4513 dev_priv->pm.irqs_enabled = true; 4514 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4515 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4516 } 4517